/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);

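/*
 * lockless check for whether caching of a block group has finished; the
 * memory barrier makes sure we see an up to date ->cached value
 */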
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

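/* return non-zero if all of the given allocation bits are set on the group */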
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		WARN_ON(cache->reserved_pinned > 0);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

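/*
 * mark a byte range as excluded (EXTENT_UPTODATE in both freed_extents
 * trees) so it is never handed out as free space
 */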
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

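/*
 * account the stripes holding super block copies inside this block group
 * and exclude them from the free space cache
 */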
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

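/*
 * grab a reference on the caching control of a block group, or NULL if
 * the group isn't being cached the slow way right now
 */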
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

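/*
 * worker that scans the commit root of the extent tree and feeds the
 * gaps between allocated extents into the block group's free space cache
 */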
static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_readonly += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}

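/*
 * start caching a block group's free space: first try the fast path and
 * load the free space cache from disk (not possible during a commit); if
 * that fails and load_cache_only isn't set, kick off a kthread to scan
 * the extent tree
 */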
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from on-disk cache during a commit since we
	 * need to have the normal tree locking.
	 */
	if (!trans->transaction->in_commit) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1)
			return 0;
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

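/*
 * find the space_info that tracks the given allocation flags (data,
 * metadata or system)
 */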
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

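/*
 * scale num by factor/10 (div_factor) or factor/100 (div_factor_fine);
 * a full factor means no scaling
 */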
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

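/*
 * walk the block groups looking for a metadata group that is less than
 * "factor" tenths full, starting at search_hint and wrapping back to
 * search_start; returns the objectid of a suitable group, or 0 if none
 * is found
 */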
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, and increase the lower level extents' reference counts.  The
 * original implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

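/*
 * hash of (root objectid, inode objectid, file offset); this is what the
 * key offset of an implicit (EXTENT_DATA_REF) back ref is set to
 */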
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

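/*
 * pick the back ref key type: tree block vs data extent, and shared
 * (parent set) vs owned by a single tree
 */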
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

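/*
 * find the key immediately after the path's current position, walking up
 * the path if the current node is exhausted; returns 1 at the end of the
 * tree
 */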
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

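/* insert a keyed (non-inline) back ref item, tree block or data */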
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

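/*
 * drop refs from a back ref: update the inline ref in place if we have
 * one, otherwise shrink or delete the keyed ref item
 */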
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
}

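/*
 * if the DISCARD mount option is set, map the extent to the underlying
 * device stripes and issue a discard for each of them
 */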
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will set up the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

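/*
 * process one delayed data ref: allocate the reserved extent, add a back
 * ref, or free the extent, depending on the queued action
 */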
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

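/*
 * apply a delayed extent op (flags and/or key update) to an extent item
 * that is already mapped into the given leaf
 */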
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

1924 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1925 struct btrfs_root *root,
1926 struct btrfs_delayed_ref_node *node,
1927 struct btrfs_delayed_extent_op *extent_op)
1928 {
1929 struct btrfs_key key;
1930 struct btrfs_path *path;
1931 struct btrfs_extent_item *ei;
1932 struct extent_buffer *leaf;
1933 u32 item_size;
1934 int ret;
1935 int err = 0;
1936
1937 path = btrfs_alloc_path();
1938 if (!path)
1939 return -ENOMEM;
1940
1941 key.objectid = node->bytenr;
1942 key.type = BTRFS_EXTENT_ITEM_KEY;
1943 key.offset = node->num_bytes;
1944
1945 path->reada = 1;
1946 path->leave_spinning = 1;
1947 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1948 path, 0, 1);
1949 if (ret < 0) {
1950 err = ret;
1951 goto out;
1952 }
1953 if (ret > 0) {
1954 err = -EIO;
1955 goto out;
1956 }
1957
1958 leaf = path->nodes[0];
1959 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1960 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1961 if (item_size < sizeof(*ei)) {
1962 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1963 path, (u64)-1, 0);
1964 if (ret < 0) {
1965 err = ret;
1966 goto out;
1967 }
1968 leaf = path->nodes[0];
1969 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1970 }
1971 #endif
1972 BUG_ON(item_size < sizeof(*ei));
1973 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1974 __run_delayed_extent_op(extent_op, leaf, ei);
1975
1976 btrfs_mark_buffer_dirty(leaf);
1977 out:
1978 btrfs_free_path(path);
1979 return err;
1980 }
1981
1982 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1983 struct btrfs_root *root,
1984 struct btrfs_delayed_ref_node *node,
1985 struct btrfs_delayed_extent_op *extent_op,
1986 int insert_reserved)
1987 {
1988 int ret = 0;
1989 struct btrfs_delayed_tree_ref *ref;
1990 struct btrfs_key ins;
1991 u64 parent = 0;
1992 u64 ref_root = 0;
1993
1994 ins.objectid = node->bytenr;
1995 ins.offset = node->num_bytes;
1996 ins.type = BTRFS_EXTENT_ITEM_KEY;
1997
1998 ref = btrfs_delayed_node_to_tree_ref(node);
1999 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2000 parent = ref->parent;
2001 else
2002 ref_root = ref->root;
2003
2004 BUG_ON(node->ref_mod != 1);
2005 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2006 BUG_ON(!extent_op || !extent_op->update_flags ||
2007 !extent_op->update_key);
2008 ret = alloc_reserved_tree_block(trans, root,
2009 parent, ref_root,
2010 extent_op->flags_to_set,
2011 &extent_op->key,
2012 ref->level, &ins);
2013 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2014 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2015 node->num_bytes, parent, ref_root,
2016 ref->level, 0, 1, extent_op);
2017 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2018 ret = __btrfs_free_extent(trans, root, node->bytenr,
2019 node->num_bytes, parent, ref_root,
2020 ref->level, 0, 1, extent_op);
2021 } else {
2022 BUG();
2023 }
2024 return ret;
2025 }
2026
2027 /* helper function to actually process a single delayed ref entry */
2028 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2029 struct btrfs_root *root,
2030 struct btrfs_delayed_ref_node *node,
2031 struct btrfs_delayed_extent_op *extent_op,
2032 int insert_reserved)
2033 {
2034 int ret;
2035 if (btrfs_delayed_ref_is_head(node)) {
2036 struct btrfs_delayed_ref_head *head;
2037 /*
2038 * we've hit the end of the chain and we were supposed
2039 * to insert this extent into the tree. But, it got
2040 * deleted before we ever needed to insert it, so all
2041 * we have to do is clean up the accounting
2042 */
2043 BUG_ON(extent_op);
2044 head = btrfs_delayed_node_to_head(node);
2045 if (insert_reserved) {
2046 btrfs_pin_extent(root, node->bytenr,
2047 node->num_bytes, 1);
2048 if (head->is_data) {
2049 ret = btrfs_del_csums(trans, root,
2050 node->bytenr,
2051 node->num_bytes);
2052 BUG_ON(ret);
2053 }
2054 }
2055 mutex_unlock(&head->mutex);
2056 return 0;
2057 }
2058
2059 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2060 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2061 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2062 insert_reserved);
2063 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2064 node->type == BTRFS_SHARED_DATA_REF_KEY)
2065 ret = run_delayed_data_ref(trans, root, node, extent_op,
2066 insert_reserved);
2067 else
2068 BUG();
2069 return ret;
2070 }
2071
2072 static noinline struct btrfs_delayed_ref_node *
2073 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2074 {
2075 struct rb_node *node;
2076 struct btrfs_delayed_ref_node *ref;
2077 int action = BTRFS_ADD_DELAYED_REF;
2078 again:
2079 /*
2080 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2081 * this prevents the ref count from going down to zero while
2082 * there are still pending delayed refs.
2083 */
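/*
 * e.g. if the pending updates for this bytenr are one drop and one
 * add, the add is returned (and run) first, so the ref count never
 * dips to zero while updates are still queued.
 */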
2084 node = rb_prev(&head->node.rb_node);
2085 while (1) {
2086 if (!node)
2087 break;
2088 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2089 rb_node);
2090 if (ref->bytenr != head->node.bytenr)
2091 break;
2092 if (ref->action == action)
2093 return ref;
2094 node = rb_prev(node);
2095 }
2096 if (action == BTRFS_ADD_DELAYED_REF) {
2097 action = BTRFS_DROP_DELAYED_REF;
2098 goto again;
2099 }
2100 return NULL;
2101 }
2102
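/*
 * run the delayed refs for each head on the given cluster list.  For
 * each head we pop delayed ref nodes (adds before drops, see
 * select_delayed_ref) and run them one at a time; once a head has no
 * nodes left, the head itself is run to settle any reserved-extent
 * accounting.  Returns roughly the number of refs processed.
 */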
2103 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2104 struct btrfs_root *root,
2105 struct list_head *cluster)
2106 {
2107 struct btrfs_delayed_ref_root *delayed_refs;
2108 struct btrfs_delayed_ref_node *ref;
2109 struct btrfs_delayed_ref_head *locked_ref = NULL;
2110 struct btrfs_delayed_extent_op *extent_op;
2111 int ret;
2112 int count = 0;
2113 int must_insert_reserved = 0;
2114
2115 delayed_refs = &trans->transaction->delayed_refs;
2116 while (1) {
2117 if (!locked_ref) {
2118 /* pick a new head ref from the cluster list */
2119 if (list_empty(cluster))
2120 break;
2121
2122 locked_ref = list_entry(cluster->next,
2123 struct btrfs_delayed_ref_head, cluster);
2124
2125 /* grab the lock that says we are going to process
2126 * all the refs for this head */
2127 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2128
2129 /*
2130 * we may have dropped the spin lock to get the head
2131 * mutex lock, and that might have given someone else
2132 * time to free the head. If that's true, it has been
2133 * removed from our list and we can move on.
2134 */
2135 if (ret == -EAGAIN) {
2136 locked_ref = NULL;
2137 count++;
2138 continue;
2139 }
2140 }
2141
2142 /*
2143 * record the must_insert_reserved flag before we
2144 * drop the spin lock.
2145 */
2146 must_insert_reserved = locked_ref->must_insert_reserved;
2147 locked_ref->must_insert_reserved = 0;
2148
2149 extent_op = locked_ref->extent_op;
2150 locked_ref->extent_op = NULL;
2151
2152 /*
2153 * locked_ref is the head node, so we have to go one
2154 * node back for any delayed ref updates
2155 */
2156 ref = select_delayed_ref(locked_ref);
2157 if (!ref) {
2158 /* All delayed refs have been processed. Go ahead
2159 * and send the head node to run_one_delayed_ref,
2160 * so that any accounting fixes can happen
2161 */
2162 ref = &locked_ref->node;
2163
2164 if (extent_op && must_insert_reserved) {
2165 kfree(extent_op);
2166 extent_op = NULL;
2167 }
2168
2169 if (extent_op) {
2170 spin_unlock(&delayed_refs->lock);
2171
2172 ret = run_delayed_extent_op(trans, root,
2173 ref, extent_op);
2174 BUG_ON(ret);
2175 kfree(extent_op);
2176
2177 cond_resched();
2178 spin_lock(&delayed_refs->lock);
2179 continue;
2180 }
2181
2182 list_del_init(&locked_ref->cluster);
2183 locked_ref = NULL;
2184 }
2185
2186 ref->in_tree = 0;
2187 rb_erase(&ref->rb_node, &delayed_refs->root);
2188 delayed_refs->num_entries--;
2189
2190 spin_unlock(&delayed_refs->lock);
2191
2192 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2193 must_insert_reserved);
2194 BUG_ON(ret);
2195
2196 btrfs_put_delayed_ref(ref);
2197 kfree(extent_op);
2198 count++;
2199
2200 cond_resched();
2201 spin_lock(&delayed_refs->lock);
2202 }
2203 return count;
2204 }
2205
2206 /*
2207 * this starts processing the delayed reference count updates and
2208 * extent insertions we have queued up so far. count can be
2209 * 0, which means to process everything in the tree at the start
2210 * of the run (but not newly added entries), or it can be some target
2211 * number you'd like to process.
2212 */
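/*
 * e.g. the flush done at commit time is roughly
 *
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *
 * where the -1 count also picks up refs added while we are running.
 */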
2213 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2214 struct btrfs_root *root, unsigned long count)
2215 {
2216 struct rb_node *node;
2217 struct btrfs_delayed_ref_root *delayed_refs;
2218 struct btrfs_delayed_ref_node *ref;
2219 struct list_head cluster;
2220 int ret;
2221 int run_all = count == (unsigned long)-1;
2222 int run_most = 0;
2223
2224 if (root == root->fs_info->extent_root)
2225 root = root->fs_info->tree_root;
2226
2227 delayed_refs = &trans->transaction->delayed_refs;
2228 INIT_LIST_HEAD(&cluster);
2229 again:
2230 spin_lock(&delayed_refs->lock);
2231 if (count == 0) {
2232 count = delayed_refs->num_entries * 2;
2233 run_most = 1;
2234 }
2235 while (1) {
2236 if (!(run_all || run_most) &&
2237 delayed_refs->num_heads_ready < 64)
2238 break;
2239
2240 /*
2241 * go find something we can process in the rbtree. We start at
2242 * the beginning of the tree, and then build a cluster
2243 * of refs to process starting at the first one we are able to
2244 * lock
2245 */
2246 ret = btrfs_find_ref_cluster(trans, &cluster,
2247 delayed_refs->run_delayed_start);
2248 if (ret)
2249 break;
2250
2251 ret = run_clustered_refs(trans, root, &cluster);
2252 BUG_ON(ret < 0);
2253
2254 count -= min_t(unsigned long, ret, count);
2255
2256 if (count == 0)
2257 break;
2258 }
2259
2260 if (run_all) {
2261 node = rb_first(&delayed_refs->root);
2262 if (!node)
2263 goto out;
2264 count = (unsigned long)-1;
2265
2266 while (node) {
2267 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2268 rb_node);
2269 if (btrfs_delayed_ref_is_head(ref)) {
2270 struct btrfs_delayed_ref_head *head;
2271
2272 head = btrfs_delayed_node_to_head(ref);
2273 atomic_inc(&ref->refs);
2274
2275 spin_unlock(&delayed_refs->lock);
2276 mutex_lock(&head->mutex);
2277 mutex_unlock(&head->mutex);
2278
2279 btrfs_put_delayed_ref(ref);
2280 cond_resched();
2281 goto again;
2282 }
2283 node = rb_next(node);
2284 }
2285 spin_unlock(&delayed_refs->lock);
2286 schedule_timeout(1);
2287 goto again;
2288 }
2289 out:
2290 spin_unlock(&delayed_refs->lock);
2291 return 0;
2292 }
2293
2294 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2295 struct btrfs_root *root,
2296 u64 bytenr, u64 num_bytes, u64 flags,
2297 int is_data)
2298 {
2299 struct btrfs_delayed_extent_op *extent_op;
2300 int ret;
2301
2302 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2303 if (!extent_op)
2304 return -ENOMEM;
2305
2306 extent_op->flags_to_set = flags;
2307 extent_op->update_flags = 1;
2308 extent_op->update_key = 0;
2309 extent_op->is_data = is_data ? 1 : 0;
2310
2311 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2312 if (ret)
2313 kfree(extent_op);
2314 return ret;
2315 }
2316
2317 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2318 struct btrfs_root *root,
2319 struct btrfs_path *path,
2320 u64 objectid, u64 offset, u64 bytenr)
2321 {
2322 struct btrfs_delayed_ref_head *head;
2323 struct btrfs_delayed_ref_node *ref;
2324 struct btrfs_delayed_data_ref *data_ref;
2325 struct btrfs_delayed_ref_root *delayed_refs;
2326 struct rb_node *node;
2327 int ret = 0;
2328
2329 ret = -ENOENT;
2330 delayed_refs = &trans->transaction->delayed_refs;
2331 spin_lock(&delayed_refs->lock);
2332 head = btrfs_find_delayed_ref_head(trans, bytenr);
2333 if (!head)
2334 goto out;
2335
2336 if (!mutex_trylock(&head->mutex)) {
2337 atomic_inc(&head->node.refs);
2338 spin_unlock(&delayed_refs->lock);
2339
2340 btrfs_release_path(root->fs_info->extent_root, path);
2341
2342 mutex_lock(&head->mutex);
2343 mutex_unlock(&head->mutex);
2344 btrfs_put_delayed_ref(&head->node);
2345 return -EAGAIN;
2346 }
2347
2348 node = rb_prev(&head->node.rb_node);
2349 if (!node)
2350 goto out_unlock;
2351
2352 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2353
2354 if (ref->bytenr != bytenr)
2355 goto out_unlock;
2356
2357 ret = 1;
2358 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2359 goto out_unlock;
2360
2361 data_ref = btrfs_delayed_node_to_data_ref(ref);
2362
2363 node = rb_prev(node);
2364 if (node) {
2365 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2366 if (ref->bytenr == bytenr)
2367 goto out_unlock;
2368 }
2369
2370 if (data_ref->root != root->root_key.objectid ||
2371 data_ref->objectid != objectid || data_ref->offset != offset)
2372 goto out_unlock;
2373
2374 ret = 0;
2375 out_unlock:
2376 mutex_unlock(&head->mutex);
2377 out:
2378 spin_unlock(&delayed_refs->lock);
2379 return ret;
2380 }
2381
2382 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2383 struct btrfs_root *root,
2384 struct btrfs_path *path,
2385 u64 objectid, u64 offset, u64 bytenr)
2386 {
2387 struct btrfs_root *extent_root = root->fs_info->extent_root;
2388 struct extent_buffer *leaf;
2389 struct btrfs_extent_data_ref *ref;
2390 struct btrfs_extent_inline_ref *iref;
2391 struct btrfs_extent_item *ei;
2392 struct btrfs_key key;
2393 u32 item_size;
2394 int ret;
2395
2396 key.objectid = bytenr;
2397 key.offset = (u64)-1;
2398 key.type = BTRFS_EXTENT_ITEM_KEY;
2399
2400 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2401 if (ret < 0)
2402 goto out;
2403 BUG_ON(ret == 0);
2404
2405 ret = -ENOENT;
2406 if (path->slots[0] == 0)
2407 goto out;
2408
2409 path->slots[0]--;
2410 leaf = path->nodes[0];
2411 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2412
2413 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2414 goto out;
2415
2416 ret = 1;
2417 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2418 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2419 if (item_size < sizeof(*ei)) {
2420 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2421 goto out;
2422 }
2423 #endif
2424 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2425
2426 if (item_size != sizeof(*ei) +
2427 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2428 goto out;
2429
2430 if (btrfs_extent_generation(leaf, ei) <=
2431 btrfs_root_last_snapshot(&root->root_item))
2432 goto out;
2433
2434 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2435 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2436 BTRFS_EXTENT_DATA_REF_KEY)
2437 goto out;
2438
2439 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2440 if (btrfs_extent_refs(leaf, ei) !=
2441 btrfs_extent_data_ref_count(leaf, ref) ||
2442 btrfs_extent_data_ref_root(leaf, ref) !=
2443 root->root_key.objectid ||
2444 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2445 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2446 goto out;
2447
2448 ret = 0;
2449 out:
2450 return ret;
2451 }
2452
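/*
 * Roughly: returns 0 if the data extent at @bytenr is referenced only
 * by this root/objectid/offset according to both the committed extent
 * tree and the pending delayed refs; any other return means a cross
 * reference may exist (or we could not prove otherwise).
 */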
2453 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2454 struct btrfs_root *root,
2455 u64 objectid, u64 offset, u64 bytenr)
2456 {
2457 struct btrfs_path *path;
2458 int ret;
2459 int ret2;
2460
2461 path = btrfs_alloc_path();
2462 if (!path)
2463 return -ENOMEM;
2464
2465 do {
2466 ret = check_committed_ref(trans, root, path, objectid,
2467 offset, bytenr);
2468 if (ret && ret != -ENOENT)
2469 goto out;
2470
2471 ret2 = check_delayed_ref(trans, root, path, objectid,
2472 offset, bytenr);
2473 } while (ret2 == -EAGAIN);
2474
2475 if (ret2 && ret2 != -ENOENT) {
2476 ret = ret2;
2477 goto out;
2478 }
2479
2480 if (ret != -ENOENT || ret2 != -ENOENT)
2481 ret = 0;
2482 out:
2483 btrfs_free_path(path);
2484 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2485 WARN_ON(ret > 0);
2486 return ret;
2487 }
2488
2489 #if 0
2490 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2491 struct extent_buffer *buf, u32 nr_extents)
2492 {
2493 struct btrfs_key key;
2494 struct btrfs_file_extent_item *fi;
2495 u64 root_gen;
2496 u32 nritems;
2497 int i;
2498 int level;
2499 int ret = 0;
2500 int shared = 0;
2501
2502 if (!root->ref_cows)
2503 return 0;
2504
2505 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2506 shared = 0;
2507 root_gen = root->root_key.offset;
2508 } else {
2509 shared = 1;
2510 root_gen = trans->transid - 1;
2511 }
2512
2513 level = btrfs_header_level(buf);
2514 nritems = btrfs_header_nritems(buf);
2515
2516 if (level == 0) {
2517 struct btrfs_leaf_ref *ref;
2518 struct btrfs_extent_info *info;
2519
2520 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2521 if (!ref) {
2522 ret = -ENOMEM;
2523 goto out;
2524 }
2525
2526 ref->root_gen = root_gen;
2527 ref->bytenr = buf->start;
2528 ref->owner = btrfs_header_owner(buf);
2529 ref->generation = btrfs_header_generation(buf);
2530 ref->nritems = nr_extents;
2531 info = ref->extents;
2532
2533 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2534 u64 disk_bytenr;
2535 btrfs_item_key_to_cpu(buf, &key, i);
2536 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2537 continue;
2538 fi = btrfs_item_ptr(buf, i,
2539 struct btrfs_file_extent_item);
2540 if (btrfs_file_extent_type(buf, fi) ==
2541 BTRFS_FILE_EXTENT_INLINE)
2542 continue;
2543 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2544 if (disk_bytenr == 0)
2545 continue;
2546
2547 info->bytenr = disk_bytenr;
2548 info->num_bytes =
2549 btrfs_file_extent_disk_num_bytes(buf, fi);
2550 info->objectid = key.objectid;
2551 info->offset = key.offset;
2552 info++;
2553 }
2554
2555 ret = btrfs_add_leaf_ref(root, ref, shared);
2556 if (ret == -EEXIST && shared) {
2557 struct btrfs_leaf_ref *old;
2558 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2559 BUG_ON(!old);
2560 btrfs_remove_leaf_ref(root, old);
2561 btrfs_free_leaf_ref(root, old);
2562 ret = btrfs_add_leaf_ref(root, ref, shared);
2563 }
2564 WARN_ON(ret);
2565 btrfs_free_leaf_ref(root, ref);
2566 }
2567 out:
2568 return ret;
2569 }
2570
2571 /* when a block goes through cow, we update the reference counts of
2572 * everything that block points to. The internal pointers of the block
2573 * can be in just about any order, and it is likely to have clusters of
2574 * things that are close together and clusters of things that are not.
2575 *
2576 * To help reduce the seeks that come with updating all of these reference
2577 * counts, sort them by byte number before actual updates are done.
2578 *
2579 * struct refsort is used to match byte number to slot in the btree block.
2580 * we sort based on the byte number and then use the slot to actually
2581 * find the item.
2582 *
2583 * struct refsort is smaller than struct btrfs_item and smaller than
2584 * struct btrfs_key_ptr. Since we're currently limited to the page size
2585 * for a btree block, there's no way for a kmalloc of refsorts for a
2586 * single node to be bigger than a page.
2587 */
2588 struct refsort {
2589 u64 bytenr;
2590 u32 slot;
2591 };
2592
2593 /*
2594 * for passing into sort()
2595 */
2596 static int refsort_cmp(const void *a_void, const void *b_void)
2597 {
2598 const struct refsort *a = a_void;
2599 const struct refsort *b = b_void;
2600
2601 if (a->bytenr < b->bytenr)
2602 return -1;
2603 if (a->bytenr > b->bytenr)
2604 return 1;
2605 return 0;
2606 }
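/*
 * a sketch of the intended use with sort() from linux/sort.h:
 *
 *	sort(refs, nr, sizeof(struct refsort), refsort_cmp, NULL);
 *
 * after which refs[] can be walked in bytenr order and ->slot used to
 * find each item in the block.
 */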
2607 #endif
2608
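/*
 * walk every pointer in a tree block and add (inc != 0) or drop
 * (inc == 0) one reference for each extent or child block it points
 * to.  full_backref selects shared refs parented by this block rather
 * than refs keyed on the owning tree.
 */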
2609 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2610 struct btrfs_root *root,
2611 struct extent_buffer *buf,
2612 int full_backref, int inc)
2613 {
2614 u64 bytenr;
2615 u64 num_bytes;
2616 u64 parent;
2617 u64 ref_root;
2618 u32 nritems;
2619 struct btrfs_key key;
2620 struct btrfs_file_extent_item *fi;
2621 int i;
2622 int level;
2623 int ret = 0;
2624 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2625 u64, u64, u64, u64, u64, u64);
2626
2627 ref_root = btrfs_header_owner(buf);
2628 nritems = btrfs_header_nritems(buf);
2629 level = btrfs_header_level(buf);
2630
2631 if (!root->ref_cows && level == 0)
2632 return 0;
2633
2634 if (inc)
2635 process_func = btrfs_inc_extent_ref;
2636 else
2637 process_func = btrfs_free_extent;
2638
2639 if (full_backref)
2640 parent = buf->start;
2641 else
2642 parent = 0;
2643
2644 for (i = 0; i < nritems; i++) {
2645 if (level == 0) {
2646 btrfs_item_key_to_cpu(buf, &key, i);
2647 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2648 continue;
2649 fi = btrfs_item_ptr(buf, i,
2650 struct btrfs_file_extent_item);
2651 if (btrfs_file_extent_type(buf, fi) ==
2652 BTRFS_FILE_EXTENT_INLINE)
2653 continue;
2654 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2655 if (bytenr == 0)
2656 continue;
2657
2658 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2659 key.offset -= btrfs_file_extent_offset(buf, fi);
2660 ret = process_func(trans, root, bytenr, num_bytes,
2661 parent, ref_root, key.objectid,
2662 key.offset);
2663 if (ret)
2664 goto fail;
2665 } else {
2666 bytenr = btrfs_node_blockptr(buf, i);
2667 num_bytes = btrfs_level_size(root, level - 1);
2668 ret = process_func(trans, root, bytenr, num_bytes,
2669 parent, ref_root, level - 1, 0);
2670 if (ret)
2671 goto fail;
2672 }
2673 }
2674 return 0;
2675 fail:
2676 BUG();
2677 return ret;
2678 }
2679
2680 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2681 struct extent_buffer *buf, int full_backref)
2682 {
2683 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2684 }
2685
2686 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2687 struct extent_buffer *buf, int full_backref)
2688 {
2689 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2690 }
2691
2692 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2693 struct btrfs_root *root,
2694 struct btrfs_path *path,
2695 struct btrfs_block_group_cache *cache)
2696 {
2697 int ret;
2698 struct btrfs_root *extent_root = root->fs_info->extent_root;
2699 unsigned long bi;
2700 struct extent_buffer *leaf;
2701
2702 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2703 if (ret < 0)
2704 goto fail;
2705 BUG_ON(ret);
2706
2707 leaf = path->nodes[0];
2708 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2709 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2710 btrfs_mark_buffer_dirty(leaf);
2711 btrfs_release_path(extent_root, path);
2712 fail:
2713 if (ret)
2714 return ret;
2715 return 0;
2716
2717 }
2718
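/*
 * return the block group that follows @cache in the rb-tree (or NULL),
 * taking a reference on the new group and dropping the caller's
 * reference on @cache.
 */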
2719 static struct btrfs_block_group_cache *
2720 next_block_group(struct btrfs_root *root,
2721 struct btrfs_block_group_cache *cache)
2722 {
2723 struct rb_node *node;
2724 spin_lock(&root->fs_info->block_group_cache_lock);
2725 node = rb_next(&cache->cache_node);
2726 btrfs_put_block_group(cache);
2727 if (node) {
2728 cache = rb_entry(node, struct btrfs_block_group_cache,
2729 cache_node);
2730 btrfs_get_block_group(cache);
2731 } else
2732 cache = NULL;
2733 spin_unlock(&root->fs_info->block_group_cache_lock);
2734 return cache;
2735 }
2736
2737 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2738 struct btrfs_trans_handle *trans,
2739 struct btrfs_path *path)
2740 {
2741 struct btrfs_root *root = block_group->fs_info->tree_root;
2742 struct inode *inode = NULL;
2743 u64 alloc_hint = 0;
2744 int num_pages = 0;
2745 int retries = 0;
2746 int ret = 0;
2747
2748 /*
2749 * If this block group is smaller than 100 megs, don't bother caching the
2750 * block group.
2751 */
2752 if (block_group->key.offset < (100 * 1024 * 1024)) {
2753 spin_lock(&block_group->lock);
2754 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2755 spin_unlock(&block_group->lock);
2756 return 0;
2757 }
2758
2759 again:
2760 inode = lookup_free_space_inode(root, block_group, path);
2761 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2762 ret = PTR_ERR(inode);
2763 btrfs_release_path(root, path);
2764 goto out;
2765 }
2766
2767 if (IS_ERR(inode)) {
2768 BUG_ON(retries);
2769 retries++;
2770
2771 if (block_group->ro)
2772 goto out_free;
2773
2774 ret = create_free_space_inode(root, trans, block_group, path);
2775 if (ret)
2776 goto out_free;
2777 goto again;
2778 }
2779
2780 /*
2781 * We want to set the generation to 0 so that if anything goes wrong
2782 * from here on out we know not to trust this cache when we load up next
2783 * time.
2784 */
2785 BTRFS_I(inode)->generation = 0;
2786 ret = btrfs_update_inode(trans, root, inode);
2787 WARN_ON(ret);
2788
2789 if (i_size_read(inode) > 0) {
2790 ret = btrfs_truncate_free_space_cache(root, trans, path,
2791 inode);
2792 if (ret)
2793 goto out_put;
2794 }
2795
2796 spin_lock(&block_group->lock);
2797 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2798 spin_unlock(&block_group->lock);
2799 goto out_put;
2800 }
2801 spin_unlock(&block_group->lock);
2802
2803 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2804 if (!num_pages)
2805 num_pages = 1;
2806
2807 /*
2808 * Just to make absolutely sure we have enough space, we're going to
2809 * preallocate 16 pages worth of space for each gigabyte of the
2810 * block group. In practice we ought to use at most 8, but we need
2811 * extra space so we can add our header and have a terminator
2812 * between the extents and the bitmaps.
2813 */
2814 num_pages *= 16;
2815 num_pages *= PAGE_CACHE_SIZE;
2816
2817 ret = btrfs_check_data_free_space(inode, num_pages);
2818 if (ret)
2819 goto out_put;
2820
2821 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2822 num_pages, num_pages,
2823 &alloc_hint);
2824 btrfs_free_reserved_data_space(inode, num_pages);
2825 out_put:
2826 iput(inode);
2827 out_free:
2828 btrfs_release_path(root, path);
2829 out:
2830 spin_lock(&block_group->lock);
2831 if (ret)
2832 block_group->disk_cache_state = BTRFS_DC_ERROR;
2833 else
2834 block_group->disk_cache_state = BTRFS_DC_SETUP;
2835 spin_unlock(&block_group->lock);
2836
2837 return ret;
2838 }
2839
2840 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2841 struct btrfs_root *root)
2842 {
2843 struct btrfs_block_group_cache *cache;
2844 int err = 0;
2845 struct btrfs_path *path;
2846 u64 last = 0;
2847
2848 path = btrfs_alloc_path();
2849 if (!path)
2850 return -ENOMEM;
2851
2852 again:
2853 while (1) {
2854 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2855 while (cache) {
2856 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2857 break;
2858 cache = next_block_group(root, cache);
2859 }
2860 if (!cache) {
2861 if (last == 0)
2862 break;
2863 last = 0;
2864 continue;
2865 }
2866 err = cache_save_setup(cache, trans, path);
2867 last = cache->key.objectid + cache->key.offset;
2868 btrfs_put_block_group(cache);
2869 }
2870
2871 while (1) {
2872 if (last == 0) {
2873 err = btrfs_run_delayed_refs(trans, root,
2874 (unsigned long)-1);
2875 BUG_ON(err);
2876 }
2877
2878 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2879 while (cache) {
2880 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2881 btrfs_put_block_group(cache);
2882 goto again;
2883 }
2884
2885 if (cache->dirty)
2886 break;
2887 cache = next_block_group(root, cache);
2888 }
2889 if (!cache) {
2890 if (last == 0)
2891 break;
2892 last = 0;
2893 continue;
2894 }
2895
2896 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2897 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2898 cache->dirty = 0;
2899 last = cache->key.objectid + cache->key.offset;
2900
2901 err = write_one_cache_group(trans, root, path, cache);
2902 BUG_ON(err);
2903 btrfs_put_block_group(cache);
2904 }
2905
2906 while (1) {
2907 /*
2908 * I don't think this is needed since we're just marking our
2909 * preallocated extent as written, but it can't hurt just in
2910 * case.
2911 */
2912 if (last == 0) {
2913 err = btrfs_run_delayed_refs(trans, root,
2914 (unsigned long)-1);
2915 BUG_ON(err);
2916 }
2917
2918 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2919 while (cache) {
2920 /*
2921 * Really this shouldn't happen, but it could if we
2922 * couldn't write the entire preallocated extent and
2923 * splitting the extent resulted in a new block.
2924 */
2925 if (cache->dirty) {
2926 btrfs_put_block_group(cache);
2927 goto again;
2928 }
2929 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2930 break;
2931 cache = next_block_group(root, cache);
2932 }
2933 if (!cache) {
2934 if (last == 0)
2935 break;
2936 last = 0;
2937 continue;
2938 }
2939
2940 btrfs_write_out_cache(root, trans, cache, path);
2941
2942 /*
2943 * If we didn't have an error then the cache state is still
2944 * NEED_WRITE, so we can set it to WRITTEN.
2945 */
2946 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2947 cache->disk_cache_state = BTRFS_DC_WRITTEN;
2948 last = cache->key.objectid + cache->key.offset;
2949 btrfs_put_block_group(cache);
2950 }
2951
2952 btrfs_free_path(path);
2953 return 0;
2954 }
2955
2956 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2957 {
2958 struct btrfs_block_group_cache *block_group;
2959 int readonly = 0;
2960
2961 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2962 if (!block_group || block_group->ro)
2963 readonly = 1;
2964 if (block_group)
2965 btrfs_put_block_group(block_group);
2966 return readonly;
2967 }
2968
2969 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2970 u64 total_bytes, u64 bytes_used,
2971 struct btrfs_space_info **space_info)
2972 {
2973 struct btrfs_space_info *found;
2974 int i;
2975 int factor;
2976
2977 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2978 BTRFS_BLOCK_GROUP_RAID10))
2979 factor = 2;
2980 else
2981 factor = 1;
2982
2983 found = __find_space_info(info, flags);
2984 if (found) {
2985 spin_lock(&found->lock);
2986 found->total_bytes += total_bytes;
2987 found->disk_total += total_bytes * factor;
2988 found->bytes_used += bytes_used;
2989 found->disk_used += bytes_used * factor;
2990 found->full = 0;
2991 spin_unlock(&found->lock);
2992 *space_info = found;
2993 return 0;
2994 }
2995 found = kzalloc(sizeof(*found), GFP_NOFS);
2996 if (!found)
2997 return -ENOMEM;
2998
2999 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3000 INIT_LIST_HEAD(&found->block_groups[i]);
3001 init_rwsem(&found->groups_sem);
3002 spin_lock_init(&found->lock);
3003 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
3004 BTRFS_BLOCK_GROUP_SYSTEM |
3005 BTRFS_BLOCK_GROUP_METADATA);
3006 found->total_bytes = total_bytes;
3007 found->disk_total = total_bytes * factor;
3008 found->bytes_used = bytes_used;
3009 found->disk_used = bytes_used * factor;
3010 found->bytes_pinned = 0;
3011 found->bytes_reserved = 0;
3012 found->bytes_readonly = 0;
3013 found->bytes_may_use = 0;
3014 found->full = 0;
3015 found->force_alloc = 0;
3016 *space_info = found;
3017 list_add_rcu(&found->list, &info->space_info);
3018 atomic_set(&found->caching_threads, 0);
3019 return 0;
3020 }
3021
3022 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3023 {
3024 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
3025 BTRFS_BLOCK_GROUP_RAID1 |
3026 BTRFS_BLOCK_GROUP_RAID10 |
3027 BTRFS_BLOCK_GROUP_DUP);
3028 if (extra_flags) {
3029 if (flags & BTRFS_BLOCK_GROUP_DATA)
3030 fs_info->avail_data_alloc_bits |= extra_flags;
3031 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3032 fs_info->avail_metadata_alloc_bits |= extra_flags;
3033 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3034 fs_info->avail_system_alloc_bits |= extra_flags;
3035 }
3036 }
3037
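/*
 * reduce a profile mask to a single sensible choice for the current
 * device count.  e.g. RAID10 | RAID1 | DUP on four or more rw devices
 * reduces to RAID10, while on one device it reduces to DUP (or single).
 */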
3038 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3039 {
3040 u64 num_devices = root->fs_info->fs_devices->rw_devices;
3041
3042 if (num_devices == 1)
3043 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3044 if (num_devices < 4)
3045 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3046
3047 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3048 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3049 BTRFS_BLOCK_GROUP_RAID10))) {
3050 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3051 }
3052
3053 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3054 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3055 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3056 }
3057
3058 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3059 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3060 (flags & BTRFS_BLOCK_GROUP_RAID10) |
3061 (flags & BTRFS_BLOCK_GROUP_DUP)))
3062 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3063 return flags;
3064 }
3065
3066 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3067 {
3068 if (flags & BTRFS_BLOCK_GROUP_DATA)
3069 flags |= root->fs_info->avail_data_alloc_bits &
3070 root->fs_info->data_alloc_profile;
3071 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3072 flags |= root->fs_info->avail_system_alloc_bits &
3073 root->fs_info->system_alloc_profile;
3074 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3075 flags |= root->fs_info->avail_metadata_alloc_bits &
3076 root->fs_info->metadata_alloc_profile;
3077 return btrfs_reduce_alloc_profile(root, flags);
3078 }
3079
3080 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3081 {
3082 u64 flags;
3083
3084 if (data)
3085 flags = BTRFS_BLOCK_GROUP_DATA;
3086 else if (root == root->fs_info->chunk_root)
3087 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3088 else
3089 flags = BTRFS_BLOCK_GROUP_METADATA;
3090
3091 return get_alloc_profile(root, flags);
3092 }
3093
3094 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3095 {
3096 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3097 BTRFS_BLOCK_GROUP_DATA);
3098 }
3099
3100 /*
3101 * This will check the space info that the inode allocates from to make
3102 * sure we have enough space for the requested bytes.
3103 */
3104 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3105 {
3106 struct btrfs_space_info *data_sinfo;
3107 struct btrfs_root *root = BTRFS_I(inode)->root;
3108 u64 used;
3109 int ret = 0, committed = 0, alloc_chunk = 1;
3110
3111 /* make sure bytes are sectorsize aligned */
3112 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3113
3114 if (root == root->fs_info->tree_root) {
3115 alloc_chunk = 0;
3116 committed = 1;
3117 }
3118
3119 data_sinfo = BTRFS_I(inode)->space_info;
3120 if (!data_sinfo)
3121 goto alloc;
3122
3123 again:
3124 /* make sure we have enough space to handle the data first */
3125 spin_lock(&data_sinfo->lock);
3126 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3127 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3128 data_sinfo->bytes_may_use;
3129
3130 if (used + bytes > data_sinfo->total_bytes) {
3131 struct btrfs_trans_handle *trans;
3132
3133 /*
3134 * if we don't have enough free bytes in this space then we need
3135 * to alloc a new chunk.
3136 */
3137 if (!data_sinfo->full && alloc_chunk) {
3138 u64 alloc_target;
3139
3140 data_sinfo->force_alloc = 1;
3141 spin_unlock(&data_sinfo->lock);
3142 alloc:
3143 alloc_target = btrfs_get_alloc_profile(root, 1);
3144 trans = btrfs_join_transaction(root, 1);
3145 if (IS_ERR(trans))
3146 return PTR_ERR(trans);
3147
3148 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3149 bytes + 2 * 1024 * 1024,
3150 alloc_target, 0);
3151 btrfs_end_transaction(trans, root);
3152 if (ret < 0)
3153 return ret;
3154
3155 if (!data_sinfo) {
3156 btrfs_set_inode_space_info(root, inode);
3157 data_sinfo = BTRFS_I(inode)->space_info;
3158 }
3159 goto again;
3160 }
3161 spin_unlock(&data_sinfo->lock);
3162
3163 /* commit the current transaction and try again */
3164 if (!committed && !root->fs_info->open_ioctl_trans) {
3165 committed = 1;
3166 trans = btrfs_join_transaction(root, 1);
3167 if (IS_ERR(trans))
3168 return PTR_ERR(trans);
3169 ret = btrfs_commit_transaction(trans, root);
3170 if (ret)
3171 return ret;
3172 goto again;
3173 }
3174
3175 #if 0 /* I hope we never need this code again, just in case */
3176 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3177 "%llu bytes_reserved, " "%llu bytes_pinned, "
3178 "%llu bytes_readonly, %llu may use %llu total\n",
3179 (unsigned long long)bytes,
3180 (unsigned long long)data_sinfo->bytes_used,
3181 (unsigned long long)data_sinfo->bytes_reserved,
3182 (unsigned long long)data_sinfo->bytes_pinned,
3183 (unsigned long long)data_sinfo->bytes_readonly,
3184 (unsigned long long)data_sinfo->bytes_may_use,
3185 (unsigned long long)data_sinfo->total_bytes);
3186 #endif
3187 return -ENOSPC;
3188 }
3189 data_sinfo->bytes_may_use += bytes;
3190 BTRFS_I(inode)->reserved_bytes += bytes;
3191 spin_unlock(&data_sinfo->lock);
3192
3193 return 0;
3194 }
3195
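/*
 * A rough sketch of the expected pairing in a caller:
 *
 *	ret = btrfs_check_data_free_space(inode, bytes);
 *	if (ret)
 *		return ret;
 *	...
 *	(and on error, before the reservation is consumed)
 *	btrfs_free_reserved_data_space(inode, bytes);
 */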
3196 /*
3197 * called when we are clearing a delalloc extent from the
3198 * inode's io_tree or there was an error for whatever reason
3199 * after calling btrfs_check_data_free_space
3200 */
3201 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3202 {
3203 struct btrfs_root *root = BTRFS_I(inode)->root;
3204 struct btrfs_space_info *data_sinfo;
3205
3206 /* make sure bytes are sectorsize aligned */
3207 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3208
3209 data_sinfo = BTRFS_I(inode)->space_info;
3210 spin_lock(&data_sinfo->lock);
3211 data_sinfo->bytes_may_use -= bytes;
3212 BTRFS_I(inode)->reserved_bytes -= bytes;
3213 spin_unlock(&data_sinfo->lock);
3214 }
3215
3216 static void force_metadata_allocation(struct btrfs_fs_info *info)
3217 {
3218 struct list_head *head = &info->space_info;
3219 struct btrfs_space_info *found;
3220
3221 rcu_read_lock();
3222 list_for_each_entry_rcu(found, head, list) {
3223 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3224 found->force_alloc = 1;
3225 }
3226 rcu_read_unlock();
3227 }
3228
3229 static int should_alloc_chunk(struct btrfs_root *root,
3230 struct btrfs_space_info *sinfo, u64 alloc_bytes)
3231 {
3232 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3233 u64 thresh;
3234
3235 if (sinfo->bytes_used + sinfo->bytes_reserved +
3236 alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3237 return 0;
3238
3239 if (sinfo->bytes_used + sinfo->bytes_reserved +
3240 alloc_bytes < div_factor(num_bytes, 8))
3241 return 0;
3242
3243 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3244 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3245
3246 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3247 return 0;
3248
3249 return 1;
3250 }
3251
3252 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3253 struct btrfs_root *extent_root, u64 alloc_bytes,
3254 u64 flags, int force)
3255 {
3256 struct btrfs_space_info *space_info;
3257 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3258 int ret = 0;
3259
3260 mutex_lock(&fs_info->chunk_mutex);
3261
3262 flags = btrfs_reduce_alloc_profile(extent_root, flags);
3263
3264 space_info = __find_space_info(extent_root->fs_info, flags);
3265 if (!space_info) {
3266 ret = update_space_info(extent_root->fs_info, flags,
3267 0, 0, &space_info);
3268 BUG_ON(ret);
3269 }
3270 BUG_ON(!space_info);
3271
3272 spin_lock(&space_info->lock);
3273 if (space_info->force_alloc)
3274 force = 1;
3275 if (space_info->full) {
3276 spin_unlock(&space_info->lock);
3277 goto out;
3278 }
3279
3280 if (!force && !should_alloc_chunk(extent_root, space_info,
3281 alloc_bytes)) {
3282 spin_unlock(&space_info->lock);
3283 goto out;
3284 }
3285 spin_unlock(&space_info->lock);
3286
3287 /*
3288 * If we have mixed data/metadata chunks we want to make sure we keep
3289 * allocating mixed chunks instead of individual chunks.
3290 */
3291 if (btrfs_mixed_space_info(space_info))
3292 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3293
3294 /*
3295 * if we're doing a data chunk, go ahead and make sure that
3296 * we keep a reasonable number of metadata chunks allocated in the
3297 * FS as well.
3298 */
3299 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3300 fs_info->data_chunk_allocations++;
3301 if (!(fs_info->data_chunk_allocations %
3302 fs_info->metadata_ratio))
3303 force_metadata_allocation(fs_info);
3304 }
3305
3306 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3307 spin_lock(&space_info->lock);
3308 if (ret)
3309 space_info->full = 1;
3310 else
3311 ret = 1;
3312 space_info->force_alloc = 0;
3313 spin_unlock(&space_info->lock);
3314 out:
3315 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3316 return ret;
3317 }
3318
3319 /*
3320 * shrink metadata reservation for delalloc
3321 */
3322 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3323 struct btrfs_root *root, u64 to_reclaim, int sync)
3324 {
3325 struct btrfs_block_rsv *block_rsv;
3326 struct btrfs_space_info *space_info;
3327 u64 reserved;
3328 u64 max_reclaim;
3329 u64 reclaimed = 0;
3330 int pause = 1;
3331 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3332
3333 block_rsv = &root->fs_info->delalloc_block_rsv;
3334 space_info = block_rsv->space_info;
3335
3336 smp_mb();
3337 reserved = space_info->bytes_reserved;
3338
3339 if (reserved == 0)
3340 return 0;
3341
3342 max_reclaim = min(reserved, to_reclaim);
3343
3344 while (1) {
3345 /* have the flusher threads jump in and do some IO */
3346 smp_mb();
3347 nr_pages = min_t(unsigned long, nr_pages,
3348 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3349 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3350
3351 spin_lock(&space_info->lock);
3352 if (reserved > space_info->bytes_reserved)
3353 reclaimed += reserved - space_info->bytes_reserved;
3354 reserved = space_info->bytes_reserved;
3355 spin_unlock(&space_info->lock);
3356
3357 if (reserved == 0 || reclaimed >= max_reclaim)
3358 break;
3359
3360 if (trans && trans->transaction->blocked)
3361 return -EAGAIN;
3362
3363 __set_current_state(TASK_INTERRUPTIBLE);
3364 schedule_timeout(pause);
3365 pause <<= 1;
3366 if (pause > HZ / 10)
3367 pause = HZ / 10;
3368
3369 }
3370 return reclaimed >= to_reclaim;
3371 }
3372
3373 /*
3374 * retries counts how many times we've been through the reservation
3375 * loop below. The idea is that on the first pass (retries == 0) we
3376 * add to our reserved count if we can't make the allocation, in order
3377 * to hold our place while we go and try to free up space. On later
3378 * passes we don't add space again; we just check whether the amount
3379 * of unused space is now >= what we need, meaning our reservation is
3380 * still valid.
3381 *
3382 * After two failed retries we fall back to committing the transaction.
3383 */
3384 static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
3385 struct btrfs_root *root,
3386 struct btrfs_block_rsv *block_rsv,
3387 u64 orig_bytes, int flush)
3388 {
3389 struct btrfs_space_info *space_info = block_rsv->space_info;
3390 u64 unused;
3391 u64 num_bytes = orig_bytes;
3392 int retries = 0;
3393 int ret = 0;
3394 bool reserved = false;
3395 bool committed = false;
3396
3397 again:
3398 ret = -ENOSPC;
3399 if (reserved)
3400 num_bytes = 0;
3401
3402 spin_lock(&space_info->lock);
3403 unused = space_info->bytes_used + space_info->bytes_reserved +
3404 space_info->bytes_pinned + space_info->bytes_readonly +
3405 space_info->bytes_may_use;
3406
3407 /*
3408 * The idea here is that if we've not already over-reserved the block
3409 * group then we can go ahead and save our reservation first and then
3410 * start flushing if we need to. Otherwise, if we've already
3411 * overcommitted, let's start flushing stuff first and then come back
3412 * and try to make our reservation.
3413 */
3414 if (unused <= space_info->total_bytes) {
3415 unused = space_info->total_bytes - unused;
3416 if (unused >= num_bytes) {
3417 if (!reserved)
3418 space_info->bytes_reserved += orig_bytes;
3419 ret = 0;
3420 } else {
3421 /*
3422 * Ok, set num_bytes to orig_bytes since we aren't
3423 * overcommitted; this way we only try to reclaim what
3424 * we need.
3425 */
3426 num_bytes = orig_bytes;
3427 }
3428 } else {
3429 /*
3430 * Ok, we're overcommitted; set num_bytes to the overcommitted
3431 * amount plus the amount of bytes that we need for this
3432 * reservation.
3433 */
3434 num_bytes = unused - space_info->total_bytes +
3435 (orig_bytes * (retries + 1));
3436 }
3437
3438 /*
3439 * Couldn't make our reservation; save our place so that while we're
3440 * trying to reclaim space we can actually use it instead of somebody
3441 * else stealing it from us.
3442 */
3443 if (ret && !reserved) {
3444 space_info->bytes_reserved += orig_bytes;
3445 reserved = true;
3446 }
3447
3448 spin_unlock(&space_info->lock);
3449
3450 if (!ret)
3451 return 0;
3452
3453 if (!flush)
3454 goto out;
3455
3456 /*
3457 * We do synchronous shrinking since we don't actually unreserve
3458 * metadata until after the IO is completed.
3459 */
3460 ret = shrink_delalloc(trans, root, num_bytes, 1);
3461 if (ret > 0)
3462 return 0;
3463 else if (ret < 0)
3464 goto out;
3465
3466 /*
3467 * So if we were overcommitted it's possible that somebody else flushed
3468 * out enough space and we simply didn't have enough space to reclaim,
3469 * so go back around and try again.
3470 */
3471 if (retries < 2) {
3472 retries++;
3473 goto again;
3474 }
3475
3476 spin_lock(&space_info->lock);
3477 /*
3478 * Not enough space to be reclaimed, don't bother committing the
3479 * transaction.
3480 */
3481 if (space_info->bytes_pinned < orig_bytes)
3482 ret = -ENOSPC;
3483 spin_unlock(&space_info->lock);
3484 if (ret)
3485 goto out;
3486
3487 ret = -EAGAIN;
3488 if (trans || committed)
3489 goto out;
3490
3491 ret = -ENOSPC;
3492 trans = btrfs_join_transaction(root, 1);
3493 if (IS_ERR(trans))
3494 goto out;
3495 ret = btrfs_commit_transaction(trans, root);
3496 if (!ret) {
3497 trans = NULL;
3498 committed = true;
3499 goto again;
3500 }
3501
3502 out:
3503 if (reserved) {
3504 spin_lock(&space_info->lock);
3505 space_info->bytes_reserved -= orig_bytes;
3506 spin_unlock(&space_info->lock);
3507 }
3508
3509 return ret;
3510 }
3511
3512 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3513 struct btrfs_root *root)
3514 {
3515 struct btrfs_block_rsv *block_rsv;
3516 if (root->ref_cows)
3517 block_rsv = trans->block_rsv;
3518 else
3519 block_rsv = root->block_rsv;
3520
3521 if (!block_rsv)
3522 block_rsv = &root->fs_info->empty_block_rsv;
3523
3524 return block_rsv;
3525 }
3526
3527 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3528 u64 num_bytes)
3529 {
3530 int ret = -ENOSPC;
3531 spin_lock(&block_rsv->lock);
3532 if (block_rsv->reserved >= num_bytes) {
3533 block_rsv->reserved -= num_bytes;
3534 if (block_rsv->reserved < block_rsv->size)
3535 block_rsv->full = 0;
3536 ret = 0;
3537 }
3538 spin_unlock(&block_rsv->lock);
3539 return ret;
3540 }
3541
3542 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3543 u64 num_bytes, int update_size)
3544 {
3545 spin_lock(&block_rsv->lock);
3546 block_rsv->reserved += num_bytes;
3547 if (update_size)
3548 block_rsv->size += num_bytes;
3549 else if (block_rsv->reserved >= block_rsv->size)
3550 block_rsv->full = 1;
3551 spin_unlock(&block_rsv->lock);
3552 }
3553
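/*
 * shrink block_rsv->size by num_bytes ((u64)-1 means release
 * everything) and hand any excess reserved bytes to @dest, or back to
 * the owning space_info when @dest is NULL.
 */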
3554 void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3555 struct btrfs_block_rsv *dest, u64 num_bytes)
3556 {
3557 struct btrfs_space_info *space_info = block_rsv->space_info;
3558
3559 spin_lock(&block_rsv->lock);
3560 if (num_bytes == (u64)-1)
3561 num_bytes = block_rsv->size;
3562 block_rsv->size -= num_bytes;
3563 if (block_rsv->reserved >= block_rsv->size) {
3564 num_bytes = block_rsv->reserved - block_rsv->size;
3565 block_rsv->reserved = block_rsv->size;
3566 block_rsv->full = 1;
3567 } else {
3568 num_bytes = 0;
3569 }
3570 spin_unlock(&block_rsv->lock);
3571
3572 if (num_bytes > 0) {
3573 if (dest) {
3574 block_rsv_add_bytes(dest, num_bytes, 0);
3575 } else {
3576 spin_lock(&space_info->lock);
3577 space_info->bytes_reserved -= num_bytes;
3578 spin_unlock(&space_info->lock);
3579 }
3580 }
3581 }
3582
3583 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3584 struct btrfs_block_rsv *dst, u64 num_bytes)
3585 {
3586 int ret;
3587
3588 ret = block_rsv_use_bytes(src, num_bytes);
3589 if (ret)
3590 return ret;
3591
3592 block_rsv_add_bytes(dst, num_bytes, 1);
3593 return 0;
3594 }
3595
3596 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3597 {
3598 memset(rsv, 0, sizeof(*rsv));
3599 spin_lock_init(&rsv->lock);
3600 atomic_set(&rsv->usage, 1);
3601 rsv->priority = 6;
3602 INIT_LIST_HEAD(&rsv->list);
3603 }
3604
3605 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3606 {
3607 struct btrfs_block_rsv *block_rsv;
3608 struct btrfs_fs_info *fs_info = root->fs_info;
3609
3610 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3611 if (!block_rsv)
3612 return NULL;
3613
3614 btrfs_init_block_rsv(block_rsv);
3615 block_rsv->space_info = __find_space_info(fs_info,
3616 BTRFS_BLOCK_GROUP_METADATA);
3617 return block_rsv;
3618 }
3619
3620 void btrfs_free_block_rsv(struct btrfs_root *root,
3621 struct btrfs_block_rsv *rsv)
3622 {
3623 if (rsv && atomic_dec_and_test(&rsv->usage)) {
3624 btrfs_block_rsv_release(root, rsv, (u64)-1);
3625 if (!rsv->durable)
3626 kfree(rsv);
3627 }
3628 }
3629
3630 /*
3631 * make the block_rsv struct able to capture freed space.
3632 * the captured space will be re-added to the block_rsv struct
3633 * after the transaction commits
3634 */
3635 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3636 struct btrfs_block_rsv *block_rsv)
3637 {
3638 block_rsv->durable = 1;
3639 mutex_lock(&fs_info->durable_block_rsv_mutex);
3640 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3641 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3642 }
3643
3644 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3645 struct btrfs_root *root,
3646 struct btrfs_block_rsv *block_rsv,
3647 u64 num_bytes)
3648 {
3649 int ret;
3650
3651 if (num_bytes == 0)
3652 return 0;
3653
3654 ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
3655 if (!ret) {
3656 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3657 return 0;
3658 }
3659
3660 return ret;
3661 }
3662
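/*
 * make sure block_rsv holds at least min_reserved bytes, or
 * size * min_factor / 10 if that is larger.  If it falls short we try
 * to refill from the space_info (when refill_used is set), and failing
 * that ask for a transaction commit so durable freed space can flow
 * back into the reservation.
 */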
3663 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3664 struct btrfs_root *root,
3665 struct btrfs_block_rsv *block_rsv,
3666 u64 min_reserved, int min_factor)
3667 {
3668 u64 num_bytes = 0;
3669 int commit_trans = 0;
3670 int ret = -ENOSPC;
3671
3672 if (!block_rsv)
3673 return 0;
3674
3675 spin_lock(&block_rsv->lock);
3676 if (min_factor > 0)
3677 num_bytes = div_factor(block_rsv->size, min_factor);
3678 if (min_reserved > num_bytes)
3679 num_bytes = min_reserved;
3680
3681 if (block_rsv->reserved >= num_bytes) {
3682 ret = 0;
3683 } else {
3684 num_bytes -= block_rsv->reserved;
3685 if (block_rsv->durable &&
3686 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3687 commit_trans = 1;
3688 }
3689 spin_unlock(&block_rsv->lock);
3690 if (!ret)
3691 return 0;
3692
3693 if (block_rsv->refill_used) {
3694 ret = reserve_metadata_bytes(trans, root, block_rsv,
3695 num_bytes, 0);
3696 if (!ret) {
3697 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3698 return 0;
3699 }
3700 }
3701
3702 if (commit_trans) {
3703 if (trans)
3704 return -EAGAIN;
3705
3706 trans = btrfs_join_transaction(root, 1);
3707 BUG_ON(IS_ERR(trans));
3708 ret = btrfs_commit_transaction(trans, root);
3709 return 0;
3710 }
3711
3712 WARN_ON(1);
3713 printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
3714 block_rsv->size, block_rsv->reserved,
3715 block_rsv->freed[0], block_rsv->freed[1]);
3716
3717 return -ENOSPC;
3718 }
3719
3720 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3721 struct btrfs_block_rsv *dst_rsv,
3722 u64 num_bytes)
3723 {
3724 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3725 }
3726
3727 void btrfs_block_rsv_release(struct btrfs_root *root,
3728 struct btrfs_block_rsv *block_rsv,
3729 u64 num_bytes)
3730 {
3731 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3732 if (global_rsv->full || global_rsv == block_rsv ||
3733 block_rsv->space_info != global_rsv->space_info)
3734 global_rsv = NULL;
3735 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3736 }
3737
3738 /*
3739 * helper to calculate size of global block reservation.
3740 * the desired value is the sum of the space used by the extent tree,
3741 * checksum tree and root tree
3742 */
3743 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3744 {
3745 struct btrfs_space_info *sinfo;
3746 u64 num_bytes;
3747 u64 meta_used;
3748 u64 data_used;
3749 int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3750 #if 0
3751 /*
3752 * per tree used space accounting can be inaccurate, so we
3753 * can't rely on it.
3754 */
3755 spin_lock(&fs_info->extent_root->accounting_lock);
3756 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3757 spin_unlock(&fs_info->extent_root->accounting_lock);
3758
3759 spin_lock(&fs_info->csum_root->accounting_lock);
3760 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3761 spin_unlock(&fs_info->csum_root->accounting_lock);
3762
3763 spin_lock(&fs_info->tree_root->accounting_lock);
3764 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3765 spin_unlock(&fs_info->tree_root->accounting_lock);
3766 #endif
3767 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3768 spin_lock(&sinfo->lock);
3769 data_used = sinfo->bytes_used;
3770 spin_unlock(&sinfo->lock);
3771
3772 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3773 spin_lock(&sinfo->lock);
3774 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3775 data_used = 0;
3776 meta_used = sinfo->bytes_used;
3777 spin_unlock(&sinfo->lock);
3778
3779 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3780 csum_size * 2;
3781 num_bytes += div64_u64(data_used + meta_used, 50);
3782
3783 if (num_bytes * 3 > meta_used)
3784 num_bytes = div64_u64(meta_used, 3);
3785
3786 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3787 }
3788
3789 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3790 {
3791 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3792 struct btrfs_space_info *sinfo = block_rsv->space_info;
3793 u64 num_bytes;
3794
3795 num_bytes = calc_global_metadata_size(fs_info);
3796
3797 spin_lock(&block_rsv->lock);
3798 spin_lock(&sinfo->lock);
3799
3800 block_rsv->size = num_bytes;
3801
3802 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3803 sinfo->bytes_reserved + sinfo->bytes_readonly +
3804 sinfo->bytes_may_use;
3805
3806 if (sinfo->total_bytes > num_bytes) {
3807 num_bytes = sinfo->total_bytes - num_bytes;
3808 block_rsv->reserved += num_bytes;
3809 sinfo->bytes_reserved += num_bytes;
3810 }
3811
3812 if (block_rsv->reserved >= block_rsv->size) {
3813 num_bytes = block_rsv->reserved - block_rsv->size;
3814 sinfo->bytes_reserved -= num_bytes;
3815 block_rsv->reserved = block_rsv->size;
3816 block_rsv->full = 1;
3817 }
3818 #if 0
3819 printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3820 block_rsv->size, block_rsv->reserved);
3821 #endif
3822 spin_unlock(&sinfo->lock);
3823 spin_unlock(&block_rsv->lock);
3824 }
3825
3826 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3827 {
3828 struct btrfs_space_info *space_info;
3829
3830 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3831 fs_info->chunk_block_rsv.space_info = space_info;
3832 fs_info->chunk_block_rsv.priority = 10;
3833
3834 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3835 fs_info->global_block_rsv.space_info = space_info;
3836 fs_info->global_block_rsv.priority = 10;
3837 fs_info->global_block_rsv.refill_used = 1;
3838 fs_info->delalloc_block_rsv.space_info = space_info;
3839 fs_info->trans_block_rsv.space_info = space_info;
3840 fs_info->empty_block_rsv.space_info = space_info;
3841 fs_info->empty_block_rsv.priority = 10;
3842
3843 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3844 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3845 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3846 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3847 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3848
3849 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3850
3851 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3852
3853 update_global_block_rsv(fs_info);
3854 }
3855
3856 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3857 {
3858 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3859 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3860 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3861 WARN_ON(fs_info->trans_block_rsv.size > 0);
3862 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3863 WARN_ON(fs_info->chunk_block_rsv.size > 0);
3864 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3865 }
3866
3867 static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3868 {
3869 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3870 3 * num_items;
3871 }
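/*
 * Example of the formula above (illustrative, assuming 4KiB leaves and
 * nodes and BTRFS_MAX_LEVEL == 8): one item reserves
 * (4096 + 4096 * 7) * 3 = 96KiB, i.e. a worst-case root-to-leaf path
 * (one leaf plus seven nodes) multiplied by a safety factor of three.
 */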
3872
3873 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3874 struct btrfs_root *root,
3875 int num_items)
3876 {
3877 u64 num_bytes;
3878 int ret;
3879
3880 if (num_items == 0 || root->fs_info->chunk_root == root)
3881 return 0;
3882
3883 num_bytes = calc_trans_metadata_size(root, num_items);
3884 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3885 num_bytes);
3886 if (!ret) {
3887 trans->bytes_reserved += num_bytes;
3888 trans->block_rsv = &root->fs_info->trans_block_rsv;
3889 }
3890 return ret;
3891 }
3892
3893 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3894 struct btrfs_root *root)
3895 {
3896 if (!trans->bytes_reserved)
3897 return;
3898
3899 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3900 btrfs_block_rsv_release(root, trans->block_rsv,
3901 trans->bytes_reserved);
3902 trans->bytes_reserved = 0;
3903 }
3904
3905 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3906 struct inode *inode)
3907 {
3908 struct btrfs_root *root = BTRFS_I(inode)->root;
3909 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3910 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3911
3912 /*
3913 	 * one for deleting the orphan item, one for updating the inode and
3914 	 * two for calling btrfs_truncate_inode_items.
3915 	 *
3916 	 * btrfs_truncate_inode_items is a delete operation; it frees
3917 	 * more space than it uses in most cases. So two units of
3918 	 * metadata space should be enough for calling it many times.
3919 	 * If all of the metadata space is used, we can commit the
3920 	 * transaction and use the space it freed.
3921 */
3922 u64 num_bytes = calc_trans_metadata_size(root, 4);
3923 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3924 }
3925
3926 void btrfs_orphan_release_metadata(struct inode *inode)
3927 {
3928 struct btrfs_root *root = BTRFS_I(inode)->root;
3929 u64 num_bytes = calc_trans_metadata_size(root, 4);
3930 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3931 }
3932
3933 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3934 struct btrfs_pending_snapshot *pending)
3935 {
3936 struct btrfs_root *root = pending->root;
3937 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3938 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3939 /*
3940 * two for root back/forward refs, two for directory entries
3941 	 * and one for the root of the snapshot.
3942 */
3943 u64 num_bytes = calc_trans_metadata_size(root, 5);
3944 dst_rsv->space_info = src_rsv->space_info;
3945 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3946 }
3947
3948 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3949 {
3950 	return num_bytes >> 3;
3951 }
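/*
 * A rough heuristic: one eighth of the data size is set aside for the
 * checksum items covering it, so (illustrative numbers) a 1MiB
 * delalloc reservation adds 128KiB of metadata space for csums.
 */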
3952
3953 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3954 {
3955 struct btrfs_root *root = BTRFS_I(inode)->root;
3956 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3957 u64 to_reserve;
3958 int nr_extents;
3959 int ret;
3960
3961 if (btrfs_transaction_in_commit(root->fs_info))
3962 schedule_timeout(1);
3963
3964 num_bytes = ALIGN(num_bytes, root->sectorsize);
3965
3966 spin_lock(&BTRFS_I(inode)->accounting_lock);
3967 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3968 if (nr_extents > BTRFS_I(inode)->reserved_extents) {
3969 nr_extents -= BTRFS_I(inode)->reserved_extents;
3970 to_reserve = calc_trans_metadata_size(root, nr_extents);
3971 } else {
3972 nr_extents = 0;
3973 to_reserve = 0;
3974 }
3975 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3976
3977 to_reserve += calc_csum_metadata_size(inode, num_bytes);
3978 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
3979 if (ret)
3980 return ret;
3981
3982 spin_lock(&BTRFS_I(inode)->accounting_lock);
3983 BTRFS_I(inode)->reserved_extents += nr_extents;
3984 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3985 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3986
3987 block_rsv_add_bytes(block_rsv, to_reserve, 1);
3988
3989 if (block_rsv->size > 512 * 1024 * 1024)
3990 shrink_delalloc(NULL, root, to_reserve, 0);
3991
3992 return 0;
3993 }
3994
3995 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3996 {
3997 struct btrfs_root *root = BTRFS_I(inode)->root;
3998 u64 to_free;
3999 int nr_extents;
4000
4001 num_bytes = ALIGN(num_bytes, root->sectorsize);
4002 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
4003
4004 spin_lock(&BTRFS_I(inode)->accounting_lock);
4005 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4006 if (nr_extents < BTRFS_I(inode)->reserved_extents) {
4007 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
4008 BTRFS_I(inode)->reserved_extents -= nr_extents;
4009 } else {
4010 nr_extents = 0;
4011 }
4012 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4013
4014 to_free = calc_csum_metadata_size(inode, num_bytes);
4015 if (nr_extents > 0)
4016 to_free += calc_trans_metadata_size(root, nr_extents);
4017
4018 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4019 to_free);
4020 }
4021
4022 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4023 {
4024 int ret;
4025
4026 ret = btrfs_check_data_free_space(inode, num_bytes);
4027 if (ret)
4028 return ret;
4029
4030 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4031 if (ret) {
4032 btrfs_free_reserved_data_space(inode, num_bytes);
4033 return ret;
4034 }
4035
4036 return 0;
4037 }
4038
4039 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4040 {
4041 btrfs_delalloc_release_metadata(inode, num_bytes);
4042 btrfs_free_reserved_data_space(inode, num_bytes);
4043 }
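/*
 * A minimal caller sketch (not code from this file; do_the_write() is
 * a hypothetical helper): the reserve/release pair brackets the actual
 * work, and a failed write must give back both the data and the
 * metadata reservations:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, num_bytes);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 */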
4044
4045 static int update_block_group(struct btrfs_trans_handle *trans,
4046 struct btrfs_root *root,
4047 u64 bytenr, u64 num_bytes, int alloc)
4048 {
4049 struct btrfs_block_group_cache *cache = NULL;
4050 struct btrfs_fs_info *info = root->fs_info;
4051 u64 total = num_bytes;
4052 u64 old_val;
4053 u64 byte_in_group;
4054 int factor;
4055
4056 /* block accounting for super block */
4057 spin_lock(&info->delalloc_lock);
4058 old_val = btrfs_super_bytes_used(&info->super_copy);
4059 if (alloc)
4060 old_val += num_bytes;
4061 else
4062 old_val -= num_bytes;
4063 btrfs_set_super_bytes_used(&info->super_copy, old_val);
4064 spin_unlock(&info->delalloc_lock);
4065
4066 while (total) {
4067 cache = btrfs_lookup_block_group(info, bytenr);
4068 if (!cache)
4069 return -1;
4070 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4071 BTRFS_BLOCK_GROUP_RAID1 |
4072 BTRFS_BLOCK_GROUP_RAID10))
4073 factor = 2;
4074 else
4075 factor = 1;
4076 /*
4077 * If this block group has free space cache written out, we
4078 * need to make sure to load it if we are removing space. This
4079 * is because we need the unpinning stage to actually add the
4080 * space back to the block group, otherwise we will leak space.
4081 */
4082 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4083 cache_block_group(cache, trans, 1);
4084
4085 byte_in_group = bytenr - cache->key.objectid;
4086 WARN_ON(byte_in_group > cache->key.offset);
4087
4088 spin_lock(&cache->space_info->lock);
4089 spin_lock(&cache->lock);
4090
4091 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4092 cache->disk_cache_state < BTRFS_DC_CLEAR)
4093 cache->disk_cache_state = BTRFS_DC_CLEAR;
4094
4095 cache->dirty = 1;
4096 old_val = btrfs_block_group_used(&cache->item);
4097 num_bytes = min(total, cache->key.offset - byte_in_group);
4098 if (alloc) {
4099 old_val += num_bytes;
4100 btrfs_set_block_group_used(&cache->item, old_val);
4101 cache->reserved -= num_bytes;
4102 cache->space_info->bytes_reserved -= num_bytes;
4103 cache->space_info->bytes_used += num_bytes;
4104 cache->space_info->disk_used += num_bytes * factor;
4105 spin_unlock(&cache->lock);
4106 spin_unlock(&cache->space_info->lock);
4107 } else {
4108 old_val -= num_bytes;
4109 btrfs_set_block_group_used(&cache->item, old_val);
4110 cache->pinned += num_bytes;
4111 cache->space_info->bytes_pinned += num_bytes;
4112 cache->space_info->bytes_used -= num_bytes;
4113 cache->space_info->disk_used -= num_bytes * factor;
4114 spin_unlock(&cache->lock);
4115 spin_unlock(&cache->space_info->lock);
4116
4117 set_extent_dirty(info->pinned_extents,
4118 bytenr, bytenr + num_bytes - 1,
4119 GFP_NOFS | __GFP_NOFAIL);
4120 }
4121 btrfs_put_block_group(cache);
4122 total -= num_bytes;
4123 bytenr += num_bytes;
4124 }
4125 return 0;
4126 }
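/*
 * Note on 'factor' above: for DUP/RAID1/RAID10 block groups every
 * logical byte occupies two bytes on disk, so disk_used moves by
 * 2 * num_bytes while bytes_used moves by num_bytes; single and RAID0
 * groups use a factor of one.
 */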
4127
4128 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4129 {
4130 struct btrfs_block_group_cache *cache;
4131 u64 bytenr;
4132
4133 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4134 if (!cache)
4135 return 0;
4136
4137 bytenr = cache->key.objectid;
4138 btrfs_put_block_group(cache);
4139
4140 return bytenr;
4141 }
4142
4143 static int pin_down_extent(struct btrfs_root *root,
4144 struct btrfs_block_group_cache *cache,
4145 u64 bytenr, u64 num_bytes, int reserved)
4146 {
4147 spin_lock(&cache->space_info->lock);
4148 spin_lock(&cache->lock);
4149 cache->pinned += num_bytes;
4150 cache->space_info->bytes_pinned += num_bytes;
4151 if (reserved) {
4152 cache->reserved -= num_bytes;
4153 cache->space_info->bytes_reserved -= num_bytes;
4154 }
4155 spin_unlock(&cache->lock);
4156 spin_unlock(&cache->space_info->lock);
4157
4158 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4159 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4160 return 0;
4161 }
4162
4163 /*
4164 	 * this function must be called within a transaction
4165 */
4166 int btrfs_pin_extent(struct btrfs_root *root,
4167 u64 bytenr, u64 num_bytes, int reserved)
4168 {
4169 struct btrfs_block_group_cache *cache;
4170
4171 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4172 BUG_ON(!cache);
4173
4174 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4175
4176 btrfs_put_block_group(cache);
4177 return 0;
4178 }
4179
4180 /*
4181 	 * update the size of reserved extents. this function may return -EAGAIN
4182 	 * if the block group is read-only ('reserve' true or 'sinfo' false).
4183 */
4184 static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4185 u64 num_bytes, int reserve, int sinfo)
4186 {
4187 int ret = 0;
4188 if (sinfo) {
4189 struct btrfs_space_info *space_info = cache->space_info;
4190 spin_lock(&space_info->lock);
4191 spin_lock(&cache->lock);
4192 if (reserve) {
4193 if (cache->ro) {
4194 ret = -EAGAIN;
4195 } else {
4196 cache->reserved += num_bytes;
4197 space_info->bytes_reserved += num_bytes;
4198 }
4199 } else {
4200 if (cache->ro)
4201 space_info->bytes_readonly += num_bytes;
4202 cache->reserved -= num_bytes;
4203 space_info->bytes_reserved -= num_bytes;
4204 }
4205 spin_unlock(&cache->lock);
4206 spin_unlock(&space_info->lock);
4207 } else {
4208 spin_lock(&cache->lock);
4209 if (cache->ro) {
4210 ret = -EAGAIN;
4211 } else {
4212 if (reserve)
4213 cache->reserved += num_bytes;
4214 else
4215 cache->reserved -= num_bytes;
4216 }
4217 spin_unlock(&cache->lock);
4218 }
4219 return ret;
4220 }
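/*
 * Summary of the cases above: with 'sinfo' the space_info counters are
 * kept in sync and only a reservation against a read-only group fails
 * with -EAGAIN; without 'sinfo' just the per-group counter moves, and
 * any update to a read-only group fails with -EAGAIN.
 */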
4221
4222 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4223 struct btrfs_root *root)
4224 {
4225 struct btrfs_fs_info *fs_info = root->fs_info;
4226 struct btrfs_caching_control *next;
4227 struct btrfs_caching_control *caching_ctl;
4228 struct btrfs_block_group_cache *cache;
4229
4230 down_write(&fs_info->extent_commit_sem);
4231
4232 list_for_each_entry_safe(caching_ctl, next,
4233 &fs_info->caching_block_groups, list) {
4234 cache = caching_ctl->block_group;
4235 if (block_group_cache_done(cache)) {
4236 cache->last_byte_to_unpin = (u64)-1;
4237 list_del_init(&caching_ctl->list);
4238 put_caching_control(caching_ctl);
4239 } else {
4240 cache->last_byte_to_unpin = caching_ctl->progress;
4241 }
4242 }
4243
4244 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4245 fs_info->pinned_extents = &fs_info->freed_extents[1];
4246 else
4247 fs_info->pinned_extents = &fs_info->freed_extents[0];
4248
4249 up_write(&fs_info->extent_commit_sem);
4250
4251 update_global_block_rsv(fs_info);
4252 return 0;
4253 }
4254
4255 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4256 {
4257 struct btrfs_fs_info *fs_info = root->fs_info;
4258 struct btrfs_block_group_cache *cache = NULL;
4259 u64 len;
4260
4261 while (start <= end) {
4262 if (!cache ||
4263 start >= cache->key.objectid + cache->key.offset) {
4264 if (cache)
4265 btrfs_put_block_group(cache);
4266 cache = btrfs_lookup_block_group(fs_info, start);
4267 BUG_ON(!cache);
4268 }
4269
4270 len = cache->key.objectid + cache->key.offset - start;
4271 len = min(len, end + 1 - start);
4272
4273 if (start < cache->last_byte_to_unpin) {
4274 len = min(len, cache->last_byte_to_unpin - start);
4275 btrfs_add_free_space(cache, start, len);
4276 }
4277
4278 start += len;
4279
4280 spin_lock(&cache->space_info->lock);
4281 spin_lock(&cache->lock);
4282 cache->pinned -= len;
4283 cache->space_info->bytes_pinned -= len;
4284 if (cache->ro) {
4285 cache->space_info->bytes_readonly += len;
4286 } else if (cache->reserved_pinned > 0) {
4287 len = min(len, cache->reserved_pinned);
4288 cache->reserved_pinned -= len;
4289 cache->space_info->bytes_reserved += len;
4290 }
4291 spin_unlock(&cache->lock);
4292 spin_unlock(&cache->space_info->lock);
4293 }
4294
4295 if (cache)
4296 btrfs_put_block_group(cache);
4297 return 0;
4298 }
4299
4300 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4301 struct btrfs_root *root)
4302 {
4303 struct btrfs_fs_info *fs_info = root->fs_info;
4304 struct extent_io_tree *unpin;
4305 struct btrfs_block_rsv *block_rsv;
4306 struct btrfs_block_rsv *next_rsv;
4307 u64 start;
4308 u64 end;
4309 int idx;
4310 int ret;
4311
4312 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4313 unpin = &fs_info->freed_extents[1];
4314 else
4315 unpin = &fs_info->freed_extents[0];
4316
4317 while (1) {
4318 ret = find_first_extent_bit(unpin, 0, &start, &end,
4319 EXTENT_DIRTY);
4320 if (ret)
4321 break;
4322
4323 ret = btrfs_discard_extent(root, start, end + 1 - start);
4324
4325 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4326 unpin_extent_range(root, start, end);
4327 cond_resched();
4328 }
4329
4330 mutex_lock(&fs_info->durable_block_rsv_mutex);
4331 list_for_each_entry_safe(block_rsv, next_rsv,
4332 &fs_info->durable_block_rsv_list, list) {
4333
4334 idx = trans->transid & 0x1;
4335 if (block_rsv->freed[idx] > 0) {
4336 block_rsv_add_bytes(block_rsv,
4337 block_rsv->freed[idx], 0);
4338 block_rsv->freed[idx] = 0;
4339 }
4340 if (atomic_read(&block_rsv->usage) == 0) {
4341 btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4342
4343 if (block_rsv->freed[0] == 0 &&
4344 block_rsv->freed[1] == 0) {
4345 list_del_init(&block_rsv->list);
4346 kfree(block_rsv);
4347 }
4348 } else {
4349 btrfs_block_rsv_release(root, block_rsv, 0);
4350 }
4351 }
4352 mutex_unlock(&fs_info->durable_block_rsv_mutex);
4353
4354 return 0;
4355 }
4356
4357 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4358 struct btrfs_root *root,
4359 u64 bytenr, u64 num_bytes, u64 parent,
4360 u64 root_objectid, u64 owner_objectid,
4361 u64 owner_offset, int refs_to_drop,
4362 struct btrfs_delayed_extent_op *extent_op)
4363 {
4364 struct btrfs_key key;
4365 struct btrfs_path *path;
4366 struct btrfs_fs_info *info = root->fs_info;
4367 struct btrfs_root *extent_root = info->extent_root;
4368 struct extent_buffer *leaf;
4369 struct btrfs_extent_item *ei;
4370 struct btrfs_extent_inline_ref *iref;
4371 int ret;
4372 int is_data;
4373 int extent_slot = 0;
4374 int found_extent = 0;
4375 int num_to_del = 1;
4376 u32 item_size;
4377 u64 refs;
4378
4379 path = btrfs_alloc_path();
4380 if (!path)
4381 return -ENOMEM;
4382
4383 path->reada = 1;
4384 path->leave_spinning = 1;
4385
4386 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4387 BUG_ON(!is_data && refs_to_drop != 1);
4388
4389 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4390 bytenr, num_bytes, parent,
4391 root_objectid, owner_objectid,
4392 owner_offset);
4393 if (ret == 0) {
4394 extent_slot = path->slots[0];
4395 while (extent_slot >= 0) {
4396 btrfs_item_key_to_cpu(path->nodes[0], &key,
4397 extent_slot);
4398 if (key.objectid != bytenr)
4399 break;
4400 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4401 key.offset == num_bytes) {
4402 found_extent = 1;
4403 break;
4404 }
4405 if (path->slots[0] - extent_slot > 5)
4406 break;
4407 extent_slot--;
4408 }
4409 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4410 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4411 if (found_extent && item_size < sizeof(*ei))
4412 found_extent = 0;
4413 #endif
4414 if (!found_extent) {
4415 BUG_ON(iref);
4416 ret = remove_extent_backref(trans, extent_root, path,
4417 NULL, refs_to_drop,
4418 is_data);
4419 BUG_ON(ret);
4420 btrfs_release_path(extent_root, path);
4421 path->leave_spinning = 1;
4422
4423 key.objectid = bytenr;
4424 key.type = BTRFS_EXTENT_ITEM_KEY;
4425 key.offset = num_bytes;
4426
4427 ret = btrfs_search_slot(trans, extent_root,
4428 &key, path, -1, 1);
4429 if (ret) {
4430 printk(KERN_ERR "umm, got %d back from search"
4431 ", was looking for %llu\n", ret,
4432 (unsigned long long)bytenr);
4433 btrfs_print_leaf(extent_root, path->nodes[0]);
4434 }
4435 BUG_ON(ret);
4436 extent_slot = path->slots[0];
4437 }
4438 } else {
4439 btrfs_print_leaf(extent_root, path->nodes[0]);
4440 WARN_ON(1);
4441 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4442 "parent %llu root %llu owner %llu offset %llu\n",
4443 (unsigned long long)bytenr,
4444 (unsigned long long)parent,
4445 (unsigned long long)root_objectid,
4446 (unsigned long long)owner_objectid,
4447 (unsigned long long)owner_offset);
4448 }
4449
4450 leaf = path->nodes[0];
4451 item_size = btrfs_item_size_nr(leaf, extent_slot);
4452 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4453 if (item_size < sizeof(*ei)) {
4454 BUG_ON(found_extent || extent_slot != path->slots[0]);
4455 ret = convert_extent_item_v0(trans, extent_root, path,
4456 owner_objectid, 0);
4457 BUG_ON(ret < 0);
4458
4459 btrfs_release_path(extent_root, path);
4460 path->leave_spinning = 1;
4461
4462 key.objectid = bytenr;
4463 key.type = BTRFS_EXTENT_ITEM_KEY;
4464 key.offset = num_bytes;
4465
4466 ret = btrfs_search_slot(trans, extent_root, &key, path,
4467 -1, 1);
4468 if (ret) {
4469 printk(KERN_ERR "umm, got %d back from search"
4470 ", was looking for %llu\n", ret,
4471 (unsigned long long)bytenr);
4472 btrfs_print_leaf(extent_root, path->nodes[0]);
4473 }
4474 BUG_ON(ret);
4475 extent_slot = path->slots[0];
4476 leaf = path->nodes[0];
4477 item_size = btrfs_item_size_nr(leaf, extent_slot);
4478 }
4479 #endif
4480 BUG_ON(item_size < sizeof(*ei));
4481 ei = btrfs_item_ptr(leaf, extent_slot,
4482 struct btrfs_extent_item);
4483 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4484 struct btrfs_tree_block_info *bi;
4485 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4486 bi = (struct btrfs_tree_block_info *)(ei + 1);
4487 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4488 }
4489
4490 refs = btrfs_extent_refs(leaf, ei);
4491 BUG_ON(refs < refs_to_drop);
4492 refs -= refs_to_drop;
4493
4494 if (refs > 0) {
4495 if (extent_op)
4496 __run_delayed_extent_op(extent_op, leaf, ei);
4497 /*
4498 * In the case of inline back ref, reference count will
4499 * be updated by remove_extent_backref
4500 */
4501 if (iref) {
4502 BUG_ON(!found_extent);
4503 } else {
4504 btrfs_set_extent_refs(leaf, ei, refs);
4505 btrfs_mark_buffer_dirty(leaf);
4506 }
4507 if (found_extent) {
4508 ret = remove_extent_backref(trans, extent_root, path,
4509 iref, refs_to_drop,
4510 is_data);
4511 BUG_ON(ret);
4512 }
4513 } else {
4514 if (found_extent) {
4515 BUG_ON(is_data && refs_to_drop !=
4516 extent_data_ref_count(root, path, iref));
4517 if (iref) {
4518 BUG_ON(path->slots[0] != extent_slot);
4519 } else {
4520 BUG_ON(path->slots[0] != extent_slot + 1);
4521 path->slots[0] = extent_slot;
4522 num_to_del = 2;
4523 }
4524 }
4525
4526 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4527 num_to_del);
4528 BUG_ON(ret);
4529 btrfs_release_path(extent_root, path);
4530
4531 if (is_data) {
4532 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4533 BUG_ON(ret);
4534 } else {
4535 invalidate_mapping_pages(info->btree_inode->i_mapping,
4536 bytenr >> PAGE_CACHE_SHIFT,
4537 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4538 }
4539
4540 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4541 BUG_ON(ret);
4542 }
4543 btrfs_free_path(path);
4544 return ret;
4545 }
4546
4547 /*
4548  * when we free a block, it is possible (and likely) that we free the last
4549 * delayed ref for that extent as well. This searches the delayed ref tree for
4550 * a given extent, and if there are no other delayed refs to be processed, it
4551 * removes it from the tree.
4552 */
4553 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4554 struct btrfs_root *root, u64 bytenr)
4555 {
4556 struct btrfs_delayed_ref_head *head;
4557 struct btrfs_delayed_ref_root *delayed_refs;
4558 struct btrfs_delayed_ref_node *ref;
4559 struct rb_node *node;
4560 int ret = 0;
4561
4562 delayed_refs = &trans->transaction->delayed_refs;
4563 spin_lock(&delayed_refs->lock);
4564 head = btrfs_find_delayed_ref_head(trans, bytenr);
4565 if (!head)
4566 goto out;
4567
4568 node = rb_prev(&head->node.rb_node);
4569 if (!node)
4570 goto out;
4571
4572 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4573
4574 /* there are still entries for this ref, we can't drop it */
4575 if (ref->bytenr == bytenr)
4576 goto out;
4577
4578 if (head->extent_op) {
4579 if (!head->must_insert_reserved)
4580 goto out;
4581 kfree(head->extent_op);
4582 head->extent_op = NULL;
4583 }
4584
4585 /*
4586 * waiting for the lock here would deadlock. If someone else has it
4587 	 * locked, they are already in the process of dropping it anyway
4588 */
4589 if (!mutex_trylock(&head->mutex))
4590 goto out;
4591
4592 /*
4593 * at this point we have a head with no other entries. Go
4594 * ahead and process it.
4595 */
4596 head->node.in_tree = 0;
4597 rb_erase(&head->node.rb_node, &delayed_refs->root);
4598
4599 delayed_refs->num_entries--;
4600
4601 /*
4602 * we don't take a ref on the node because we're removing it from the
4603 * tree, so we just steal the ref the tree was holding.
4604 */
4605 delayed_refs->num_heads--;
4606 if (list_empty(&head->cluster))
4607 delayed_refs->num_heads_ready--;
4608
4609 list_del_init(&head->cluster);
4610 spin_unlock(&delayed_refs->lock);
4611
4612 BUG_ON(head->extent_op);
4613 if (head->must_insert_reserved)
4614 ret = 1;
4615
4616 mutex_unlock(&head->mutex);
4617 btrfs_put_delayed_ref(&head->node);
4618 return ret;
4619 out:
4620 spin_unlock(&delayed_refs->lock);
4621 return 0;
4622 }
4623
4624 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4625 struct btrfs_root *root,
4626 struct extent_buffer *buf,
4627 u64 parent, int last_ref)
4628 {
4629 struct btrfs_block_rsv *block_rsv;
4630 struct btrfs_block_group_cache *cache = NULL;
4631 int ret;
4632
4633 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4634 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4635 parent, root->root_key.objectid,
4636 btrfs_header_level(buf),
4637 BTRFS_DROP_DELAYED_REF, NULL);
4638 BUG_ON(ret);
4639 }
4640
4641 if (!last_ref)
4642 return;
4643
4644 block_rsv = get_block_rsv(trans, root);
4645 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4646 if (block_rsv->space_info != cache->space_info)
4647 goto out;
4648
4649 if (btrfs_header_generation(buf) == trans->transid) {
4650 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4651 ret = check_ref_cleanup(trans, root, buf->start);
4652 if (!ret)
4653 goto pin;
4654 }
4655
4656 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4657 pin_down_extent(root, cache, buf->start, buf->len, 1);
4658 goto pin;
4659 }
4660
4661 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4662
4663 btrfs_add_free_space(cache, buf->start, buf->len);
4664 ret = update_reserved_bytes(cache, buf->len, 0, 0);
4665 if (ret == -EAGAIN) {
4666 /* block group became read-only */
4667 update_reserved_bytes(cache, buf->len, 0, 1);
4668 goto out;
4669 }
4670
4671 ret = 1;
4672 spin_lock(&block_rsv->lock);
4673 if (block_rsv->reserved < block_rsv->size) {
4674 block_rsv->reserved += buf->len;
4675 ret = 0;
4676 }
4677 spin_unlock(&block_rsv->lock);
4678
4679 if (ret) {
4680 spin_lock(&cache->space_info->lock);
4681 cache->space_info->bytes_reserved -= buf->len;
4682 spin_unlock(&cache->space_info->lock);
4683 }
4684 goto out;
4685 }
4686 pin:
4687 if (block_rsv->durable && !cache->ro) {
4688 ret = 0;
4689 spin_lock(&cache->lock);
4690 if (!cache->ro) {
4691 cache->reserved_pinned += buf->len;
4692 ret = 1;
4693 }
4694 spin_unlock(&cache->lock);
4695
4696 if (ret) {
4697 spin_lock(&block_rsv->lock);
4698 block_rsv->freed[trans->transid & 0x1] += buf->len;
4699 spin_unlock(&block_rsv->lock);
4700 }
4701 }
4702 out:
4703 btrfs_put_block_group(cache);
4704 }
4705
4706 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4707 struct btrfs_root *root,
4708 u64 bytenr, u64 num_bytes, u64 parent,
4709 u64 root_objectid, u64 owner, u64 offset)
4710 {
4711 int ret;
4712
4713 /*
4714 * tree log blocks never actually go into the extent allocation
4715 * tree, just update pinning info and exit early.
4716 */
4717 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4718 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4719 /* unlocks the pinned mutex */
4720 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4721 ret = 0;
4722 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4723 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4724 parent, root_objectid, (int)owner,
4725 BTRFS_DROP_DELAYED_REF, NULL);
4726 BUG_ON(ret);
4727 } else {
4728 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4729 parent, root_objectid, owner,
4730 offset, BTRFS_DROP_DELAYED_REF, NULL);
4731 BUG_ON(ret);
4732 }
4733 return ret;
4734 }
4735
4736 static u64 stripe_align(struct btrfs_root *root, u64 val)
4737 {
4738 u64 mask = ((u64)root->stripesize - 1);
4739 u64 ret = (val + mask) & ~mask;
4740 return ret;
4741 }
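/*
 * Worked example (illustrative): with a 64KiB stripesize the mask is
 * 0xffff, so val = 100000 rounds up to (100000 + 65535) & ~65535 =
 * 131072, the next stripe boundary.
 */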
4742
4743 /*
4744  * when we wait for progress in the block group caching, it's because
4745 * our allocation attempt failed at least once. So, we must sleep
4746 * and let some progress happen before we try again.
4747 *
4748 * This function will sleep at least once waiting for new free space to
4749 * show up, and then it will check the block group free space numbers
4750 * for our min num_bytes. Another option is to have it go ahead
4751 * and look in the rbtree for a free extent of a given size, but this
4752 * is a good start.
4753 */
4754 static noinline int
4755 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4756 u64 num_bytes)
4757 {
4758 struct btrfs_caching_control *caching_ctl;
4759 DEFINE_WAIT(wait);
4760
4761 caching_ctl = get_caching_control(cache);
4762 if (!caching_ctl)
4763 return 0;
4764
4765 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4766 (cache->free_space >= num_bytes));
4767
4768 put_caching_control(caching_ctl);
4769 return 0;
4770 }
4771
4772 static noinline int
4773 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4774 {
4775 struct btrfs_caching_control *caching_ctl;
4776 DEFINE_WAIT(wait);
4777
4778 caching_ctl = get_caching_control(cache);
4779 if (!caching_ctl)
4780 return 0;
4781
4782 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4783
4784 put_caching_control(caching_ctl);
4785 return 0;
4786 }
4787
4788 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4789 {
4790 int index;
4791 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4792 index = 0;
4793 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4794 index = 1;
4795 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4796 index = 2;
4797 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4798 index = 3;
4799 else
4800 index = 4;
4801 return index;
4802 }
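/*
 * The index above selects one of the space_info->block_groups[] lists;
 * find_free_extent() walks them in this order (RAID10, RAID1, DUP,
 * RAID0, then everything else) until it runs out at
 * BTRFS_NR_RAID_TYPES.
 */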
4803
4804 enum btrfs_loop_type {
4805 LOOP_FIND_IDEAL = 0,
4806 LOOP_CACHING_NOWAIT = 1,
4807 LOOP_CACHING_WAIT = 2,
4808 LOOP_ALLOC_CHUNK = 3,
4809 LOOP_NO_EMPTY_SIZE = 4,
4810 };
4811
4812 /*
4813  * walks the btree of allocated extents and finds a hole of a given size.
4814  * The key ins is changed to record the hole:
4815  * ins->objectid == block start
4816  * ins->flags == BTRFS_EXTENT_ITEM_KEY
4817 * ins->offset == number of blocks
4818 * Any available blocks before search_start are skipped.
4819 */
4820 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4821 struct btrfs_root *orig_root,
4822 u64 num_bytes, u64 empty_size,
4823 u64 search_start, u64 search_end,
4824 u64 hint_byte, struct btrfs_key *ins,
4825 int data)
4826 {
4827 int ret = 0;
4828 struct btrfs_root *root = orig_root->fs_info->extent_root;
4829 struct btrfs_free_cluster *last_ptr = NULL;
4830 struct btrfs_block_group_cache *block_group = NULL;
4831 int empty_cluster = 2 * 1024 * 1024;
4832 int allowed_chunk_alloc = 0;
4833 int done_chunk_alloc = 0;
4834 struct btrfs_space_info *space_info;
4835 int last_ptr_loop = 0;
4836 int loop = 0;
4837 int index = 0;
4838 bool found_uncached_bg = false;
4839 bool failed_cluster_refill = false;
4840 bool failed_alloc = false;
4841 bool use_cluster = true;
4842 u64 ideal_cache_percent = 0;
4843 u64 ideal_cache_offset = 0;
4844
4845 WARN_ON(num_bytes < root->sectorsize);
4846 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4847 ins->objectid = 0;
4848 ins->offset = 0;
4849
4850 space_info = __find_space_info(root->fs_info, data);
4851 if (!space_info) {
4852 printk(KERN_ERR "No space info for %d\n", data);
4853 return -ENOSPC;
4854 }
4855
4856 /*
4857 * If the space info is for both data and metadata it means we have a
4858 * small filesystem and we can't use the clustering stuff.
4859 */
4860 if (btrfs_mixed_space_info(space_info))
4861 use_cluster = false;
4862
4863 if (orig_root->ref_cows || empty_size)
4864 allowed_chunk_alloc = 1;
4865
4866 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
4867 last_ptr = &root->fs_info->meta_alloc_cluster;
4868 if (!btrfs_test_opt(root, SSD))
4869 empty_cluster = 64 * 1024;
4870 }
4871
4872 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4873 btrfs_test_opt(root, SSD)) {
4874 last_ptr = &root->fs_info->data_alloc_cluster;
4875 }
4876
4877 if (last_ptr) {
4878 spin_lock(&last_ptr->lock);
4879 if (last_ptr->block_group)
4880 hint_byte = last_ptr->window_start;
4881 spin_unlock(&last_ptr->lock);
4882 }
4883
4884 search_start = max(search_start, first_logical_byte(root, 0));
4885 search_start = max(search_start, hint_byte);
4886
4887 if (!last_ptr)
4888 empty_cluster = 0;
4889
4890 if (search_start == hint_byte) {
4891 ideal_cache:
4892 block_group = btrfs_lookup_block_group(root->fs_info,
4893 search_start);
4894 /*
4895 * we don't want to use the block group if it doesn't match our
4896 		 * allocation bits, or if it's not cached.
4897 *
4898 * However if we are re-searching with an ideal block group
4899 * picked out then we don't care that the block group is cached.
4900 */
4901 if (block_group && block_group_bits(block_group, data) &&
4902 (block_group->cached != BTRFS_CACHE_NO ||
4903 search_start == ideal_cache_offset)) {
4904 down_read(&space_info->groups_sem);
4905 if (list_empty(&block_group->list) ||
4906 block_group->ro) {
4907 /*
4908 * someone is removing this block group,
4909 * we can't jump into the have_block_group
4910 * target because our list pointers are not
4911 * valid
4912 */
4913 btrfs_put_block_group(block_group);
4914 up_read(&space_info->groups_sem);
4915 } else {
4916 index = get_block_group_index(block_group);
4917 goto have_block_group;
4918 }
4919 } else if (block_group) {
4920 btrfs_put_block_group(block_group);
4921 }
4922 }
4923 search:
4924 down_read(&space_info->groups_sem);
4925 list_for_each_entry(block_group, &space_info->block_groups[index],
4926 list) {
4927 u64 offset;
4928 int cached;
4929
4930 btrfs_get_block_group(block_group);
4931 search_start = block_group->key.objectid;
4932
4933 have_block_group:
4934 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4935 u64 free_percent;
4936
4937 ret = cache_block_group(block_group, trans, 1);
4938 if (block_group->cached == BTRFS_CACHE_FINISHED)
4939 goto have_block_group;
4940
4941 free_percent = btrfs_block_group_used(&block_group->item);
4942 free_percent *= 100;
4943 free_percent = div64_u64(free_percent,
4944 block_group->key.offset);
4945 free_percent = 100 - free_percent;
4946 if (free_percent > ideal_cache_percent &&
4947 likely(!block_group->ro)) {
4948 ideal_cache_offset = block_group->key.objectid;
4949 ideal_cache_percent = free_percent;
4950 }
4951
4952 /*
4953 * We only want to start kthread caching if we are at
4954 * the point where we will wait for caching to make
4955 * progress, or if our ideal search is over and we've
4956 * found somebody to start caching.
4957 */
4958 if (loop > LOOP_CACHING_NOWAIT ||
4959 (loop > LOOP_FIND_IDEAL &&
4960 atomic_read(&space_info->caching_threads) < 2)) {
4961 ret = cache_block_group(block_group, trans, 0);
4962 BUG_ON(ret);
4963 }
4964 found_uncached_bg = true;
4965
4966 /*
4967 * If loop is set for cached only, try the next block
4968 * group.
4969 */
4970 if (loop == LOOP_FIND_IDEAL)
4971 goto loop;
4972 }
4973
4974 cached = block_group_cache_done(block_group);
4975 if (unlikely(!cached))
4976 found_uncached_bg = true;
4977
4978 if (unlikely(block_group->ro))
4979 goto loop;
4980
4981 /*
4982 		 * Ok we want to try and use the cluster allocator, so let's look
4983 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
4984 * have tried the cluster allocator plenty of times at this
4985 * point and not have found anything, so we are likely way too
4986 		 * fragmented for the clustering stuff to find anything, so let's
4987 * just skip it and let the allocator find whatever block it can
4988 * find
4989 */
4990 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
4991 /*
4992 * the refill lock keeps out other
4993 * people trying to start a new cluster
4994 */
4995 spin_lock(&last_ptr->refill_lock);
4996 if (last_ptr->block_group &&
4997 (last_ptr->block_group->ro ||
4998 !block_group_bits(last_ptr->block_group, data))) {
4999 offset = 0;
5000 goto refill_cluster;
5001 }
5002
5003 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5004 num_bytes, search_start);
5005 if (offset) {
5006 /* we have a block, we're done */
5007 spin_unlock(&last_ptr->refill_lock);
5008 goto checks;
5009 }
5010
5011 spin_lock(&last_ptr->lock);
5012 /*
5013 * whoops, this cluster doesn't actually point to
5014 * this block group. Get a ref on the block
5015 			 * group it does point to and try again
5016 */
5017 if (!last_ptr_loop && last_ptr->block_group &&
5018 last_ptr->block_group != block_group) {
5019
5020 btrfs_put_block_group(block_group);
5021 block_group = last_ptr->block_group;
5022 btrfs_get_block_group(block_group);
5023 spin_unlock(&last_ptr->lock);
5024 spin_unlock(&last_ptr->refill_lock);
5025
5026 last_ptr_loop = 1;
5027 search_start = block_group->key.objectid;
5028 /*
5029 * we know this block group is properly
5030 * in the list because
5031 				 * btrfs_remove_block_group drops the
5032 * cluster before it removes the block
5033 * group from the list
5034 */
5035 goto have_block_group;
5036 }
5037 spin_unlock(&last_ptr->lock);
5038 refill_cluster:
5039 /*
5040 * this cluster didn't work out, free it and
5041 * start over
5042 */
5043 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5044
5045 last_ptr_loop = 0;
5046
5047 /* allocate a cluster in this block group */
5048 ret = btrfs_find_space_cluster(trans, root,
5049 block_group, last_ptr,
5050 offset, num_bytes,
5051 empty_cluster + empty_size);
5052 if (ret == 0) {
5053 /*
5054 * now pull our allocation out of this
5055 * cluster
5056 */
5057 offset = btrfs_alloc_from_cluster(block_group,
5058 last_ptr, num_bytes,
5059 search_start);
5060 if (offset) {
5061 /* we found one, proceed */
5062 spin_unlock(&last_ptr->refill_lock);
5063 goto checks;
5064 }
5065 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5066 && !failed_cluster_refill) {
5067 spin_unlock(&last_ptr->refill_lock);
5068
5069 failed_cluster_refill = true;
5070 wait_block_group_cache_progress(block_group,
5071 num_bytes + empty_cluster + empty_size);
5072 goto have_block_group;
5073 }
5074
5075 /*
5076 * at this point we either didn't find a cluster
5077 * or we weren't able to allocate a block from our
5078 * cluster. Free the cluster we've been trying
5079 * to use, and go to the next block group
5080 */
5081 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5082 spin_unlock(&last_ptr->refill_lock);
5083 goto loop;
5084 }
5085
5086 offset = btrfs_find_space_for_alloc(block_group, search_start,
5087 num_bytes, empty_size);
5088 /*
5089 * If we didn't find a chunk, and we haven't failed on this
5090 * block group before, and this block group is in the middle of
5091 * caching and we are ok with waiting, then go ahead and wait
5092 * for progress to be made, and set failed_alloc to true.
5093 *
5094 * If failed_alloc is true then we've already waited on this
5095 * block group once and should move on to the next block group.
5096 */
5097 if (!offset && !failed_alloc && !cached &&
5098 loop > LOOP_CACHING_NOWAIT) {
5099 wait_block_group_cache_progress(block_group,
5100 num_bytes + empty_size);
5101 failed_alloc = true;
5102 goto have_block_group;
5103 } else if (!offset) {
5104 goto loop;
5105 }
5106 checks:
5107 search_start = stripe_align(root, offset);
5108 /* move on to the next group */
5109 if (search_start + num_bytes >= search_end) {
5110 btrfs_add_free_space(block_group, offset, num_bytes);
5111 goto loop;
5112 }
5113
5114 /* move on to the next group */
5115 if (search_start + num_bytes >
5116 block_group->key.objectid + block_group->key.offset) {
5117 btrfs_add_free_space(block_group, offset, num_bytes);
5118 goto loop;
5119 }
5120
5121 ins->objectid = search_start;
5122 ins->offset = num_bytes;
5123
5124 if (offset < search_start)
5125 btrfs_add_free_space(block_group, offset,
5126 search_start - offset);
5127 BUG_ON(offset > search_start);
5128
5129 ret = update_reserved_bytes(block_group, num_bytes, 1,
5130 (data & BTRFS_BLOCK_GROUP_DATA));
5131 if (ret == -EAGAIN) {
5132 btrfs_add_free_space(block_group, offset, num_bytes);
5133 goto loop;
5134 }
5135
5136 		/* we are all good, let's return */
5137 ins->objectid = search_start;
5138 ins->offset = num_bytes;
5139
5140 if (offset < search_start)
5141 btrfs_add_free_space(block_group, offset,
5142 search_start - offset);
5143 BUG_ON(offset > search_start);
5144 break;
5145 loop:
5146 failed_cluster_refill = false;
5147 failed_alloc = false;
5148 BUG_ON(index != get_block_group_index(block_group));
5149 btrfs_put_block_group(block_group);
5150 }
5151 up_read(&space_info->groups_sem);
5152
5153 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5154 goto search;
5155
5156 	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5157 * for them to make caching progress. Also
5158 * determine the best possible bg to cache
5159 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5160 * caching kthreads as we move along
5161 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5162 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5163 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5164 * again
5165 */
5166 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5167 (found_uncached_bg || empty_size || empty_cluster ||
5168 allowed_chunk_alloc)) {
5169 index = 0;
5170 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5171 found_uncached_bg = false;
5172 loop++;
5173 if (!ideal_cache_percent &&
5174 atomic_read(&space_info->caching_threads))
5175 goto search;
5176
5177 /*
5178 * 1 of the following 2 things have happened so far
5179 *
5180 * 1) We found an ideal block group for caching that
5181 * is mostly full and will cache quickly, so we might
5182 * as well wait for it.
5183 *
5184 * 2) We searched for cached only and we didn't find
5185 * anything, and we didn't start any caching kthreads
5186 * either, so chances are we will loop through and
5187 * start a couple caching kthreads, and then come back
5188 * around and just wait for them. This will be slower
5189 * because we will have 2 caching kthreads reading at
5190 * the same time when we could have just started one
5191 * and waited for it to get far enough to give us an
5192 * allocation, so go ahead and go to the wait caching
5193 * loop.
5194 */
5195 loop = LOOP_CACHING_WAIT;
5196 search_start = ideal_cache_offset;
5197 ideal_cache_percent = 0;
5198 goto ideal_cache;
5199 } else if (loop == LOOP_FIND_IDEAL) {
5200 /*
5201 			 * Didn't find an uncached bg, wait on anything we find
5202 * next.
5203 */
5204 loop = LOOP_CACHING_WAIT;
5205 goto search;
5206 }
5207
5208 if (loop < LOOP_CACHING_WAIT) {
5209 loop++;
5210 goto search;
5211 }
5212
5213 if (loop == LOOP_ALLOC_CHUNK) {
5214 empty_size = 0;
5215 empty_cluster = 0;
5216 }
5217
5218 if (allowed_chunk_alloc) {
5219 ret = do_chunk_alloc(trans, root, num_bytes +
5220 2 * 1024 * 1024, data, 1);
5221 allowed_chunk_alloc = 0;
5222 done_chunk_alloc = 1;
5223 } else if (!done_chunk_alloc) {
5224 space_info->force_alloc = 1;
5225 }
5226
5227 if (loop < LOOP_NO_EMPTY_SIZE) {
5228 loop++;
5229 goto search;
5230 }
5231 ret = -ENOSPC;
5232 } else if (!ins->objectid) {
5233 ret = -ENOSPC;
5234 }
5235
5236 /* we found what we needed */
5237 if (ins->objectid) {
5238 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5239 trans->block_group = block_group->key.objectid;
5240
5241 btrfs_put_block_group(block_group);
5242 ret = 0;
5243 }
5244
5245 return ret;
5246 }
5247
5248 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5249 int dump_block_groups)
5250 {
5251 struct btrfs_block_group_cache *cache;
5252 int index = 0;
5253
5254 spin_lock(&info->lock);
5255 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5256 (unsigned long long)(info->total_bytes - info->bytes_used -
5257 info->bytes_pinned - info->bytes_reserved -
5258 info->bytes_readonly),
5259 (info->full) ? "" : "not ");
5260 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5261 "reserved=%llu, may_use=%llu, readonly=%llu\n",
5262 (unsigned long long)info->total_bytes,
5263 (unsigned long long)info->bytes_used,
5264 (unsigned long long)info->bytes_pinned,
5265 (unsigned long long)info->bytes_reserved,
5266 (unsigned long long)info->bytes_may_use,
5267 (unsigned long long)info->bytes_readonly);
5268 spin_unlock(&info->lock);
5269
5270 if (!dump_block_groups)
5271 return;
5272
5273 down_read(&info->groups_sem);
5274 again:
5275 list_for_each_entry(cache, &info->block_groups[index], list) {
5276 spin_lock(&cache->lock);
5277 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5278 "%llu pinned %llu reserved\n",
5279 (unsigned long long)cache->key.objectid,
5280 (unsigned long long)cache->key.offset,
5281 (unsigned long long)btrfs_block_group_used(&cache->item),
5282 (unsigned long long)cache->pinned,
5283 (unsigned long long)cache->reserved);
5284 btrfs_dump_free_space(cache, bytes);
5285 spin_unlock(&cache->lock);
5286 }
5287 if (++index < BTRFS_NR_RAID_TYPES)
5288 goto again;
5289 up_read(&info->groups_sem);
5290 }
5291
5292 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5293 struct btrfs_root *root,
5294 u64 num_bytes, u64 min_alloc_size,
5295 u64 empty_size, u64 hint_byte,
5296 u64 search_end, struct btrfs_key *ins,
5297 u64 data)
5298 {
5299 int ret;
5300 u64 search_start = 0;
5301
5302 data = btrfs_get_alloc_profile(root, data);
5303 again:
5304 /*
5305 * the only place that sets empty_size is btrfs_realloc_node, which
5306 * is not called recursively on allocations
5307 */
5308 if (empty_size || root->ref_cows)
5309 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5310 num_bytes + 2 * 1024 * 1024, data, 0);
5311
5312 WARN_ON(num_bytes < root->sectorsize);
5313 ret = find_free_extent(trans, root, num_bytes, empty_size,
5314 search_start, search_end, hint_byte,
5315 ins, data);
5316
5317 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5318 num_bytes = num_bytes >> 1;
5319 num_bytes = num_bytes & ~(root->sectorsize - 1);
5320 num_bytes = max(num_bytes, min_alloc_size);
5321 do_chunk_alloc(trans, root->fs_info->extent_root,
5322 num_bytes, data, 1);
5323 goto again;
5324 }
5325 if (ret == -ENOSPC) {
5326 struct btrfs_space_info *sinfo;
5327
5328 sinfo = __find_space_info(root->fs_info, data);
5329 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5330 "wanted %llu\n", (unsigned long long)data,
5331 (unsigned long long)num_bytes);
5332 dump_space_info(sinfo, num_bytes, 1);
5333 }
5334
5335 return ret;
5336 }
5337
5338 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5339 {
5340 struct btrfs_block_group_cache *cache;
5341 int ret = 0;
5342
5343 cache = btrfs_lookup_block_group(root->fs_info, start);
5344 if (!cache) {
5345 printk(KERN_ERR "Unable to find block group for %llu\n",
5346 (unsigned long long)start);
5347 return -ENOSPC;
5348 }
5349
5350 ret = btrfs_discard_extent(root, start, len);
5351
5352 btrfs_add_free_space(cache, start, len);
5353 update_reserved_bytes(cache, len, 0, 1);
5354 btrfs_put_block_group(cache);
5355
5356 return ret;
5357 }
5358
5359 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5360 struct btrfs_root *root,
5361 u64 parent, u64 root_objectid,
5362 u64 flags, u64 owner, u64 offset,
5363 struct btrfs_key *ins, int ref_mod)
5364 {
5365 int ret;
5366 struct btrfs_fs_info *fs_info = root->fs_info;
5367 struct btrfs_extent_item *extent_item;
5368 struct btrfs_extent_inline_ref *iref;
5369 struct btrfs_path *path;
5370 struct extent_buffer *leaf;
5371 int type;
5372 u32 size;
5373
5374 if (parent > 0)
5375 type = BTRFS_SHARED_DATA_REF_KEY;
5376 else
5377 type = BTRFS_EXTENT_DATA_REF_KEY;
5378
5379 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5380
5381 path = btrfs_alloc_path();
5382 BUG_ON(!path);
5383
5384 path->leave_spinning = 1;
5385 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5386 ins, size);
5387 BUG_ON(ret);
5388
5389 leaf = path->nodes[0];
5390 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5391 struct btrfs_extent_item);
5392 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5393 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5394 btrfs_set_extent_flags(leaf, extent_item,
5395 flags | BTRFS_EXTENT_FLAG_DATA);
5396
5397 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5398 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5399 if (parent > 0) {
5400 struct btrfs_shared_data_ref *ref;
5401 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5402 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5403 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5404 } else {
5405 struct btrfs_extent_data_ref *ref;
5406 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5407 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5408 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5409 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5410 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5411 }
5412
5413 btrfs_mark_buffer_dirty(path->nodes[0]);
5414 btrfs_free_path(path);
5415
5416 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5417 if (ret) {
5418 printk(KERN_ERR "btrfs update block group failed for %llu "
5419 "%llu\n", (unsigned long long)ins->objectid,
5420 (unsigned long long)ins->offset);
5421 BUG();
5422 }
5423 return ret;
5424 }
5425
5426 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5427 struct btrfs_root *root,
5428 u64 parent, u64 root_objectid,
5429 u64 flags, struct btrfs_disk_key *key,
5430 int level, struct btrfs_key *ins)
5431 {
5432 int ret;
5433 struct btrfs_fs_info *fs_info = root->fs_info;
5434 struct btrfs_extent_item *extent_item;
5435 struct btrfs_tree_block_info *block_info;
5436 struct btrfs_extent_inline_ref *iref;
5437 struct btrfs_path *path;
5438 struct extent_buffer *leaf;
5439 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5440
5441 path = btrfs_alloc_path();
5442 BUG_ON(!path);
5443
5444 path->leave_spinning = 1;
5445 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5446 ins, size);
5447 BUG_ON(ret);
5448
5449 leaf = path->nodes[0];
5450 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5451 struct btrfs_extent_item);
5452 btrfs_set_extent_refs(leaf, extent_item, 1);
5453 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5454 btrfs_set_extent_flags(leaf, extent_item,
5455 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5456 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5457
5458 btrfs_set_tree_block_key(leaf, block_info, key);
5459 btrfs_set_tree_block_level(leaf, block_info, level);
5460
5461 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5462 if (parent > 0) {
5463 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5464 btrfs_set_extent_inline_ref_type(leaf, iref,
5465 BTRFS_SHARED_BLOCK_REF_KEY);
5466 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5467 } else {
5468 btrfs_set_extent_inline_ref_type(leaf, iref,
5469 BTRFS_TREE_BLOCK_REF_KEY);
5470 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5471 }
5472
5473 btrfs_mark_buffer_dirty(leaf);
5474 btrfs_free_path(path);
5475
5476 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5477 if (ret) {
5478 printk(KERN_ERR "btrfs update block group failed for %llu "
5479 "%llu\n", (unsigned long long)ins->objectid,
5480 (unsigned long long)ins->offset);
5481 BUG();
5482 }
5483 return ret;
5484 }
5485
5486 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5487 struct btrfs_root *root,
5488 u64 root_objectid, u64 owner,
5489 u64 offset, struct btrfs_key *ins)
5490 {
5491 int ret;
5492
5493 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5494
5495 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5496 0, root_objectid, owner, offset,
5497 BTRFS_ADD_DELAYED_EXTENT, NULL);
5498 return ret;
5499 }
5500
5501 /*
5502 * this is used by the tree logging recovery code. It records that
5503 * an extent has been allocated and makes sure to clear the free
5504 * space cache bits as well
5505 */
5506 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5507 struct btrfs_root *root,
5508 u64 root_objectid, u64 owner, u64 offset,
5509 struct btrfs_key *ins)
5510 {
5511 int ret;
5512 struct btrfs_block_group_cache *block_group;
5513 struct btrfs_caching_control *caching_ctl;
5514 u64 start = ins->objectid;
5515 u64 num_bytes = ins->offset;
5516
5517 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5518 cache_block_group(block_group, trans, 0);
5519 caching_ctl = get_caching_control(block_group);
5520
5521 if (!caching_ctl) {
5522 BUG_ON(!block_group_cache_done(block_group));
5523 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5524 BUG_ON(ret);
5525 } else {
5526 mutex_lock(&caching_ctl->mutex);
5527
5528 if (start >= caching_ctl->progress) {
5529 ret = add_excluded_extent(root, start, num_bytes);
5530 BUG_ON(ret);
5531 } else if (start + num_bytes <= caching_ctl->progress) {
5532 ret = btrfs_remove_free_space(block_group,
5533 start, num_bytes);
5534 BUG_ON(ret);
5535 } else {
5536 num_bytes = caching_ctl->progress - start;
5537 ret = btrfs_remove_free_space(block_group,
5538 start, num_bytes);
5539 BUG_ON(ret);
5540
5541 start = caching_ctl->progress;
5542 num_bytes = ins->objectid + ins->offset -
5543 caching_ctl->progress;
5544 ret = add_excluded_extent(root, start, num_bytes);
5545 BUG_ON(ret);
5546 }
5547
5548 mutex_unlock(&caching_ctl->mutex);
5549 put_caching_control(caching_ctl);
5550 }
5551
5552 ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5553 BUG_ON(ret);
5554 btrfs_put_block_group(block_group);
5555 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5556 0, owner, offset, ins, 1);
5557 return ret;
5558 }
5559
5560 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5561 struct btrfs_root *root,
5562 u64 bytenr, u32 blocksize,
5563 int level)
5564 {
5565 struct extent_buffer *buf;
5566
5567 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5568 if (!buf)
5569 return ERR_PTR(-ENOMEM);
5570 btrfs_set_header_generation(buf, trans->transid);
5571 btrfs_set_buffer_lockdep_class(buf, level);
5572 btrfs_tree_lock(buf);
5573 clean_tree_block(trans, root, buf);
5574
5575 btrfs_set_lock_blocking(buf);
5576 btrfs_set_buffer_uptodate(buf);
5577
5578 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5579 /*
5580 * we allow two log transactions at a time, use different
5581 * EXENT bit to differentiate dirty pages.
5582 */
5583 if (root->log_transid % 2 == 0)
5584 set_extent_dirty(&root->dirty_log_pages, buf->start,
5585 buf->start + buf->len - 1, GFP_NOFS);
5586 else
5587 set_extent_new(&root->dirty_log_pages, buf->start,
5588 buf->start + buf->len - 1, GFP_NOFS);
5589 } else {
5590 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5591 buf->start + buf->len - 1, GFP_NOFS);
5592 }
5593 trans->blocks_used++;
5594 /* this returns a buffer locked for blocking */
5595 return buf;
5596 }
5597
5598 static struct btrfs_block_rsv *
5599 use_block_rsv(struct btrfs_trans_handle *trans,
5600 struct btrfs_root *root, u32 blocksize)
5601 {
5602 struct btrfs_block_rsv *block_rsv;
5603 int ret;
5604
5605 block_rsv = get_block_rsv(trans, root);
5606
5607 if (block_rsv->size == 0) {
5608 ret = reserve_metadata_bytes(trans, root, block_rsv,
5609 blocksize, 0);
5610 if (ret)
5611 return ERR_PTR(ret);
5612 return block_rsv;
5613 }
5614
5615 ret = block_rsv_use_bytes(block_rsv, blocksize);
5616 if (!ret)
5617 return block_rsv;
5618
5619 return ERR_PTR(-ENOSPC);
5620 }
5621
5622 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5623 {
5624 block_rsv_add_bytes(block_rsv, blocksize, 0);
5625 block_rsv_release_bytes(block_rsv, NULL, 0);
5626 }
5627
5628 /*
5629  * finds a free extent and does all the dirty work required for allocation.
5630  * It returns the key for the extent through ins, and a tree buffer for
5631  * the first block of the extent through buf.
5632  *
5633  * returns the tree buffer or an ERR_PTR on failure.
5634 */
5635 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5636 struct btrfs_root *root, u32 blocksize,
5637 u64 parent, u64 root_objectid,
5638 struct btrfs_disk_key *key, int level,
5639 u64 hint, u64 empty_size)
5640 {
5641 struct btrfs_key ins;
5642 struct btrfs_block_rsv *block_rsv;
5643 struct extent_buffer *buf;
5644 u64 flags = 0;
5645 int ret;
5646
5647
5648 block_rsv = use_block_rsv(trans, root, blocksize);
5649 if (IS_ERR(block_rsv))
5650 return ERR_CAST(block_rsv);
5651
5652 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5653 empty_size, hint, (u64)-1, &ins, 0);
5654 if (ret) {
5655 unuse_block_rsv(block_rsv, blocksize);
5656 return ERR_PTR(ret);
5657 }
5658
5659 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5660 blocksize, level);
5661 BUG_ON(IS_ERR(buf));
5662
5663 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5664 if (parent == 0)
5665 parent = ins.objectid;
5666 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5667 } else
5668 BUG_ON(parent > 0);
5669
5670 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5671 struct btrfs_delayed_extent_op *extent_op;
5672 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5673 BUG_ON(!extent_op);
5674 if (key)
5675 memcpy(&extent_op->key, key, sizeof(extent_op->key));
5676 else
5677 memset(&extent_op->key, 0, sizeof(extent_op->key));
5678 extent_op->flags_to_set = flags;
5679 extent_op->update_key = 1;
5680 extent_op->update_flags = 1;
5681 extent_op->is_data = 0;
5682
5683 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5684 ins.offset, parent, root_objectid,
5685 level, BTRFS_ADD_DELAYED_EXTENT,
5686 extent_op);
5687 BUG_ON(ret);
5688 }
5689 return buf;
5690 }
5691
5692 struct walk_control {
5693 u64 refs[BTRFS_MAX_LEVEL];
5694 u64 flags[BTRFS_MAX_LEVEL];
5695 struct btrfs_key update_progress;
5696 int stage;
5697 int level;
5698 int shared_level;
5699 int update_ref;
5700 int keep_locks;
5701 int reada_slot;
5702 int reada_count;
5703 };
5704
5705 #define DROP_REFERENCE 1
5706 #define UPDATE_BACKREF 2
5707
5708 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5709 struct btrfs_root *root,
5710 struct walk_control *wc,
5711 struct btrfs_path *path)
5712 {
5713 u64 bytenr;
5714 u64 generation;
5715 u64 refs;
5716 u64 flags;
5717 u32 nritems;
5718 u32 blocksize;
5719 struct btrfs_key key;
5720 struct extent_buffer *eb;
5721 int ret;
5722 int slot;
5723 int nread = 0;
5724
5725 if (path->slots[wc->level] < wc->reada_slot) {
5726 wc->reada_count = wc->reada_count * 2 / 3;
5727 wc->reada_count = max(wc->reada_count, 2);
5728 } else {
5729 wc->reada_count = wc->reada_count * 3 / 2;
5730 wc->reada_count = min_t(int, wc->reada_count,
5731 BTRFS_NODEPTRS_PER_BLOCK(root));
5732 }
5733
5734 eb = path->nodes[wc->level];
5735 nritems = btrfs_header_nritems(eb);
5736 blocksize = btrfs_level_size(root, wc->level - 1);
5737
5738 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5739 if (nread >= wc->reada_count)
5740 break;
5741
5742 cond_resched();
5743 bytenr = btrfs_node_blockptr(eb, slot);
5744 generation = btrfs_node_ptr_generation(eb, slot);
5745
5746 if (slot == path->slots[wc->level])
5747 goto reada;
5748
5749 if (wc->stage == UPDATE_BACKREF &&
5750 generation <= root->root_key.offset)
5751 continue;
5752
5753 /* We don't lock the tree block, it's OK to be racy here */
5754 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5755 &refs, &flags);
5756 BUG_ON(ret);
5757 BUG_ON(refs == 0);
5758
5759 if (wc->stage == DROP_REFERENCE) {
5760 if (refs == 1)
5761 goto reada;
5762
5763 if (wc->level == 1 &&
5764 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5765 continue;
5766 if (!wc->update_ref ||
5767 generation <= root->root_key.offset)
5768 continue;
5769 btrfs_node_key_to_cpu(eb, &key, slot);
5770 ret = btrfs_comp_cpu_keys(&key,
5771 &wc->update_progress);
5772 if (ret < 0)
5773 continue;
5774 } else {
5775 if (wc->level == 1 &&
5776 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5777 continue;
5778 }
5779 reada:
5780 ret = readahead_tree_block(root, bytenr, blocksize,
5781 generation);
5782 if (ret)
5783 break;
5784 nread++;
5785 }
5786 wc->reada_slot = slot;
5787 }
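
/*
 * Worked example of the readahead window above (numbers are illustrative):
 * reada_count shrinks by a factor of 2/3 while the walker is still behind
 * the last readahead position and grows by 3/2 once it catches up, clamped
 * to [2, BTRFS_NODEPTRS_PER_BLOCK(root)].  Starting from 12:
 *
 *	behind : 12 -> 8 -> 5 -> 3 -> 2 (floor of 2)
 *	caught : 12 -> 18 -> 27 -> 40 -> ... (up to the per-block cap)
 */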
5788
5789 /*
5790 * helper to process a tree block while walking down the tree.
5791 *
5792 * when wc->stage == UPDATE_BACKREF, this function updates
5793 * back refs for pointers in the block.
5794 *
5795 * NOTE: return value 1 means we should stop walking down.
5796 */
5797 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5798 struct btrfs_root *root,
5799 struct btrfs_path *path,
5800 struct walk_control *wc, int lookup_info)
5801 {
5802 int level = wc->level;
5803 struct extent_buffer *eb = path->nodes[level];
5804 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5805 int ret;
5806
5807 if (wc->stage == UPDATE_BACKREF &&
5808 btrfs_header_owner(eb) != root->root_key.objectid)
5809 return 1;
5810
5811 /*
5812 * when the reference count of a tree block is 1, it won't increase
5813 * again. once the full backref flag is set, we never clear it.
5814 */
5815 if (lookup_info &&
5816 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5817 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5818 BUG_ON(!path->locks[level]);
5819 ret = btrfs_lookup_extent_info(trans, root,
5820 eb->start, eb->len,
5821 &wc->refs[level],
5822 &wc->flags[level]);
5823 BUG_ON(ret);
5824 BUG_ON(wc->refs[level] == 0);
5825 }
5826
5827 if (wc->stage == DROP_REFERENCE) {
5828 if (wc->refs[level] > 1)
5829 return 1;
5830
5831 if (path->locks[level] && !wc->keep_locks) {
5832 btrfs_tree_unlock(eb);
5833 path->locks[level] = 0;
5834 }
5835 return 0;
5836 }
5837
5838 /* wc->stage == UPDATE_BACKREF */
5839 if (!(wc->flags[level] & flag)) {
5840 BUG_ON(!path->locks[level]);
5841 ret = btrfs_inc_ref(trans, root, eb, 1);
5842 BUG_ON(ret);
5843 ret = btrfs_dec_ref(trans, root, eb, 0);
5844 BUG_ON(ret);
5845 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5846 eb->len, flag, 0);
5847 BUG_ON(ret);
5848 wc->flags[level] |= flag;
5849 }
5850
5851 /*
5852 * the block is shared by multiple trees, so it's not good to
5853 * keep the tree lock
5854 */
5855 if (path->locks[level] && level > 0) {
5856 btrfs_tree_unlock(eb);
5857 path->locks[level] = 0;
5858 }
5859 return 0;
5860 }
5861
5862 /*
5863 * helper to process a tree block pointer.
5864 *
5865 * when wc->stage == DROP_REFERENCE, this function checks the
5866 * reference count of the block pointed to. if the block is
5867 * shared and we need to update back refs for the subtree
5868 * rooted at the block, this function changes wc->stage to
5869 * UPDATE_BACKREF. if the block is shared and there is no
5870 * need to update back refs, this function drops the reference
5871 * to the block.
5872 *
5873 * NOTE: return value 1 means we should stop walking down.
5874 */
5875 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5876 struct btrfs_root *root,
5877 struct btrfs_path *path,
5878 struct walk_control *wc, int *lookup_info)
5879 {
5880 u64 bytenr;
5881 u64 generation;
5882 u64 parent;
5883 u32 blocksize;
5884 struct btrfs_key key;
5885 struct extent_buffer *next;
5886 int level = wc->level;
5887 int reada = 0;
5888 int ret = 0;
5889
5890 generation = btrfs_node_ptr_generation(path->nodes[level],
5891 path->slots[level]);
5892 /*
5893 * if the lower level block was created before the snapshot
5894 * was created, we know there is no need to update back refs
5895 * for the subtree
5896 */
5897 if (wc->stage == UPDATE_BACKREF &&
5898 generation <= root->root_key.offset) {
5899 *lookup_info = 1;
5900 return 1;
5901 }
5902
5903 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5904 blocksize = btrfs_level_size(root, level - 1);
5905
5906 next = btrfs_find_tree_block(root, bytenr, blocksize);
5907 if (!next) {
5908 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5909 if (!next)
5910 return -ENOMEM;
5911 reada = 1;
5912 }
5913 btrfs_tree_lock(next);
5914 btrfs_set_lock_blocking(next);
5915
5916 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5917 &wc->refs[level - 1],
5918 &wc->flags[level - 1]);
5919 BUG_ON(ret);
5920 BUG_ON(wc->refs[level - 1] == 0);
5921 *lookup_info = 0;
5922
5923 if (wc->stage == DROP_REFERENCE) {
5924 if (wc->refs[level - 1] > 1) {
5925 if (level == 1 &&
5926 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5927 goto skip;
5928
5929 if (!wc->update_ref ||
5930 generation <= root->root_key.offset)
5931 goto skip;
5932
5933 btrfs_node_key_to_cpu(path->nodes[level], &key,
5934 path->slots[level]);
5935 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5936 if (ret < 0)
5937 goto skip;
5938
5939 wc->stage = UPDATE_BACKREF;
5940 wc->shared_level = level - 1;
5941 }
5942 } else {
5943 if (level == 1 &&
5944 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5945 goto skip;
5946 }
5947
5948 if (!btrfs_buffer_uptodate(next, generation)) {
5949 btrfs_tree_unlock(next);
5950 free_extent_buffer(next);
5951 next = NULL;
5952 *lookup_info = 1;
5953 }
5954
5955 if (!next) {
5956 if (reada && level == 1)
5957 reada_walk_down(trans, root, wc, path);
5958 next = read_tree_block(root, bytenr, blocksize, generation);
5959 btrfs_tree_lock(next);
5960 btrfs_set_lock_blocking(next);
5961 }
5962
5963 level--;
5964 BUG_ON(level != btrfs_header_level(next));
5965 path->nodes[level] = next;
5966 path->slots[level] = 0;
5967 path->locks[level] = 1;
5968 wc->level = level;
5969 if (wc->level == 1)
5970 wc->reada_slot = 0;
5971 return 0;
5972 skip:
5973 wc->refs[level - 1] = 0;
5974 wc->flags[level - 1] = 0;
5975 if (wc->stage == DROP_REFERENCE) {
5976 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5977 parent = path->nodes[level]->start;
5978 } else {
5979 BUG_ON(root->root_key.objectid !=
5980 btrfs_header_owner(path->nodes[level]));
5981 parent = 0;
5982 }
5983
5984 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5985 root->root_key.objectid, level - 1, 0);
5986 BUG_ON(ret);
5987 }
5988 btrfs_tree_unlock(next);
5989 free_extent_buffer(next);
5990 *lookup_info = 1;
5991 return 1;
5992 }
5993
5994 /*
5995 * helper to process a tree block while walking up the tree.
5996 *
5997 * when wc->stage == DROP_REFERENCE, this function drops
5998 * reference count on the block.
5999 *
6000 * when wc->stage == UPDATE_BACKREF, this function changes
6001 * wc->stage back to DROP_REFERENCE if we changed wc->stage
6002 * to UPDATE_BACKREF previously while processing the block.
6003 *
6004 * NOTE: return value 1 means we should stop walking up.
6005 */
6006 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6007 struct btrfs_root *root,
6008 struct btrfs_path *path,
6009 struct walk_control *wc)
6010 {
6011 int ret;
6012 int level = wc->level;
6013 struct extent_buffer *eb = path->nodes[level];
6014 u64 parent = 0;
6015
6016 if (wc->stage == UPDATE_BACKREF) {
6017 BUG_ON(wc->shared_level < level);
6018 if (level < wc->shared_level)
6019 goto out;
6020
6021 ret = find_next_key(path, level + 1, &wc->update_progress);
6022 if (ret > 0)
6023 wc->update_ref = 0;
6024
6025 wc->stage = DROP_REFERENCE;
6026 wc->shared_level = -1;
6027 path->slots[level] = 0;
6028
6029 /*
6030 * check the reference count again if the block isn't locked.
6031 * we should start walking down the tree again if the reference
6032 * count is one.
6033 */
6034 if (!path->locks[level]) {
6035 BUG_ON(level == 0);
6036 btrfs_tree_lock(eb);
6037 btrfs_set_lock_blocking(eb);
6038 path->locks[level] = 1;
6039
6040 ret = btrfs_lookup_extent_info(trans, root,
6041 eb->start, eb->len,
6042 &wc->refs[level],
6043 &wc->flags[level]);
6044 BUG_ON(ret);
6045 BUG_ON(wc->refs[level] == 0);
6046 if (wc->refs[level] == 1) {
6047 btrfs_tree_unlock(eb);
6048 path->locks[level] = 0;
6049 return 1;
6050 }
6051 }
6052 }
6053
6054 /* wc->stage == DROP_REFERENCE */
6055 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6056
6057 if (wc->refs[level] == 1) {
6058 if (level == 0) {
6059 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6060 ret = btrfs_dec_ref(trans, root, eb, 1);
6061 else
6062 ret = btrfs_dec_ref(trans, root, eb, 0);
6063 BUG_ON(ret);
6064 }
6065 /* make the block-locked assertion in clean_tree_block happy */
6066 if (!path->locks[level] &&
6067 btrfs_header_generation(eb) == trans->transid) {
6068 btrfs_tree_lock(eb);
6069 btrfs_set_lock_blocking(eb);
6070 path->locks[level] = 1;
6071 }
6072 clean_tree_block(trans, root, eb);
6073 }
6074
6075 if (eb == root->node) {
6076 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6077 parent = eb->start;
6078 else
6079 BUG_ON(root->root_key.objectid !=
6080 btrfs_header_owner(eb));
6081 } else {
6082 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6083 parent = path->nodes[level + 1]->start;
6084 else
6085 BUG_ON(root->root_key.objectid !=
6086 btrfs_header_owner(path->nodes[level + 1]));
6087 }
6088
6089 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6090 out:
6091 wc->refs[level] = 0;
6092 wc->flags[level] = 0;
6093 return 0;
6094 }
6095
6096 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6097 struct btrfs_root *root,
6098 struct btrfs_path *path,
6099 struct walk_control *wc)
6100 {
6101 int level = wc->level;
6102 int lookup_info = 1;
6103 int ret;
6104
6105 while (level >= 0) {
6106 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6107 if (ret > 0)
6108 break;
6109
6110 if (level == 0)
6111 break;
6112
6113 if (path->slots[level] >=
6114 btrfs_header_nritems(path->nodes[level]))
6115 break;
6116
6117 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6118 if (ret > 0) {
6119 path->slots[level]++;
6120 continue;
6121 } else if (ret < 0)
6122 return ret;
6123 level = wc->level;
6124 }
6125 return 0;
6126 }
6127
6128 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6129 struct btrfs_root *root,
6130 struct btrfs_path *path,
6131 struct walk_control *wc, int max_level)
6132 {
6133 int level = wc->level;
6134 int ret;
6135
6136 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6137 while (level < max_level && path->nodes[level]) {
6138 wc->level = level;
6139 if (path->slots[level] + 1 <
6140 btrfs_header_nritems(path->nodes[level])) {
6141 path->slots[level]++;
6142 return 0;
6143 } else {
6144 ret = walk_up_proc(trans, root, path, wc);
6145 if (ret > 0)
6146 return 0;
6147
6148 if (path->locks[level]) {
6149 btrfs_tree_unlock(path->nodes[level]);
6150 path->locks[level] = 0;
6151 }
6152 free_extent_buffer(path->nodes[level]);
6153 path->nodes[level] = NULL;
6154 level++;
6155 }
6156 }
6157 return 1;
6158 }
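
/*
 * Illustrative driver sketch (this is exactly the shape of the loop in
 * btrfs_drop_snapshot() below): the two helpers above alternate until
 * walk_up_tree() returns 1, meaning the root itself has been processed:
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *		if (ret != 0)
 *			break;
 *	}
 */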
6159
6160 /*
6161 * drop a subvolume tree.
6162 *
6163 * this function traverses the tree, freeing any blocks that are only
6164 * referenced by the tree.
6165 *
6166 * when a shared tree block is found, this function decreases its
6167 * reference count by one. if update_ref is true, this function
6168 * also makes sure backrefs for the shared block and all lower level
6169 * blocks are properly updated.
6170 */
6171 int btrfs_drop_snapshot(struct btrfs_root *root,
6172 struct btrfs_block_rsv *block_rsv, int update_ref)
6173 {
6174 struct btrfs_path *path;
6175 struct btrfs_trans_handle *trans;
6176 struct btrfs_root *tree_root = root->fs_info->tree_root;
6177 struct btrfs_root_item *root_item = &root->root_item;
6178 struct walk_control *wc;
6179 struct btrfs_key key;
6180 int err = 0;
6181 int ret;
6182 int level;
6183
6184 path = btrfs_alloc_path();
6185 BUG_ON(!path);
6186
6187 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6188 BUG_ON(!wc);
6189
6190 trans = btrfs_start_transaction(tree_root, 0);
6191 if (block_rsv)
6192 trans->block_rsv = block_rsv;
6193
6194 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6195 level = btrfs_header_level(root->node);
6196 path->nodes[level] = btrfs_lock_root_node(root);
6197 btrfs_set_lock_blocking(path->nodes[level]);
6198 path->slots[level] = 0;
6199 path->locks[level] = 1;
6200 memset(&wc->update_progress, 0,
6201 sizeof(wc->update_progress));
6202 } else {
6203 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6204 memcpy(&wc->update_progress, &key,
6205 sizeof(wc->update_progress));
6206
6207 level = root_item->drop_level;
6208 BUG_ON(level == 0);
6209 path->lowest_level = level;
6210 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6211 path->lowest_level = 0;
6212 if (ret < 0) {
6213 err = ret;
6214 goto out;
6215 }
6216 WARN_ON(ret > 0);
6217
6218 /*
6219 * unlock our path, this is safe because only this
6220 * function is allowed to delete this snapshot
6221 */
6222 btrfs_unlock_up_safe(path, 0);
6223
6224 level = btrfs_header_level(root->node);
6225 while (1) {
6226 btrfs_tree_lock(path->nodes[level]);
6227 btrfs_set_lock_blocking(path->nodes[level]);
6228
6229 ret = btrfs_lookup_extent_info(trans, root,
6230 path->nodes[level]->start,
6231 path->nodes[level]->len,
6232 &wc->refs[level],
6233 &wc->flags[level]);
6234 BUG_ON(ret);
6235 BUG_ON(wc->refs[level] == 0);
6236
6237 if (level == root_item->drop_level)
6238 break;
6239
6240 btrfs_tree_unlock(path->nodes[level]);
6241 WARN_ON(wc->refs[level] != 1);
6242 level--;
6243 }
6244 }
6245
6246 wc->level = level;
6247 wc->shared_level = -1;
6248 wc->stage = DROP_REFERENCE;
6249 wc->update_ref = update_ref;
6250 wc->keep_locks = 0;
6251 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6252
6253 while (1) {
6254 ret = walk_down_tree(trans, root, path, wc);
6255 if (ret < 0) {
6256 err = ret;
6257 break;
6258 }
6259
6260 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6261 if (ret < 0) {
6262 err = ret;
6263 break;
6264 }
6265
6266 if (ret > 0) {
6267 BUG_ON(wc->stage != DROP_REFERENCE);
6268 break;
6269 }
6270
6271 if (wc->stage == DROP_REFERENCE) {
6272 level = wc->level;
6273 btrfs_node_key(path->nodes[level],
6274 &root_item->drop_progress,
6275 path->slots[level]);
6276 root_item->drop_level = level;
6277 }
6278
6279 BUG_ON(wc->level == 0);
6280 if (btrfs_should_end_transaction(trans, tree_root)) {
6281 ret = btrfs_update_root(trans, tree_root,
6282 &root->root_key,
6283 root_item);
6284 BUG_ON(ret);
6285
6286 btrfs_end_transaction_throttle(trans, tree_root);
6287 trans = btrfs_start_transaction(tree_root, 0);
6288 if (block_rsv)
6289 trans->block_rsv = block_rsv;
6290 }
6291 }
6292 btrfs_release_path(root, path);
6293 BUG_ON(err);
6294
6295 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6296 BUG_ON(ret);
6297
6298 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6299 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6300 NULL, NULL);
6301 BUG_ON(ret < 0);
6302 if (ret > 0) {
6303 ret = btrfs_del_orphan_item(trans, tree_root,
6304 root->root_key.objectid);
6305 BUG_ON(ret);
6306 }
6307 }
6308
6309 if (root->in_radix) {
6310 btrfs_free_fs_root(tree_root->fs_info, root);
6311 } else {
6312 free_extent_buffer(root->node);
6313 free_extent_buffer(root->commit_root);
6314 kfree(root);
6315 }
6316 out:
6317 btrfs_end_transaction_throttle(trans, tree_root);
6318 kfree(wc);
6319 btrfs_free_path(path);
6320 return err;
6321 }
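
/*
 * Hypothetical caller sketch (illustrative only): dropping a dead
 * snapshot with no private block reservation and without updating
 * backrefs for shared blocks.  'snap_root' is an assumed variable name
 * for an unlinked fs/subvol root:
 *
 *	err = btrfs_drop_snapshot(snap_root, NULL, 0);
 */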
6322
6323 /*
6324 * drop subtree rooted at tree block 'node'.
6325 *
6326 * NOTE: this function will unlock and release tree block 'node'
6327 */
6328 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6329 struct btrfs_root *root,
6330 struct extent_buffer *node,
6331 struct extent_buffer *parent)
6332 {
6333 struct btrfs_path *path;
6334 struct walk_control *wc;
6335 int level;
6336 int parent_level;
6337 int ret = 0;
6338 int wret;
6339
6340 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6341
6342 path = btrfs_alloc_path();
6343 BUG_ON(!path);
6344
6345 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6346 BUG_ON(!wc);
6347
6348 btrfs_assert_tree_locked(parent);
6349 parent_level = btrfs_header_level(parent);
6350 extent_buffer_get(parent);
6351 path->nodes[parent_level] = parent;
6352 path->slots[parent_level] = btrfs_header_nritems(parent);
6353
6354 btrfs_assert_tree_locked(node);
6355 level = btrfs_header_level(node);
6356 path->nodes[level] = node;
6357 path->slots[level] = 0;
6358 path->locks[level] = 1;
6359
6360 wc->refs[parent_level] = 1;
6361 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6362 wc->level = level;
6363 wc->shared_level = -1;
6364 wc->stage = DROP_REFERENCE;
6365 wc->update_ref = 0;
6366 wc->keep_locks = 1;
6367 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6368
6369 while (1) {
6370 wret = walk_down_tree(trans, root, path, wc);
6371 if (wret < 0) {
6372 ret = wret;
6373 break;
6374 }
6375
6376 wret = walk_up_tree(trans, root, path, wc, parent_level);
6377 if (wret < 0)
6378 ret = wret;
6379 if (wret != 0)
6380 break;
6381 }
6382
6383 kfree(wc);
6384 btrfs_free_path(path);
6385 return ret;
6386 }
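
/*
 * Hypothetical caller sketch (illustrative only): the caller must hold
 * tree locks on both 'node' and 'parent', and the root must be the tree
 * reloc root (see the BUG_ON above).  On return 'node' has been unlocked
 * and released, while 'parent' remains the caller's responsibility:
 *
 *	btrfs_tree_lock(parent);
 *	btrfs_tree_lock(node);
 *	wret = btrfs_drop_subtree(trans, reloc_root, node, parent);
 *	btrfs_tree_unlock(parent);
 */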
6387
6388 #if 0
6389 static unsigned long calc_ra(unsigned long start, unsigned long last,
6390 unsigned long nr)
6391 {
6392 return min(last, start + nr - 1);
6393 }
6394
6395 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
6396 u64 len)
6397 {
6398 u64 page_start;
6399 u64 page_end;
6400 unsigned long first_index;
6401 unsigned long last_index;
6402 unsigned long i;
6403 struct page *page;
6404 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6405 struct file_ra_state *ra;
6406 struct btrfs_ordered_extent *ordered;
6407 unsigned int total_read = 0;
6408 unsigned int total_dirty = 0;
6409 int ret = 0;
6410
6411 ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
6412
6413 mutex_lock(&inode->i_mutex);
6414 first_index = start >> PAGE_CACHE_SHIFT;
6415 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6416
6417 /* make sure the dirty trick played by the caller works */
6418 ret = invalidate_inode_pages2_range(inode->i_mapping,
6419 first_index, last_index);
6420 if (ret)
6421 goto out_unlock;
6422
6423 file_ra_state_init(ra, inode->i_mapping);
6424
6425 for (i = first_index; i <= last_index; i++) {
6426 if (total_read % ra->ra_pages == 0) {
6427 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
6428 calc_ra(i, last_index, ra->ra_pages));
6429 }
6430 total_read++;
6431 again:
6432 BUG_ON(((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode));
6434 page = grab_cache_page(inode->i_mapping, i);
6435 if (!page) {
6436 ret = -ENOMEM;
6437 goto out_unlock;
6438 }
6439 if (!PageUptodate(page)) {
6440 btrfs_readpage(NULL, page);
6441 lock_page(page);
6442 if (!PageUptodate(page)) {
6443 unlock_page(page);
6444 page_cache_release(page);
6445 ret = -EIO;
6446 goto out_unlock;
6447 }
6448 }
6449 wait_on_page_writeback(page);
6450
6451 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6452 page_end = page_start + PAGE_CACHE_SIZE - 1;
6453 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
6454
6455 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6456 if (ordered) {
6457 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6458 unlock_page(page);
6459 page_cache_release(page);
6460 btrfs_start_ordered_extent(inode, ordered, 1);
6461 btrfs_put_ordered_extent(ordered);
6462 goto again;
6463 }
6464 set_page_extent_mapped(page);
6465
6466 if (i == first_index)
6467 set_extent_bits(io_tree, page_start, page_end,
6468 EXTENT_BOUNDARY, GFP_NOFS);
6469 btrfs_set_extent_delalloc(inode, page_start, page_end);
6470
6471 set_page_dirty(page);
6472 total_dirty++;
6473
6474 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6475 unlock_page(page);
6476 page_cache_release(page);
6477 }
6478
6479 out_unlock:
6480 kfree(ra);
6481 mutex_unlock(&inode->i_mutex);
6482 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6483 return ret;
6484 }
6485
6486 static noinline int relocate_data_extent(struct inode *reloc_inode,
6487 struct btrfs_key *extent_key,
6488 u64 offset)
6489 {
6490 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6491 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6492 struct extent_map *em;
6493 u64 start = extent_key->objectid - offset;
6494 u64 end = start + extent_key->offset - 1;
6495
6496 em = alloc_extent_map(GFP_NOFS);
6497 BUG_ON(!em || IS_ERR(em));
6498
6499 em->start = start;
6500 em->len = extent_key->offset;
6501 em->block_len = extent_key->offset;
6502 em->block_start = extent_key->objectid;
6503 em->bdev = root->fs_info->fs_devices->latest_bdev;
6504 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6505
6506 /* set up the extent map to cheat btrfs_readpage */
6507 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6508 while (1) {
6509 int ret;
6510 write_lock(&em_tree->lock);
6511 ret = add_extent_mapping(em_tree, em);
6512 write_unlock(&em_tree->lock);
6513 if (ret != -EEXIST) {
6514 free_extent_map(em);
6515 break;
6516 }
6517 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
6518 }
6519 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6520
6521 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
6522 }
6523
6524 struct btrfs_ref_path {
6525 u64 extent_start;
6526 u64 nodes[BTRFS_MAX_LEVEL];
6527 u64 root_objectid;
6528 u64 root_generation;
6529 u64 owner_objectid;
6530 u32 num_refs;
6531 int lowest_level;
6532 int current_level;
6533 int shared_level;
6534
6535 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6536 u64 new_nodes[BTRFS_MAX_LEVEL];
6537 };
6538
6539 struct disk_extent {
6540 u64 ram_bytes;
6541 u64 disk_bytenr;
6542 u64 disk_num_bytes;
6543 u64 offset;
6544 u64 num_bytes;
6545 u8 compression;
6546 u8 encryption;
6547 u16 other_encoding;
6548 };
6549
6550 static int is_cowonly_root(u64 root_objectid)
6551 {
6552 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6553 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6554 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6555 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
6556 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6557 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
6558 return 1;
6559 return 0;
6560 }
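
/*
 * Usage sketch (mirrors read_ref_root() further down): COW-only roots
 * have a single instance, so their root items are keyed with offset 0,
 * while reference counted roots are looked up with the (u64)-1 "latest
 * version" offset:
 *
 *	if (is_cowonly_root(root_key.objectid))
 *		root_key.offset = 0;
 *	else
 *		root_key.offset = (u64)-1;
 */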
6561
6562 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
6563 struct btrfs_root *extent_root,
6564 struct btrfs_ref_path *ref_path,
6565 int first_time)
6566 {
6567 struct extent_buffer *leaf;
6568 struct btrfs_path *path;
6569 struct btrfs_extent_ref *ref;
6570 struct btrfs_key key;
6571 struct btrfs_key found_key;
6572 u64 bytenr;
6573 u32 nritems;
6574 int level;
6575 int ret = 1;
6576
6577 path = btrfs_alloc_path();
6578 if (!path)
6579 return -ENOMEM;
6580
6581 if (first_time) {
6582 ref_path->lowest_level = -1;
6583 ref_path->current_level = -1;
6584 ref_path->shared_level = -1;
6585 goto walk_up;
6586 }
6587 walk_down:
6588 level = ref_path->current_level - 1;
6589 while (level >= -1) {
6590 u64 parent;
6591 if (level < ref_path->lowest_level)
6592 break;
6593
6594 if (level >= 0)
6595 bytenr = ref_path->nodes[level];
6596 else
6597 bytenr = ref_path->extent_start;
6598 BUG_ON(bytenr == 0);
6599
6600 parent = ref_path->nodes[level + 1];
6601 ref_path->nodes[level + 1] = 0;
6602 ref_path->current_level = level;
6603 BUG_ON(parent == 0);
6604
6605 key.objectid = bytenr;
6606 key.offset = parent + 1;
6607 key.type = BTRFS_EXTENT_REF_KEY;
6608
6609 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6610 if (ret < 0)
6611 goto out;
6612 BUG_ON(ret == 0);
6613
6614 leaf = path->nodes[0];
6615 nritems = btrfs_header_nritems(leaf);
6616 if (path->slots[0] >= nritems) {
6617 ret = btrfs_next_leaf(extent_root, path);
6618 if (ret < 0)
6619 goto out;
6620 if (ret > 0)
6621 goto next;
6622 leaf = path->nodes[0];
6623 }
6624
6625 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6626 if (found_key.objectid == bytenr &&
6627 found_key.type == BTRFS_EXTENT_REF_KEY) {
6628 if (level < ref_path->shared_level)
6629 ref_path->shared_level = level;
6630 goto found;
6631 }
6632 next:
6633 level--;
6634 btrfs_release_path(extent_root, path);
6635 cond_resched();
6636 }
6637 /* reached lowest level */
6638 ret = 1;
6639 goto out;
6640 walk_up:
6641 level = ref_path->current_level;
6642 while (level < BTRFS_MAX_LEVEL - 1) {
6643 u64 ref_objectid;
6644
6645 if (level >= 0)
6646 bytenr = ref_path->nodes[level];
6647 else
6648 bytenr = ref_path->extent_start;
6649
6650 BUG_ON(bytenr == 0);
6651
6652 key.objectid = bytenr;
6653 key.offset = 0;
6654 key.type = BTRFS_EXTENT_REF_KEY;
6655
6656 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6657 if (ret < 0)
6658 goto out;
6659
6660 leaf = path->nodes[0];
6661 nritems = btrfs_header_nritems(leaf);
6662 if (path->slots[0] >= nritems) {
6663 ret = btrfs_next_leaf(extent_root, path);
6664 if (ret < 0)
6665 goto out;
6666 if (ret > 0) {
6667 /* the extent was freed by someone */
6668 if (ref_path->lowest_level == level)
6669 goto out;
6670 btrfs_release_path(extent_root, path);
6671 goto walk_down;
6672 }
6673 leaf = path->nodes[0];
6674 }
6675
6676 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6677 if (found_key.objectid != bytenr ||
6678 found_key.type != BTRFS_EXTENT_REF_KEY) {
6679 /* the extent was freed by someone */
6680 if (ref_path->lowest_level == level) {
6681 ret = 1;
6682 goto out;
6683 }
6684 btrfs_release_path(extent_root, path);
6685 goto walk_down;
6686 }
6687 found:
6688 ref = btrfs_item_ptr(leaf, path->slots[0],
6689 struct btrfs_extent_ref);
6690 ref_objectid = btrfs_ref_objectid(leaf, ref);
6691 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6692 if (first_time) {
6693 level = (int)ref_objectid;
6694 BUG_ON(level >= BTRFS_MAX_LEVEL);
6695 ref_path->lowest_level = level;
6696 ref_path->current_level = level;
6697 ref_path->nodes[level] = bytenr;
6698 } else {
6699 WARN_ON(ref_objectid != level);
6700 }
6701 } else {
6702 WARN_ON(level != -1);
6703 }
6704 first_time = 0;
6705
6706 if (ref_path->lowest_level == level) {
6707 ref_path->owner_objectid = ref_objectid;
6708 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6709 }
6710
6711 /*
6712 * the block is a tree root or the block isn't in a reference
6713 * counted tree.
6714 */
6715 if (found_key.objectid == found_key.offset ||
6716 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6717 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6718 ref_path->root_generation =
6719 btrfs_ref_generation(leaf, ref);
6720 if (level < 0) {
6721 /* special reference from the tree log */
6722 ref_path->nodes[0] = found_key.offset;
6723 ref_path->current_level = 0;
6724 }
6725 ret = 0;
6726 goto out;
6727 }
6728
6729 level++;
6730 BUG_ON(ref_path->nodes[level] != 0);
6731 ref_path->nodes[level] = found_key.offset;
6732 ref_path->current_level = level;
6733
6734 /*
6735 * the reference was created in the running transaction,
6736 * no need to continue walking up.
6737 */
6738 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6739 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6740 ref_path->root_generation =
6741 btrfs_ref_generation(leaf, ref);
6742 ret = 0;
6743 goto out;
6744 }
6745
6746 btrfs_release_path(extent_root, path);
6747 cond_resched();
6748 }
6749 /* reached max tree level, but no tree root found. */
6750 BUG();
6751 out:
6752 btrfs_free_path(path);
6753 return ret;
6754 }
6755
6756 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6757 struct btrfs_root *extent_root,
6758 struct btrfs_ref_path *ref_path,
6759 u64 extent_start)
6760 {
6761 memset(ref_path, 0, sizeof(*ref_path));
6762 ref_path->extent_start = extent_start;
6763
6764 return __next_ref_path(trans, extent_root, ref_path, 1);
6765 }
6766
6767 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6768 struct btrfs_root *extent_root,
6769 struct btrfs_ref_path *ref_path)
6770 {
6771 return __next_ref_path(trans, extent_root, ref_path, 0);
6772 }
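
/*
 * Illustrative iteration sketch (this is the pattern relocate_one_extent()
 * below uses): prime the walk with btrfs_first_ref_path(), then keep
 * calling btrfs_next_ref_path() until it returns a positive value, which
 * simply means there are no more reference paths:
 *
 *	ret = btrfs_first_ref_path(trans, extent_root, ref_path, bytenr);
 *	while (ret == 0) {
 *		... use ref_path->root_objectid, ref_path->nodes[] ...
 *		ret = btrfs_next_ref_path(trans, extent_root, ref_path);
 *	}
 *	if (ret < 0)
 *		goto error;
 */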
6773
6774 static noinline int get_new_locations(struct inode *reloc_inode,
6775 struct btrfs_key *extent_key,
6776 u64 offset, int no_fragment,
6777 struct disk_extent **extents,
6778 int *nr_extents)
6779 {
6780 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6781 struct btrfs_path *path;
6782 struct btrfs_file_extent_item *fi;
6783 struct extent_buffer *leaf;
6784 struct disk_extent *exts = *extents;
6785 struct btrfs_key found_key;
6786 u64 cur_pos;
6787 u64 last_byte;
6788 u32 nritems;
6789 int nr = 0;
6790 int max = *nr_extents;
6791 int ret;
6792
6793 WARN_ON(!no_fragment && *extents);
6794 if (!exts) {
6795 max = 1;
6796 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6797 if (!exts)
6798 return -ENOMEM;
6799 }
6800
6801 path = btrfs_alloc_path();
6802 BUG_ON(!path);
6803
6804 cur_pos = extent_key->objectid - offset;
6805 last_byte = extent_key->objectid + extent_key->offset;
6806 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6807 cur_pos, 0);
6808 if (ret < 0)
6809 goto out;
6810 if (ret > 0) {
6811 ret = -ENOENT;
6812 goto out;
6813 }
6814
6815 while (1) {
6816 leaf = path->nodes[0];
6817 nritems = btrfs_header_nritems(leaf);
6818 if (path->slots[0] >= nritems) {
6819 ret = btrfs_next_leaf(root, path);
6820 if (ret < 0)
6821 goto out;
6822 if (ret > 0)
6823 break;
6824 leaf = path->nodes[0];
6825 }
6826
6827 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6828 if (found_key.offset != cur_pos ||
6829 found_key.type != BTRFS_EXTENT_DATA_KEY ||
6830 found_key.objectid != reloc_inode->i_ino)
6831 break;
6832
6833 fi = btrfs_item_ptr(leaf, path->slots[0],
6834 struct btrfs_file_extent_item);
6835 if (btrfs_file_extent_type(leaf, fi) !=
6836 BTRFS_FILE_EXTENT_REG ||
6837 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6838 break;
6839
6840 if (nr == max) {
6841 struct disk_extent *old = exts;
6842 max *= 2;
6843 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
if (!exts) {
exts = old;
ret = -ENOMEM;
goto out;
}
6844 memcpy(exts, old, sizeof(*exts) * nr);
6845 if (old != *extents)
6846 kfree(old);
6847 }
6848
6849 exts[nr].disk_bytenr =
6850 btrfs_file_extent_disk_bytenr(leaf, fi);
6851 exts[nr].disk_num_bytes =
6852 btrfs_file_extent_disk_num_bytes(leaf, fi);
6853 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6854 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6855 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6856 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6857 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6858 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6859 fi);
6860 BUG_ON(exts[nr].offset > 0);
6861 BUG_ON(exts[nr].compression || exts[nr].encryption);
6862 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
6863
6864 cur_pos += exts[nr].num_bytes;
6865 nr++;
6866
6867 if (cur_pos + offset >= last_byte)
6868 break;
6869
6870 if (no_fragment) {
6871 ret = 1;
6872 goto out;
6873 }
6874 path->slots[0]++;
6875 }
6876
6877 BUG_ON(cur_pos + offset > last_byte);
6878 if (cur_pos + offset < last_byte) {
6879 ret = -ENOENT;
6880 goto out;
6881 }
6882 ret = 0;
6883 out:
6884 btrfs_free_path(path);
6885 if (ret) {
6886 if (exts != *extents)
6887 kfree(exts);
6888 } else {
6889 *extents = exts;
6890 *nr_extents = nr;
6891 }
6892 return ret;
6893 }
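
/*
 * Note on the array growth above (illustrative arithmetic): 'exts' starts
 * at a single entry in the no-fragment case and doubles whenever it fills
 * up, so collecting n extents performs O(log n) allocations and O(n)
 * copied entries in total:
 *
 *	max: 1 -> 2 -> 4 -> 8 -> ...   (one kzalloc + memcpy per doubling)
 */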
6894
6895 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6896 struct btrfs_root *root,
6897 struct btrfs_path *path,
6898 struct btrfs_key *extent_key,
6899 struct btrfs_key *leaf_key,
6900 struct btrfs_ref_path *ref_path,
6901 struct disk_extent *new_extents,
6902 int nr_extents)
6903 {
6904 struct extent_buffer *leaf;
6905 struct btrfs_file_extent_item *fi;
6906 struct inode *inode = NULL;
6907 struct btrfs_key key;
6908 u64 lock_start = 0;
6909 u64 lock_end = 0;
6910 u64 num_bytes;
6911 u64 ext_offset;
6912 u64 search_end = (u64)-1;
6913 u32 nritems;
6914 int nr_scanned = 0;
6915 int extent_locked = 0;
6916 int extent_type;
6917 int ret;
6918
6919 memcpy(&key, leaf_key, sizeof(key));
6920 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6921 if (key.objectid < ref_path->owner_objectid ||
6922 (key.objectid == ref_path->owner_objectid &&
6923 key.type < BTRFS_EXTENT_DATA_KEY)) {
6924 key.objectid = ref_path->owner_objectid;
6925 key.type = BTRFS_EXTENT_DATA_KEY;
6926 key.offset = 0;
6927 }
6928 }
6929
6930 while (1) {
6931 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6932 if (ret < 0)
6933 goto out;
6934
6935 leaf = path->nodes[0];
6936 nritems = btrfs_header_nritems(leaf);
6937 next:
6938 if (extent_locked && ret > 0) {
6939 /*
6940 * the file extent item was modified by someone
6941 * before the extent got locked.
6942 */
6943 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6944 lock_end, GFP_NOFS);
6945 extent_locked = 0;
6946 }
6947
6948 if (path->slots[0] >= nritems) {
6949 if (++nr_scanned > 2)
6950 break;
6951
6952 BUG_ON(extent_locked);
6953 ret = btrfs_next_leaf(root, path);
6954 if (ret < 0)
6955 goto out;
6956 if (ret > 0)
6957 break;
6958 leaf = path->nodes[0];
6959 nritems = btrfs_header_nritems(leaf);
6960 }
6961
6962 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6963
6964 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6965 if ((key.objectid > ref_path->owner_objectid) ||
6966 (key.objectid == ref_path->owner_objectid &&
6967 key.type > BTRFS_EXTENT_DATA_KEY) ||
6968 key.offset >= search_end)
6969 break;
6970 }
6971
6972 if (inode && key.objectid != inode->i_ino) {
6973 BUG_ON(extent_locked);
6974 btrfs_release_path(root, path);
6975 mutex_unlock(&inode->i_mutex);
6976 iput(inode);
6977 inode = NULL;
6978 continue;
6979 }
6980
6981 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6982 path->slots[0]++;
6983 ret = 1;
6984 goto next;
6985 }
6986 fi = btrfs_item_ptr(leaf, path->slots[0],
6987 struct btrfs_file_extent_item);
6988 extent_type = btrfs_file_extent_type(leaf, fi);
6989 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6990 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
6991 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6992 extent_key->objectid)) {
6993 path->slots[0]++;
6994 ret = 1;
6995 goto next;
6996 }
6997
6998 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6999 ext_offset = btrfs_file_extent_offset(leaf, fi);
7000
7001 if (search_end == (u64)-1) {
7002 search_end = key.offset - ext_offset +
7003 btrfs_file_extent_ram_bytes(leaf, fi);
7004 }
7005
7006 if (!extent_locked) {
7007 lock_start = key.offset;
7008 lock_end = lock_start + num_bytes - 1;
7009 } else {
7010 if (lock_start > key.offset ||
7011 lock_end + 1 < key.offset + num_bytes) {
7012 unlock_extent(&BTRFS_I(inode)->io_tree,
7013 lock_start, lock_end, GFP_NOFS);
7014 extent_locked = 0;
7015 }
7016 }
7017
7018 if (!inode) {
7019 btrfs_release_path(root, path);
7020
7021 inode = btrfs_iget_locked(root->fs_info->sb,
7022 key.objectid, root);
if (!inode) {
key.offset = (u64)-1;
goto skip;
}
7023 if (inode->i_state & I_NEW) {
7024 BTRFS_I(inode)->root = root;
7025 BTRFS_I(inode)->location.objectid =
7026 key.objectid;
7027 BTRFS_I(inode)->location.type =
7028 BTRFS_INODE_ITEM_KEY;
7029 BTRFS_I(inode)->location.offset = 0;
7030 btrfs_read_locked_inode(inode);
7031 unlock_new_inode(inode);
7032 }
7033 /*
7034 * some code calls btrfs_commit_transaction while
7035 * holding the i_mutex, so we can't use mutex_lock
7036 * here.
7037 */
7038 if (is_bad_inode(inode) ||
7039 !mutex_trylock(&inode->i_mutex)) {
7040 iput(inode);
7041 inode = NULL;
7042 key.offset = (u64)-1;
7043 goto skip;
7044 }
7045 }
7046
7047 if (!extent_locked) {
7048 struct btrfs_ordered_extent *ordered;
7049
7050 btrfs_release_path(root, path);
7051
7052 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7053 lock_end, GFP_NOFS);
7054 ordered = btrfs_lookup_first_ordered_extent(inode,
7055 lock_end);
7056 if (ordered &&
7057 ordered->file_offset <= lock_end &&
7058 ordered->file_offset + ordered->len > lock_start) {
7059 unlock_extent(&BTRFS_I(inode)->io_tree,
7060 lock_start, lock_end, GFP_NOFS);
7061 btrfs_start_ordered_extent(inode, ordered, 1);
7062 btrfs_put_ordered_extent(ordered);
7063 key.offset += num_bytes;
7064 goto skip;
7065 }
7066 if (ordered)
7067 btrfs_put_ordered_extent(ordered);
7068
7069 extent_locked = 1;
7070 continue;
7071 }
7072
7073 if (nr_extents == 1) {
7074 /* update extent pointer in place */
7075 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7076 new_extents[0].disk_bytenr);
7077 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7078 new_extents[0].disk_num_bytes);
7079 btrfs_mark_buffer_dirty(leaf);
7080
7081 btrfs_drop_extent_cache(inode, key.offset,
7082 key.offset + num_bytes - 1, 0);
7083
7084 ret = btrfs_inc_extent_ref(trans, root,
7085 new_extents[0].disk_bytenr,
7086 new_extents[0].disk_num_bytes,
7087 leaf->start,
7088 root->root_key.objectid,
7089 trans->transid,
7090 key.objectid);
7091 BUG_ON(ret);
7092
7093 ret = btrfs_free_extent(trans, root,
7094 extent_key->objectid,
7095 extent_key->offset,
7096 leaf->start,
7097 btrfs_header_owner(leaf),
7098 btrfs_header_generation(leaf),
7099 key.objectid, 0);
7100 BUG_ON(ret);
7101
7102 btrfs_release_path(root, path);
7103 key.offset += num_bytes;
7104 } else {
7105 BUG_ON(1);
7106 #if 0
7107 u64 alloc_hint;
7108 u64 extent_len;
7109 int i;
7110 /*
7111 * drop the old extent pointer first, then insert the
7112 * new pointers one by one
7113 */
7114 btrfs_release_path(root, path);
7115 ret = btrfs_drop_extents(trans, root, inode, key.offset,
7116 key.offset + num_bytes,
7117 key.offset, &alloc_hint);
7118 BUG_ON(ret);
7119
7120 for (i = 0; i < nr_extents; i++) {
7121 if (ext_offset >= new_extents[i].num_bytes) {
7122 ext_offset -= new_extents[i].num_bytes;
7123 continue;
7124 }
7125 extent_len = min(new_extents[i].num_bytes -
7126 ext_offset, num_bytes);
7127
7128 ret = btrfs_insert_empty_item(trans, root,
7129 path, &key,
7130 sizeof(*fi));
7131 BUG_ON(ret);
7132
7133 leaf = path->nodes[0];
7134 fi = btrfs_item_ptr(leaf, path->slots[0],
7135 struct btrfs_file_extent_item);
7136 btrfs_set_file_extent_generation(leaf, fi,
7137 trans->transid);
7138 btrfs_set_file_extent_type(leaf, fi,
7139 BTRFS_FILE_EXTENT_REG);
7140 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7141 new_extents[i].disk_bytenr);
7142 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7143 new_extents[i].disk_num_bytes);
7144 btrfs_set_file_extent_ram_bytes(leaf, fi,
7145 new_extents[i].ram_bytes);
7146
7147 btrfs_set_file_extent_compression(leaf, fi,
7148 new_extents[i].compression);
7149 btrfs_set_file_extent_encryption(leaf, fi,
7150 new_extents[i].encryption);
7151 btrfs_set_file_extent_other_encoding(leaf, fi,
7152 new_extents[i].other_encoding);
7153
7154 btrfs_set_file_extent_num_bytes(leaf, fi,
7155 extent_len);
7156 ext_offset += new_extents[i].offset;
7157 btrfs_set_file_extent_offset(leaf, fi,
7158 ext_offset);
7159 btrfs_mark_buffer_dirty(leaf);
7160
7161 btrfs_drop_extent_cache(inode, key.offset,
7162 key.offset + extent_len - 1, 0);
7163
7164 ret = btrfs_inc_extent_ref(trans, root,
7165 new_extents[i].disk_bytenr,
7166 new_extents[i].disk_num_bytes,
7167 leaf->start,
7168 root->root_key.objectid,
7169 trans->transid, key.objectid);
7170 BUG_ON(ret);
7171 btrfs_release_path(root, path);
7172
7173 inode_add_bytes(inode, extent_len);
7174
7175 ext_offset = 0;
7176 num_bytes -= extent_len;
7177 key.offset += extent_len;
7178
7179 if (num_bytes == 0)
7180 break;
7181 }
7182 BUG_ON(i >= nr_extents);
7183 #endif
7184 }
7185
7186 if (extent_locked) {
7187 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7188 lock_end, GFP_NOFS);
7189 extent_locked = 0;
7190 }
7191 skip:
7192 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
7193 key.offset >= search_end)
7194 break;
7195
7196 cond_resched();
7197 }
7198 ret = 0;
7199 out:
7200 btrfs_release_path(root, path);
7201 if (inode) {
7202 mutex_unlock(&inode->i_mutex);
7203 if (extent_locked) {
7204 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7205 lock_end, GFP_NOFS);
7206 }
7207 iput(inode);
7208 }
7209 return ret;
7210 }
7211
7212 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
7213 struct btrfs_root *root,
7214 struct extent_buffer *buf, u64 orig_start)
7215 {
7216 int level;
7217 int ret;
7218
7219 BUG_ON(btrfs_header_generation(buf) != trans->transid);
7220 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7221
7222 level = btrfs_header_level(buf);
7223 if (level == 0) {
7224 struct btrfs_leaf_ref *ref;
7225 struct btrfs_leaf_ref *orig_ref;
7226
7227 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
7228 if (!orig_ref)
7229 return -ENOENT;
7230
7231 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
7232 if (!ref) {
7233 btrfs_free_leaf_ref(root, orig_ref);
7234 return -ENOMEM;
7235 }
7236
7237 ref->nritems = orig_ref->nritems;
7238 memcpy(ref->extents, orig_ref->extents,
7239 sizeof(ref->extents[0]) * ref->nritems);
7240
7241 btrfs_free_leaf_ref(root, orig_ref);
7242
7243 ref->root_gen = trans->transid;
7244 ref->bytenr = buf->start;
7245 ref->owner = btrfs_header_owner(buf);
7246 ref->generation = btrfs_header_generation(buf);
7247
7248 ret = btrfs_add_leaf_ref(root, ref, 0);
7249 WARN_ON(ret);
7250 btrfs_free_leaf_ref(root, ref);
7251 }
7252 return 0;
7253 }
7254
7255 static noinline int invalidate_extent_cache(struct btrfs_root *root,
7256 struct extent_buffer *leaf,
7257 struct btrfs_block_group_cache *group,
7258 struct btrfs_root *target_root)
7259 {
7260 struct btrfs_key key;
7261 struct inode *inode = NULL;
7262 struct btrfs_file_extent_item *fi;
7263 struct extent_state *cached_state = NULL;
7264 u64 num_bytes;
7265 u64 skip_objectid = 0;
7266 u32 nritems;
7267 u32 i;
7268
7269 nritems = btrfs_header_nritems(leaf);
7270 for (i = 0; i < nritems; i++) {
7271 btrfs_item_key_to_cpu(leaf, &key, i);
7272 if (key.objectid == skip_objectid ||
7273 key.type != BTRFS_EXTENT_DATA_KEY)
7274 continue;
7275 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7276 if (btrfs_file_extent_type(leaf, fi) ==
7277 BTRFS_FILE_EXTENT_INLINE)
7278 continue;
7279 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7280 continue;
7281 if (!inode || inode->i_ino != key.objectid) {
7282 iput(inode);
7283 inode = btrfs_ilookup(target_root->fs_info->sb,
7284 key.objectid, target_root, 1);
7285 }
7286 if (!inode) {
7287 skip_objectid = key.objectid;
7288 continue;
7289 }
7290 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7291
7292 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
7293 key.offset + num_bytes - 1, 0, &cached_state,
7294 GFP_NOFS);
7295 btrfs_drop_extent_cache(inode, key.offset,
7296 key.offset + num_bytes - 1, 1);
7297 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
7298 key.offset + num_bytes - 1, &cached_state,
7299 GFP_NOFS);
7300 cond_resched();
7301 }
7302 iput(inode);
7303 return 0;
7304 }
7305
7306 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
7307 struct btrfs_root *root,
7308 struct extent_buffer *leaf,
7309 struct btrfs_block_group_cache *group,
7310 struct inode *reloc_inode)
7311 {
7312 struct btrfs_key key;
7313 struct btrfs_key extent_key;
7314 struct btrfs_file_extent_item *fi;
7315 struct btrfs_leaf_ref *ref;
7316 struct disk_extent *new_extent;
7317 u64 bytenr;
7318 u64 num_bytes;
7319 u32 nritems;
7320 u32 i;
7321 int ext_index;
7322 int nr_extent;
7323 int ret;
7324
7325 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
7326 BUG_ON(!new_extent);
7327
7328 ref = btrfs_lookup_leaf_ref(root, leaf->start);
7329 BUG_ON(!ref);
7330
7331 ext_index = -1;
7332 nritems = btrfs_header_nritems(leaf);
7333 for (i = 0; i < nritems; i++) {
7334 btrfs_item_key_to_cpu(leaf, &key, i);
7335 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
7336 continue;
7337 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7338 if (btrfs_file_extent_type(leaf, fi) ==
7339 BTRFS_FILE_EXTENT_INLINE)
7340 continue;
7341 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7342 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
7343 if (bytenr == 0)
7344 continue;
7345
7346 ext_index++;
7347 if (bytenr >= group->key.objectid + group->key.offset ||
7348 bytenr + num_bytes <= group->key.objectid)
7349 continue;
7350
7351 extent_key.objectid = bytenr;
7352 extent_key.offset = num_bytes;
7353 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7354 nr_extent = 1;
7355 ret = get_new_locations(reloc_inode, &extent_key,
7356 group->key.objectid, 1,
7357 &new_extent, &nr_extent);
7358 if (ret > 0)
7359 continue;
7360 BUG_ON(ret < 0);
7361
7362 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
7363 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
7364 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
7365 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
7366
7367 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7368 new_extent->disk_bytenr);
7369 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7370 new_extent->disk_num_bytes);
7371 btrfs_mark_buffer_dirty(leaf);
7372
7373 ret = btrfs_inc_extent_ref(trans, root,
7374 new_extent->disk_bytenr,
7375 new_extent->disk_num_bytes,
7376 leaf->start,
7377 root->root_key.objectid,
7378 trans->transid, key.objectid);
7379 BUG_ON(ret);
7380
7381 ret = btrfs_free_extent(trans, root,
7382 bytenr, num_bytes, leaf->start,
7383 btrfs_header_owner(leaf),
7384 btrfs_header_generation(leaf),
7385 key.objectid, 0);
7386 BUG_ON(ret);
7387 cond_resched();
7388 }
7389 kfree(new_extent);
7390 BUG_ON(ext_index + 1 != ref->nritems);
7391 btrfs_free_leaf_ref(root, ref);
7392 return 0;
7393 }
7394
7395 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
7396 struct btrfs_root *root)
7397 {
7398 struct btrfs_root *reloc_root;
7399 int ret;
7400
7401 if (root->reloc_root) {
7402 reloc_root = root->reloc_root;
7403 root->reloc_root = NULL;
7404 list_add(&reloc_root->dead_list,
7405 &root->fs_info->dead_reloc_roots);
7406
7407 btrfs_set_root_bytenr(&reloc_root->root_item,
7408 reloc_root->node->start);
7409 btrfs_set_root_level(&reloc_root->root_item,
7410 btrfs_header_level(reloc_root->node));
7411 memset(&reloc_root->root_item.drop_progress, 0,
7412 sizeof(struct btrfs_disk_key));
7413 reloc_root->root_item.drop_level = 0;
7414
7415 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7416 &reloc_root->root_key,
7417 &reloc_root->root_item);
7418 BUG_ON(ret);
7419 }
7420 return 0;
7421 }
7422
7423 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
7424 {
7425 struct btrfs_trans_handle *trans;
7426 struct btrfs_root *reloc_root;
7427 struct btrfs_root *prev_root = NULL;
7428 struct list_head dead_roots;
7429 int ret;
7430 unsigned long nr;
7431
7432 INIT_LIST_HEAD(&dead_roots);
7433 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
7434
7435 while (!list_empty(&dead_roots)) {
7436 reloc_root = list_entry(dead_roots.prev,
7437 struct btrfs_root, dead_list);
7438 list_del_init(&reloc_root->dead_list);
7439
7440 BUG_ON(reloc_root->commit_root != NULL);
7441 while (1) {
7442 trans = btrfs_join_transaction(root, 1);
7443 BUG_ON(!trans);
7444
7445 mutex_lock(&root->fs_info->drop_mutex);
7446 ret = btrfs_drop_snapshot(trans, reloc_root);
7447 if (ret != -EAGAIN)
7448 break;
7449 mutex_unlock(&root->fs_info->drop_mutex);
7450
7451 nr = trans->blocks_used;
7452 ret = btrfs_end_transaction(trans, root);
7453 BUG_ON(ret);
7454 btrfs_btree_balance_dirty(root, nr);
7455 }
7456
7457 free_extent_buffer(reloc_root->node);
7458
7459 ret = btrfs_del_root(trans, root->fs_info->tree_root,
7460 &reloc_root->root_key);
7461 BUG_ON(ret);
7462 mutex_unlock(&root->fs_info->drop_mutex);
7463
7464 nr = trans->blocks_used;
7465 ret = btrfs_end_transaction(trans, root);
7466 BUG_ON(ret);
7467 btrfs_btree_balance_dirty(root, nr);
7468
7469 kfree(prev_root);
7470 prev_root = reloc_root;
7471 }
7472 if (prev_root) {
7473 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
7474 kfree(prev_root);
7475 }
7476 return 0;
7477 }
7478
7479 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
7480 {
7481 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
7482 return 0;
7483 }
7484
7485 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7486 {
7487 struct btrfs_root *reloc_root;
7488 struct btrfs_trans_handle *trans;
7489 struct btrfs_key location;
7490 int found;
7491 int ret;
7492
7493 mutex_lock(&root->fs_info->tree_reloc_mutex);
7494 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
7495 BUG_ON(ret);
7496 found = !list_empty(&root->fs_info->dead_reloc_roots);
7497 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7498
7499 if (found) {
7500 trans = btrfs_start_transaction(root, 1);
7501 BUG_ON(!trans);
7502 ret = btrfs_commit_transaction(trans, root);
7503 BUG_ON(ret);
7504 }
7505
7506 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7507 location.offset = (u64)-1;
7508 location.type = BTRFS_ROOT_ITEM_KEY;
7509
7510 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7511 BUG_ON(!reloc_root);
7512 btrfs_orphan_cleanup(reloc_root);
7513 return 0;
7514 }
7515
7516 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7517 struct btrfs_root *root)
7518 {
7519 struct btrfs_root *reloc_root;
7520 struct extent_buffer *eb;
7521 struct btrfs_root_item *root_item;
7522 struct btrfs_key root_key;
7523 int ret;
7524
7525 BUG_ON(!root->ref_cows);
7526 if (root->reloc_root)
7527 return 0;
7528
7529 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
7530 BUG_ON(!root_item);
7531
7532 ret = btrfs_copy_root(trans, root, root->commit_root,
7533 &eb, BTRFS_TREE_RELOC_OBJECTID);
7534 BUG_ON(ret);
7535
7536 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7537 root_key.offset = root->root_key.objectid;
7538 root_key.type = BTRFS_ROOT_ITEM_KEY;
7539
7540 memcpy(root_item, &root->root_item, sizeof(*root_item));
7541 btrfs_set_root_refs(root_item, 0);
7542 btrfs_set_root_bytenr(root_item, eb->start);
7543 btrfs_set_root_level(root_item, btrfs_header_level(eb));
7544 btrfs_set_root_generation(root_item, trans->transid);
7545
7546 btrfs_tree_unlock(eb);
7547 free_extent_buffer(eb);
7548
7549 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
7550 &root_key, root_item);
7551 BUG_ON(ret);
7552 kfree(root_item);
7553
7554 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7555 &root_key);
7556 BUG_ON(!reloc_root);
7557 reloc_root->last_trans = trans->transid;
7558 reloc_root->commit_root = NULL;
7559 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
7560
7561 root->reloc_root = reloc_root;
7562 return 0;
7563 }
7564
7565 /*
7566 * Core function of space balance.
7567 *
7568 * The idea is to use reloc trees to relocate tree blocks in reference
7569 * counted roots. There is one reloc tree for each subvol, and all
7570 * reloc trees share the same root key objectid. Reloc trees are
7571 * snapshots of the latest committed roots of subvols (root->commit_root).
7572 *
7573 * To relocate a tree block referenced by a subvol, there are two steps:
7574 * COW the block through the subvol's reloc tree, then update the block
7575 * pointer in the subvol to point to the new block. Since all reloc trees
7576 * share the same root key objectid, special handling for tree blocks
7577 * owned by them is easy. Once a tree block has been COWed in one reloc
7578 * tree, we can use the resulting new block directly when the same block
7579 * is required to be COWed again through other reloc trees. This way,
7580 * relocated tree blocks are shared between reloc trees, so they are
7581 * also shared between subvols.
7582 */
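/*
 * Condensed sketch of the two steps described above (illustrative; the
 * complete version is relocate_one_path() below):
 *
 *	1) COW the block through the subvol's reloc tree:
 *		ret = btrfs_search_slot(trans, reloc_root, first_key,
 *					path, 0, 1);
 *	2) graft the COWed blocks back into the subvol itself:
 *		ret = btrfs_merge_path(trans, root, keys, nodes,
 *				       lowest_level);
 */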
7583 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
7584 struct btrfs_root *root,
7585 struct btrfs_path *path,
7586 struct btrfs_key *first_key,
7587 struct btrfs_ref_path *ref_path,
7588 struct btrfs_block_group_cache *group,
7589 struct inode *reloc_inode)
7590 {
7591 struct btrfs_root *reloc_root;
7592 struct extent_buffer *eb = NULL;
7593 struct btrfs_key *keys;
7594 u64 *nodes;
7595 int level;
7596 int shared_level;
7597 int lowest_level = 0;
7598 int ret;
7599
7600 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
7601 lowest_level = ref_path->owner_objectid;
7602
7603 if (!root->ref_cows) {
7604 path->lowest_level = lowest_level;
7605 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
7606 BUG_ON(ret < 0);
7607 path->lowest_level = 0;
7608 btrfs_release_path(root, path);
7609 return 0;
7610 }
7611
7612 mutex_lock(&root->fs_info->tree_reloc_mutex);
7613 ret = init_reloc_tree(trans, root);
7614 BUG_ON(ret);
7615 reloc_root = root->reloc_root;
7616
7617 shared_level = ref_path->shared_level;
7618 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
7619
7620 keys = ref_path->node_keys;
7621 nodes = ref_path->new_nodes;
7622 memset(&keys[shared_level + 1], 0,
7623 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
7624 memset(&nodes[shared_level + 1], 0,
7625 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
7626
7627 if (nodes[lowest_level] == 0) {
7628 path->lowest_level = lowest_level;
7629 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7630 0, 1);
7631 BUG_ON(ret);
7632 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
7633 eb = path->nodes[level];
7634 if (!eb || eb == reloc_root->node)
7635 break;
7636 nodes[level] = eb->start;
7637 if (level == 0)
7638 btrfs_item_key_to_cpu(eb, &keys[level], 0);
7639 else
7640 btrfs_node_key_to_cpu(eb, &keys[level], 0);
7641 }
7642 if (nodes[0] &&
7643 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7644 eb = path->nodes[0];
7645 ret = replace_extents_in_leaf(trans, reloc_root, eb,
7646 group, reloc_inode);
7647 BUG_ON(ret);
7648 }
7649 btrfs_release_path(reloc_root, path);
7650 } else {
7651 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
7652 lowest_level);
7653 BUG_ON(ret);
7654 }
7655
7656 /*
7657 * replace tree blocks in the fs tree with tree blocks in
7658 * the reloc tree.
7659 */
7660 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
7661 BUG_ON(ret < 0);
7662
7663 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7664 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7665 0, 0);
7666 BUG_ON(ret);
7667 extent_buffer_get(path->nodes[0]);
7668 eb = path->nodes[0];
7669 btrfs_release_path(reloc_root, path);
7670 ret = invalidate_extent_cache(reloc_root, eb, group, root);
7671 BUG_ON(ret);
7672 free_extent_buffer(eb);
7673 }
7674
7675 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7676 path->lowest_level = 0;
7677 return 0;
7678 }
7679
7680 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
7681 struct btrfs_root *root,
7682 struct btrfs_path *path,
7683 struct btrfs_key *first_key,
7684 struct btrfs_ref_path *ref_path)
7685 {
7686 int ret;
7687
7688 ret = relocate_one_path(trans, root, path, first_key,
7689 ref_path, NULL, NULL);
7690 BUG_ON(ret);
7691
7692 return 0;
7693 }
7694
7695 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
7696 struct btrfs_root *extent_root,
7697 struct btrfs_path *path,
7698 struct btrfs_key *extent_key)
7699 {
7700 int ret;
7701
7702 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7703 if (ret)
7704 goto out;
7705 ret = btrfs_del_item(trans, extent_root, path);
7706 out:
7707 btrfs_release_path(extent_root, path);
7708 return ret;
7709 }
7710
7711 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
7712 struct btrfs_ref_path *ref_path)
7713 {
7714 struct btrfs_key root_key;
7715
7716 root_key.objectid = ref_path->root_objectid;
7717 root_key.type = BTRFS_ROOT_ITEM_KEY;
7718 if (is_cowonly_root(ref_path->root_objectid))
7719 root_key.offset = 0;
7720 else
7721 root_key.offset = (u64)-1;
7722
7723 return btrfs_read_fs_root_no_name(fs_info, &root_key);
7724 }
7725
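/*
 * walk every reference path to an extent and relocate it.  on pass 0
 * data extents are simply copied into the relocation inode; later
 * passes update the remaining references in place, falling back to
 * replace_one_extent when the references can't go through the reloc
 * tree.  tree blocks go through relocate_tree_block.
 */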
7726 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7727 struct btrfs_path *path,
7728 struct btrfs_key *extent_key,
7729 struct btrfs_block_group_cache *group,
7730 struct inode *reloc_inode, int pass)
7731 {
7732 struct btrfs_trans_handle *trans;
7733 struct btrfs_root *found_root;
7734 struct btrfs_ref_path *ref_path = NULL;
7735 struct disk_extent *new_extents = NULL;
7736 int nr_extents = 0;
7737 int loops;
7738 int ret;
7739 int level;
7740 struct btrfs_key first_key;
7741 u64 prev_block = 0;
7742
7744 trans = btrfs_start_transaction(extent_root, 1);
7745 BUG_ON(!trans);
7746
7747 if (extent_key->objectid == 0) {
7748 ret = del_extent_zero(trans, extent_root, path, extent_key);
7749 goto out;
7750 }
7751
7752 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7753 if (!ref_path) {
7754 ret = -ENOMEM;
7755 goto out;
7756 }
7757
7758 for (loops = 0; ; loops++) {
7759 if (loops == 0) {
7760 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7761 extent_key->objectid);
7762 } else {
7763 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7764 }
7765 if (ret < 0)
7766 goto out;
7767 if (ret > 0)
7768 break;
7769
7770 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7771 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7772 continue;
7773
7774 found_root = read_ref_root(extent_root->fs_info, ref_path);
7775 BUG_ON(!found_root);
7776 /*
7777 * for reference counted trees, only process reference paths
7778 * rooted at the latest committed root.
7779 */
7780 if (found_root->ref_cows &&
7781 ref_path->root_generation != found_root->root_key.offset)
7782 continue;
7783
7784 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7785 if (pass == 0) {
7786 /*
7787 * copy data extents to new locations
7788 */
7789 u64 group_start = group->key.objectid;
7790 ret = relocate_data_extent(reloc_inode,
7791 extent_key,
7792 group_start);
7793 if (ret < 0)
7794 goto out;
7795 break;
7796 }
7797 level = 0;
7798 } else {
7799 level = ref_path->owner_objectid;
7800 }
7801
7802 if (prev_block != ref_path->nodes[level]) {
7803 struct extent_buffer *eb;
7804 u64 block_start = ref_path->nodes[level];
7805 u64 block_size = btrfs_level_size(found_root, level);
7806
7807 eb = read_tree_block(found_root, block_start,
7808 block_size, 0);
7809 btrfs_tree_lock(eb);
7810 BUG_ON(level != btrfs_header_level(eb));
7811
7812 if (level == 0)
7813 btrfs_item_key_to_cpu(eb, &first_key, 0);
7814 else
7815 btrfs_node_key_to_cpu(eb, &first_key, 0);
7816
7817 btrfs_tree_unlock(eb);
7818 free_extent_buffer(eb);
7819 prev_block = block_start;
7820 }
7821
7822 mutex_lock(&extent_root->fs_info->trans_mutex);
7823 btrfs_record_root_in_trans(found_root);
7824 mutex_unlock(&extent_root->fs_info->trans_mutex);
7825 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7826 /*
7827 * try to update data extent references while
7828 * keeping metadata shared between snapshots.
7829 */
7830 if (pass == 1) {
7831 ret = relocate_one_path(trans, found_root,
7832 path, &first_key, ref_path,
7833 group, reloc_inode);
7834 if (ret < 0)
7835 goto out;
7836 continue;
7837 }
7838 /*
7839 * use fallback method to process the remaining
7840 * references.
7841 */
7842 if (!new_extents) {
7843 u64 group_start = group->key.objectid;
7844 new_extents = kmalloc(sizeof(*new_extents),
7845 GFP_NOFS);
if (!new_extents) {
ret = -ENOMEM;
goto out;
}
7846 nr_extents = 1;
7847 ret = get_new_locations(reloc_inode,
7848 extent_key,
7849 group_start, 1,
7850 &new_extents,
7851 &nr_extents);
7852 if (ret)
7853 goto out;
7854 }
7855 ret = replace_one_extent(trans, found_root,
7856 path, extent_key,
7857 &first_key, ref_path,
7858 new_extents, nr_extents);
7859 } else {
7860 ret = relocate_tree_block(trans, found_root, path,
7861 &first_key, ref_path);
7862 }
7863 if (ret < 0)
7864 goto out;
7865 }
7866 ret = 0;
7867 out:
7868 btrfs_end_transaction(trans, extent_root);
7869 kfree(new_extents);
7870 kfree(ref_path);
7871 return ret;
7872 }
7873 #endif
7874
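/*
 * figure out the new profile for a block group when the device count
 * no longer matches its raid flags: raid0/1/10 degrade to single or
 * dup on a one device fs, while dup and single chunks are upgraded to
 * raid1 and raid0 once more rw devices are available.
 */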
7875 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7876 {
7877 u64 num_devices;
7878 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7879 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7880
7881 num_devices = root->fs_info->fs_devices->rw_devices;
7882 if (num_devices == 1) {
7883 stripped |= BTRFS_BLOCK_GROUP_DUP;
7884 stripped = flags & ~stripped;
7885
7886 /* turn raid0 into single device chunks */
7887 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7888 return stripped;
7889
7890 /* turn mirroring into duplication */
7891 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7892 BTRFS_BLOCK_GROUP_RAID10))
7893 return stripped | BTRFS_BLOCK_GROUP_DUP;
7894 return flags;
7895 } else {
7896 /* they already had raid on here, just return */
7897 if (flags & stripped)
7898 return flags;
7899
7900 stripped |= BTRFS_BLOCK_GROUP_DUP;
7901 stripped = flags & ~stripped;
7902
7903 /* turn duplication into raid1 */
7904 if (flags & BTRFS_BLOCK_GROUP_DUP)
7905 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7906
7907 /* turn single device chunks into raid0 */
7908 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7909 }
7911 }
7912
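/*
 * try to mark a block group read only.  this only succeeds if the
 * space info can still cover its reservations with the group's unused
 * bytes set aside as readonly; otherwise -ENOSPC is returned.
 */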
7913 static int set_block_group_ro(struct btrfs_block_group_cache *cache)
7914 {
7915 struct btrfs_space_info *sinfo = cache->space_info;
7916 u64 num_bytes;
7917 int ret = -ENOSPC;
7918
7919 if (cache->ro)
7920 return 0;
7921
7922 spin_lock(&sinfo->lock);
7923 spin_lock(&cache->lock);
7924 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7925 cache->bytes_super - btrfs_block_group_used(&cache->item);
7926
7927 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7928 sinfo->bytes_may_use + sinfo->bytes_readonly +
7929 cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
7930 sinfo->bytes_readonly += num_bytes;
7931 sinfo->bytes_reserved += cache->reserved_pinned;
7932 cache->reserved_pinned = 0;
7933 cache->ro = 1;
7934 ret = 0;
7935 }
7936 spin_unlock(&cache->lock);
7937 spin_unlock(&sinfo->lock);
7938 return ret;
7939 }
7940
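/*
 * mark a block group read only for relocation, forcing its profile to
 * match the current device count and allocating a fresh chunk to make
 * room if the first attempt fails.
 */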
7941 int btrfs_set_block_group_ro(struct btrfs_root *root,
7942 struct btrfs_block_group_cache *cache)
7944 {
7945 struct btrfs_trans_handle *trans;
7946 u64 alloc_flags;
7947 int ret;
7948
7949 BUG_ON(cache->ro);
7950
7951 trans = btrfs_join_transaction(root, 1);
7952 BUG_ON(IS_ERR(trans));
7953
7954 alloc_flags = update_block_group_flags(root, cache->flags);
7955 if (alloc_flags != cache->flags)
7956 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7957
7958 ret = set_block_group_ro(cache);
7959 if (!ret)
7960 goto out;
7961 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7962 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7963 if (ret < 0)
7964 goto out;
7965 ret = set_block_group_ro(cache);
7966 out:
7967 btrfs_end_transaction(trans, root);
7968 return ret;
7969 }
7970
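/*
 * undo set_block_group_ro: give the group's unused bytes back to the
 * space info read/write pool and clear the ro flag.
 */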
7971 int btrfs_set_block_group_rw(struct btrfs_root *root,
7972 struct btrfs_block_group_cache *cache)
7973 {
7974 struct btrfs_space_info *sinfo = cache->space_info;
7975 u64 num_bytes;
7976
7977 BUG_ON(!cache->ro);
7978
7979 spin_lock(&sinfo->lock);
7980 spin_lock(&cache->lock);
7981 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7982 cache->bytes_super - btrfs_block_group_used(&cache->item);
7983 sinfo->bytes_readonly -= num_bytes;
7984 cache->ro = 0;
7985 spin_unlock(&cache->lock);
7986 spin_unlock(&sinfo->lock);
7987 return 0;
7988 }
7989
7990 /*
7991 * checks to see if it's even possible to relocate this block group.
7992 *
7993 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7994 * ok to go ahead and try.
7995 */
7996 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7997 {
7998 struct btrfs_block_group_cache *block_group;
7999 struct btrfs_space_info *space_info;
8000 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8001 struct btrfs_device *device;
8002 int full = 0;
8003 int ret = 0;
8004
8005 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8006
8007 /* odd, couldn't find the block group, leave it alone */
8008 if (!block_group)
8009 return -1;
8010
8011 /* no bytes used, we're good */
8012 if (!btrfs_block_group_used(&block_group->item))
8013 goto out;
8014
8015 space_info = block_group->space_info;
8016 spin_lock(&space_info->lock);
8017
8018 full = space_info->full;
8019
8020 /*
8021 * if this is the last block group we have in this space, we can't
8022 * relocate it unless we're able to allocate a new chunk below.
8023 *
8024 * Otherwise, we need to make sure we have room in the space to handle
8025 * all of the extents from this block group. If we can, we're good.
8026 */
8027 if ((space_info->total_bytes != block_group->key.offset) &&
8028 (space_info->bytes_used + space_info->bytes_reserved +
8029 space_info->bytes_pinned + space_info->bytes_readonly +
8030 btrfs_block_group_used(&block_group->item) <
8031 space_info->total_bytes)) {
8032 spin_unlock(&space_info->lock);
8033 goto out;
8034 }
8035 spin_unlock(&space_info->lock);
8036
8037 /*
8038 * ok we don't have enough space, but maybe we have free space on our
8039 * devices to allocate new chunks for relocation, so loop through our
8040 * alloc devices and guess if we have enough space. However, if we
8041 * were marked as full, then we know there aren't enough chunks, and we
8042 * can just return.
8043 */
8044 ret = -1;
8045 if (full)
8046 goto out;
8047
8048 mutex_lock(&root->fs_info->chunk_mutex);
8049 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8050 u64 min_free = btrfs_block_group_used(&block_group->item);
8051 u64 dev_offset, max_avail;
8052
8053 /*
8054 * check to make sure we can actually find a chunk with enough
8055 * space to fit our block group in.
8056 */
8057 if (device->total_bytes > device->bytes_used + min_free) {
8058 ret = find_free_dev_extent(NULL, device, min_free,
8059 &dev_offset, &max_avail);
8060 if (!ret)
8061 break;
8062 ret = -1;
8063 }
8064 }
8065 mutex_unlock(&root->fs_info->chunk_mutex);
8066 out:
8067 btrfs_put_block_group(block_group);
8068 return ret;
8069 }
8070
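/*
 * scan the extent tree for the first block group item at or after the
 * search key.  returns 0 with the path pointing at the item, > 0 when
 * there are no more block groups, < 0 on error.
 */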
8071 static int find_first_block_group(struct btrfs_root *root,
8072 struct btrfs_path *path, struct btrfs_key *key)
8073 {
8074 int ret = 0;
8075 struct btrfs_key found_key;
8076 struct extent_buffer *leaf;
8077 int slot;
8078
8079 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8080 if (ret < 0)
8081 goto out;
8082
8083 while (1) {
8084 slot = path->slots[0];
8085 leaf = path->nodes[0];
8086 if (slot >= btrfs_header_nritems(leaf)) {
8087 ret = btrfs_next_leaf(root, path);
8088 if (ret == 0)
8089 continue;
8090 if (ret < 0)
8091 goto out;
8092 break;
8093 }
8094 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8095
8096 if (found_key.objectid >= key->objectid &&
8097 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8098 ret = 0;
8099 goto out;
8100 }
8101 path->slots[0]++;
8102 }
8103 out:
8104 return ret;
8105 }
8106
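/*
 * drop the inode reference each block group holds on its free space
 * cache file.  meant to run at unmount time, before the block groups
 * themselves are freed.
 */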
8107 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8108 {
8109 struct btrfs_block_group_cache *block_group;
8110 u64 last = 0;
8111
8112 while (1) {
8113 struct inode *inode;
8114
8115 block_group = btrfs_lookup_first_block_group(info, last);
8116 while (block_group) {
8117 spin_lock(&block_group->lock);
8118 if (block_group->iref)
8119 break;
8120 spin_unlock(&block_group->lock);
8121 block_group = next_block_group(info->tree_root,
8122 block_group);
8123 }
8124 if (!block_group) {
8125 if (last == 0)
8126 break;
8127 last = 0;
8128 continue;
8129 }
8130
8131 inode = block_group->inode;
8132 block_group->iref = 0;
8133 block_group->inode = NULL;
8134 spin_unlock(&block_group->lock);
8135 iput(inode);
8136 last = block_group->key.objectid + block_group->key.offset;
8137 btrfs_put_block_group(block_group);
8138 }
8139 }
8140
8141 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8142 {
8143 struct btrfs_block_group_cache *block_group;
8144 struct btrfs_space_info *space_info;
8145 struct btrfs_caching_control *caching_ctl;
8146 struct rb_node *n;
8147
8148 down_write(&info->extent_commit_sem);
8149 while (!list_empty(&info->caching_block_groups)) {
8150 caching_ctl = list_entry(info->caching_block_groups.next,
8151 struct btrfs_caching_control, list);
8152 list_del(&caching_ctl->list);
8153 put_caching_control(caching_ctl);
8154 }
8155 up_write(&info->extent_commit_sem);
8156
8157 spin_lock(&info->block_group_cache_lock);
8158 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8159 block_group = rb_entry(n, struct btrfs_block_group_cache,
8160 cache_node);
8161 rb_erase(&block_group->cache_node,
8162 &info->block_group_cache_tree);
8163 spin_unlock(&info->block_group_cache_lock);
8164
8165 down_write(&block_group->space_info->groups_sem);
8166 list_del(&block_group->list);
8167 up_write(&block_group->space_info->groups_sem);
8168
8169 if (block_group->cached == BTRFS_CACHE_STARTED)
8170 wait_block_group_cache_done(block_group);
8171
8172 btrfs_remove_free_space_cache(block_group);
8173 btrfs_put_block_group(block_group);
8174
8175 spin_lock(&info->block_group_cache_lock);
8176 }
8177 spin_unlock(&info->block_group_cache_lock);
8178
8179 /* now that all the block groups are freed, go through and
8180 * free all the space_info structs. This is only called during
8181 * the final stages of unmount, and so we know nobody is
8182 * using them. We call synchronize_rcu() once before we start,
8183 * just to be on the safe side.
8184 */
8185 synchronize_rcu();
8186
8187 release_global_block_rsv(info);
8188
8189 while (!list_empty(&info->space_info)) {
8190 space_info = list_entry(info->space_info.next,
8191 struct btrfs_space_info,
8192 list);
8193 if (space_info->bytes_pinned > 0 ||
8194 space_info->bytes_reserved > 0) {
8195 WARN_ON(1);
8196 dump_space_info(space_info, 0, 0);
8197 }
8198 list_del(&space_info->list);
8199 kfree(space_info);
8200 }
8201 return 0;
8202 }
8203
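/*
 * put a block group on the list matching its raid profile inside its
 * space info, as picked by get_block_group_index().
 */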
8204 static void __link_block_group(struct btrfs_space_info *space_info,
8205 struct btrfs_block_group_cache *cache)
8206 {
8207 int index = get_block_group_index(cache);
8208
8209 down_write(&space_info->groups_sem);
8210 list_add_tail(&cache->list, &space_info->block_groups[index]);
8211 up_write(&space_info->groups_sem);
8212 }
8213
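/*
 * build the in-memory block group cache from the block group items in
 * the extent tree at mount time.  completely full and completely empty
 * groups are marked cached right away so they skip the caching worker.
 */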
8214 int btrfs_read_block_groups(struct btrfs_root *root)
8215 {
8216 struct btrfs_path *path;
8217 int ret;
8218 struct btrfs_block_group_cache *cache;
8219 struct btrfs_fs_info *info = root->fs_info;
8220 struct btrfs_space_info *space_info;
8221 struct btrfs_key key;
8222 struct btrfs_key found_key;
8223 struct extent_buffer *leaf;
8224 int need_clear = 0;
8225 u64 cache_gen;
8226
8227 root = info->extent_root;
8228 key.objectid = 0;
8229 key.offset = 0;
8230 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8231 path = btrfs_alloc_path();
8232 if (!path)
8233 return -ENOMEM;
8234
8235 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
8236 if (cache_gen != 0 &&
8237 btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
8238 need_clear = 1;
8239 if (btrfs_test_opt(root, CLEAR_CACHE))
8240 need_clear = 1;
8241 if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
8242 printk(KERN_INFO "btrfs: disk space caching is enabled\n");
8243
8244 while (1) {
8245 ret = find_first_block_group(root, path, &key);
8246 if (ret > 0)
8247 break;
8248 if (ret != 0)
8249 goto error;
8250
8251 leaf = path->nodes[0];
8252 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8253 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8254 if (!cache) {
8255 ret = -ENOMEM;
8256 goto error;
8257 }
8258
8259 atomic_set(&cache->count, 1);
8260 spin_lock_init(&cache->lock);
8261 spin_lock_init(&cache->tree_lock);
8262 cache->fs_info = info;
8263 INIT_LIST_HEAD(&cache->list);
8264 INIT_LIST_HEAD(&cache->cluster_list);
8265
8266 if (need_clear)
8267 cache->disk_cache_state = BTRFS_DC_CLEAR;
8268
8269 /*
8270 * we only want to have 32k of ram per block group for keeping
8271 * track of free space, and if we pass 1/2 of that we want to
8272 * start converting things over to using bitmaps
8273 */
8274 cache->extents_thresh = ((1024 * 32) / 2) /
8275 sizeof(struct btrfs_free_space);
8276
8277 read_extent_buffer(leaf, &cache->item,
8278 btrfs_item_ptr_offset(leaf, path->slots[0]),
8279 sizeof(cache->item));
8280 memcpy(&cache->key, &found_key, sizeof(found_key));
8281
8282 key.objectid = found_key.objectid + found_key.offset;
8283 btrfs_release_path(root, path);
8284 cache->flags = btrfs_block_group_flags(&cache->item);
8285 cache->sectorsize = root->sectorsize;
8286
8287 /*
8288 * check for two cases: either we are full, and therefore
8289 * don't need to bother with the caching work since we won't
8290 * find any space, or we are empty, and we can just add all
8291 * the space in and be done with it. This saves us a lot of
8292 * time, particularly in the full case.
8293 */
8294 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8295 exclude_super_stripes(root, cache);
8296 cache->last_byte_to_unpin = (u64)-1;
8297 cache->cached = BTRFS_CACHE_FINISHED;
8298 free_excluded_extents(root, cache);
8299 } else if (btrfs_block_group_used(&cache->item) == 0) {
8300 exclude_super_stripes(root, cache);
8301 cache->last_byte_to_unpin = (u64)-1;
8302 cache->cached = BTRFS_CACHE_FINISHED;
8303 add_new_free_space(cache, root->fs_info,
8304 found_key.objectid,
8305 found_key.objectid +
8306 found_key.offset);
8307 free_excluded_extents(root, cache);
8308 }
8309
8310 ret = update_space_info(info, cache->flags, found_key.offset,
8311 btrfs_block_group_used(&cache->item),
8312 &space_info);
8313 BUG_ON(ret);
8314 cache->space_info = space_info;
8315 spin_lock(&cache->space_info->lock);
8316 cache->space_info->bytes_readonly += cache->bytes_super;
8317 spin_unlock(&cache->space_info->lock);
8318
8319 __link_block_group(space_info, cache);
8320
8321 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8322 BUG_ON(ret);
8323
8324 set_avail_alloc_bits(root->fs_info, cache->flags);
8325 if (btrfs_chunk_readonly(root, cache->key.objectid))
8326 set_block_group_ro(cache);
8327 }
8328
8329 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8330 if (!(get_alloc_profile(root, space_info->flags) &
8331 (BTRFS_BLOCK_GROUP_RAID10 |
8332 BTRFS_BLOCK_GROUP_RAID1 |
8333 BTRFS_BLOCK_GROUP_DUP)))
8334 continue;
8335 /*
8336 * avoid allocating from un-mirrored block groups if there are
8337 * mirrored block groups.
8338 */
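/* lists 3 and 4 hold the raid0 and single groups, see get_block_group_index() */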
8339 list_for_each_entry(cache, &space_info->block_groups[3], list)
8340 set_block_group_ro(cache);
8341 list_for_each_entry(cache, &space_info->block_groups[4], list)
8342 set_block_group_ro(cache);
8343 }
8344
8345 init_global_block_rsv(info);
8346 ret = 0;
8347 error:
8348 btrfs_free_path(path);
8349 return ret;
8350 }
8351
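/*
 * create the block group cache entry for a newly allocated chunk and
 * insert the matching block group item into the extent tree.
 */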
8352 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8353 struct btrfs_root *root, u64 bytes_used,
8354 u64 type, u64 chunk_objectid, u64 chunk_offset,
8355 u64 size)
8356 {
8357 int ret;
8358 struct btrfs_root *extent_root;
8359 struct btrfs_block_group_cache *cache;
8360
8361 extent_root = root->fs_info->extent_root;
8362
8363 root->fs_info->last_trans_log_full_commit = trans->transid;
8364
8365 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8366 if (!cache)
8367 return -ENOMEM;
8368
8369 cache->key.objectid = chunk_offset;
8370 cache->key.offset = size;
8371 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8372 cache->sectorsize = root->sectorsize;
8373 cache->fs_info = root->fs_info;
8374
8375 /*
8376 * we only want to have 32k of ram per block group for keeping track
8377 * of free space, and if we pass 1/2 of that we want to start
8378 * converting things over to using bitmaps
8379 */
8380 cache->extents_thresh = ((1024 * 32) / 2) /
8381 sizeof(struct btrfs_free_space);
8382 atomic_set(&cache->count, 1);
8383 spin_lock_init(&cache->lock);
8384 spin_lock_init(&cache->tree_lock);
8385 INIT_LIST_HEAD(&cache->list);
8386 INIT_LIST_HEAD(&cache->cluster_list);
8387
8388 btrfs_set_block_group_used(&cache->item, bytes_used);
8389 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8390 cache->flags = type;
8391 btrfs_set_block_group_flags(&cache->item, type);
8392
8393 cache->last_byte_to_unpin = (u64)-1;
8394 cache->cached = BTRFS_CACHE_FINISHED;
8395 exclude_super_stripes(root, cache);
8396
8397 add_new_free_space(cache, root->fs_info, chunk_offset,
8398 chunk_offset + size);
8399
8400 free_excluded_extents(root, cache);
8401
8402 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8403 &cache->space_info);
8404 BUG_ON(ret);
8405
8406 spin_lock(&cache->space_info->lock);
8407 cache->space_info->bytes_readonly += cache->bytes_super;
8408 spin_unlock(&cache->space_info->lock);
8409
8410 __link_block_group(cache->space_info, cache);
8411
8412 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8413 BUG_ON(ret);
8414
8415 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
8416 sizeof(cache->item));
8417 BUG_ON(ret);
8418
8419 set_avail_alloc_bits(extent_root->fs_info, type);
8420
8421 return 0;
8422 }
8423
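/*
 * remove a block group once its contents have been relocated: drop its
 * free space cache inode, unlink it from its space info and from the
 * block group cache, and delete its item from the extent tree.
 */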
8424 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8425 struct btrfs_root *root, u64 group_start)
8426 {
8427 struct btrfs_path *path;
8428 struct btrfs_block_group_cache *block_group;
8429 struct btrfs_free_cluster *cluster;
8430 struct btrfs_root *tree_root = root->fs_info->tree_root;
8431 struct btrfs_key key;
8432 struct inode *inode;
8433 int ret;
8434 int factor;
8435
8436 root = root->fs_info->extent_root;
8437
8438 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8439 BUG_ON(!block_group);
8440 BUG_ON(!block_group->ro);
8441
8442 memcpy(&key, &block_group->key, sizeof(key));
8443 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8444 BTRFS_BLOCK_GROUP_RAID1 |
8445 BTRFS_BLOCK_GROUP_RAID10))
8446 factor = 2;
8447 else
8448 factor = 1;
8449
8450 /* make sure this block group isn't part of an allocation cluster */
8451 cluster = &root->fs_info->data_alloc_cluster;
8452 spin_lock(&cluster->refill_lock);
8453 btrfs_return_cluster_to_free_space(block_group, cluster);
8454 spin_unlock(&cluster->refill_lock);
8455
8456 /*
8457 * make sure this block group isn't part of a metadata
8458 * allocation cluster
8459 */
8460 cluster = &root->fs_info->meta_alloc_cluster;
8461 spin_lock(&cluster->refill_lock);
8462 btrfs_return_cluster_to_free_space(block_group, cluster);
8463 spin_unlock(&cluster->refill_lock);
8464
8465 path = btrfs_alloc_path();
8466 BUG_ON(!path);
8467
8468 inode = lookup_free_space_inode(root, block_group, path);
8469 if (!IS_ERR(inode)) {
8470 btrfs_orphan_add(trans, inode);
8471 clear_nlink(inode);
8472 /* One for the block group's ref */
8473 spin_lock(&block_group->lock);
8474 if (block_group->iref) {
8475 block_group->iref = 0;
8476 block_group->inode = NULL;
8477 spin_unlock(&block_group->lock);
8478 iput(inode);
8479 } else {
8480 spin_unlock(&block_group->lock);
8481 }
8482 /* One for our lookup ref */
8483 iput(inode);
8484 }
8485
8486 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8487 key.offset = block_group->key.objectid;
8488 key.type = 0;
8489
8490 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8491 if (ret < 0)
8492 goto out;
8493 if (ret > 0)
8494 btrfs_release_path(tree_root, path);
8495 if (ret == 0) {
8496 ret = btrfs_del_item(trans, tree_root, path);
8497 if (ret)
8498 goto out;
8499 btrfs_release_path(tree_root, path);
8500 }
8501
8502 spin_lock(&root->fs_info->block_group_cache_lock);
8503 rb_erase(&block_group->cache_node,
8504 &root->fs_info->block_group_cache_tree);
8505 spin_unlock(&root->fs_info->block_group_cache_lock);
8506
8507 down_write(&block_group->space_info->groups_sem);
8508 /*
8509 * we must use list_del_init so people can check to see if they
8510 * are still on the list after taking the semaphore
8511 */
8512 list_del_init(&block_group->list);
8513 up_write(&block_group->space_info->groups_sem);
8514
8515 if (block_group->cached == BTRFS_CACHE_STARTED)
8516 wait_block_group_cache_done(block_group);
8517
8518 btrfs_remove_free_space_cache(block_group);
8519
8520 spin_lock(&block_group->space_info->lock);
8521 block_group->space_info->total_bytes -= block_group->key.offset;
8522 block_group->space_info->bytes_readonly -= block_group->key.offset;
8523 block_group->space_info->disk_total -= block_group->key.offset * factor;
8524 spin_unlock(&block_group->space_info->lock);
8525
8526 memcpy(&key, &block_group->key, sizeof(key));
8527
8528 btrfs_clear_space_info_full(root->fs_info);
8529
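/* once for our lookup ref, once for the ref held by the cache tree */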
8530 btrfs_put_block_group(block_group);
8531 btrfs_put_block_group(block_group);
8532
8533 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8534 if (ret > 0)
8535 ret = -EIO;
8536 if (ret < 0)
8537 goto out;
8538
8539 ret = btrfs_del_item(trans, root, path);
8540 out:
8541 btrfs_free_path(path);
8542 return ret;
8543 }