/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/* control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_FORCE = 1,
	CHUNK_ALLOC_LIMITED = 2,
};

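/*
 * Editor's sketch, not part of the original file: a typical allocator-path
 * caller would try CHUNK_ALLOC_LIMITED first and escalate.  The argument
 * values below are illustrative only:
 *
 *	ret = do_chunk_alloc(trans, extent_root, num_bytes + 2 * 1024 * 1024,
 *			     flags, CHUNK_ALLOC_LIMITED);
 *
 * Paths that must not fail for lack of chunks would instead pass
 * CHUNK_ALLOC_FORCE.
 */
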
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

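/*
 * Editor's sketch, not part of the original file: the reserve modes are
 * assumed to pair up around an allocation attempt, roughly:
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	...
 *	if (failed)
 *		btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 *
 * with RESERVE_ALLOC_NO_ACCOUNT used when the caller has already charged
 * bytes_may_use itself.
 */
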
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

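/*
 * Editor's example, not in the original file: with block groups at
 * [0, 8M) and [8M, 16M), a search for bytenr 4M returns [0, 8M) when
 * contains is 1 (4M falls inside it), but returns [8M, 16M) when
 * contains is 0, since that is the first group starting at or after 4M.
 * Either way the returned group's refcount has been bumped, so the
 * caller must drop it with btrfs_put_block_group().
 */
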
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

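/*
 * Editor's example, not in the original file: if the range being cached
 * is [0, 1M) and [256K, 512K) is still pinned, the loop above adds
 * [0, 256K) as free space and skips the pinned bytes, then the tail
 * case adds [512K, 1M).  The pinned range only becomes free space once
 * the transaction commits.
 */
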
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking.  Also if we are currently trying to
	 * allocate blocks for the tree root we can't do the fast caching since
	 * we likely hold important locks.
	 */
	if (trans && (!trans->transaction->in_commit) &&
	    (root && root != root->fs_info->tree_root) &&
	    btrfs_test_opt(root, SPACE_CACHE)) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

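/*
 * Editor's note, not in the original file: div_factor() scales by tenths
 * and div_factor_fine() by hundredths, so div_factor(cache->key.offset, 9)
 * is 90% of the block group size and div_factor_fine(num, 75) is 75% of
 * num.
 */
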
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

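/*
 * Editor's sketch, not in the original file: a read-only caller that only
 * wants the committed view can pass trans == NULL, e.g.
 *
 *	u64 refs, flags;
 *	ret = btrfs_lookup_extent_info(NULL, root, bytenr, num_bytes,
 *				       &refs, &flags);
 *
 * With a live transaction the result additionally folds in the queued
 * delayed ref modifications described in the comment above.
 */
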
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs is generic, and
 * can be used in all cases the implicit back refs is used. The major
 * shortcoming of the full back refs is its overhead. Every time a tree
 * block gets COWed, we have to update back refs entry for all pointers in
 * it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is COW it. So we can detect the
 * event that tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs is used.
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */

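/*
 * Editor's example, not in the original file: a file extent at bytenr B
 * referenced by inode 257 at file offset 0 in subvolume 5 carries an
 * implicit back ref keyed as
 *
 *	(B, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared (e.g. relocated) leaf
 * at bytenr P is keyed as
 *
 *	(B, BTRFS_SHARED_DATA_REF_KEY, P)
 */
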
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

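/*
 * Editor's note, not in the original file: the root objectid feeds the
 * high crc while owner and offset feed the low crc, and the two are folded
 * with a 31-bit shift.  As a result, back refs belonging to one root land
 * close together in key space while still spreading across owners and
 * offsets.
 */
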
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)

{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

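/*
 * Editor's sketch, not in the original file: callers dispatch on the three
 * outcomes documented above, in the style of insert_inline_extent_backref()
 * below:
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, ...);
 *	if (ret == 0)
 *		update the existing inline ref in place;
 *	else if (ret == -ENOENT)
 *		insert a new inline ref at *ref_ret;
 *	else if (ret == -EAGAIN)
 *		fall back to a separate keyed back ref item;
 */
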
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

5d4f98a2
YZ
1685/*
1686 * helper to update/remove inline back ref
1687 */
1688static noinline_for_stack
1689int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1690 struct btrfs_root *root,
1691 struct btrfs_path *path,
1692 struct btrfs_extent_inline_ref *iref,
1693 int refs_to_mod,
1694 struct btrfs_delayed_extent_op *extent_op)
1695{
1696 struct extent_buffer *leaf;
1697 struct btrfs_extent_item *ei;
1698 struct btrfs_extent_data_ref *dref = NULL;
1699 struct btrfs_shared_data_ref *sref = NULL;
1700 unsigned long ptr;
1701 unsigned long end;
1702 u32 item_size;
1703 int size;
1704 int type;
1705 int ret;
1706 u64 refs;
1707
1708 leaf = path->nodes[0];
1709 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1710 refs = btrfs_extent_refs(leaf, ei);
1711 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1712 refs += refs_to_mod;
1713 btrfs_set_extent_refs(leaf, ei, refs);
1714 if (extent_op)
1715 __run_delayed_extent_op(extent_op, leaf, ei);
1716
1717 type = btrfs_extent_inline_ref_type(leaf, iref);
1718
1719 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1720 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1721 refs = btrfs_extent_data_ref_count(leaf, dref);
1722 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1723 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1724 refs = btrfs_shared_data_ref_count(leaf, sref);
1725 } else {
1726 refs = 1;
1727 BUG_ON(refs_to_mod != -1);
56bec294 1728 }
31840ae1 1729
5d4f98a2
YZ
1730 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1731 refs += refs_to_mod;
1732
1733 if (refs > 0) {
1734 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1735 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1736 else
1737 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1738 } else {
1739 size = btrfs_extent_inline_ref_size(type);
1740 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1741 ptr = (unsigned long)iref;
1742 end = (unsigned long)ei + item_size;
1743 if (ptr + size < end)
1744 memmove_extent_buffer(leaf, ptr, ptr + size,
1745 end - ptr - size);
1746 item_size -= size;
1747 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
5d4f98a2
YZ
1748 }
1749 btrfs_mark_buffer_dirty(leaf);
1750 return 0;
1751}
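
/*
 * Editorial sketch: remove_extent_backref() below drops references through
 * this helper by passing a negative refs_to_mod, e.g.
 *
 *	ret = update_inline_extent_backref(trans, root, path, iref, -1, NULL);
 *
 * If that takes the per-backref count to zero, the else branch above
 * memmoves the tail of the extent item over the dead inline ref and
 * truncates the item instead of leaving a zero-count ref behind.
 */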

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
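
/*
 * Editorial note: the shifts above convert bytes to the 512-byte sectors
 * that blkdev_issue_discard() expects.  For example, discarding a 1MiB
 * extent at byte offset 4096:
 *
 *	start >> 9 == 4096 >> 9      == sector 8
 *	len >> 9   == 1048576 >> 9   == 2048 sectors
 */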

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break;

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	return ret;
}
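
/*
 * Usage sketch (editorial, not from the original file): a caller that only
 * wants to know how much space was actually trimmed might do
 *
 *	u64 trimmed = 0;
 *	int err = btrfs_discard_extent(root, bytenr, num_bytes, &trimmed);
 *
 * On success trimmed holds the byte count summed over the stripes whose
 * devices support discard; callers that don't care can pass NULL.
 */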

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	}
	return ret;
}
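
/*
 * Usage sketch (editorial, values hypothetical): taking an extra reference
 * on a file extent that a snapshot now shares:
 *
 *	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
 *				   0, root->root_key.objectid,
 *				   inode_objectid, file_offset, 0);
 *
 * where parent == 0 keys the ref by owning root rather than by a shared
 * parent block.  Nothing touches the extent tree here; the update is
 * queued as a delayed ref and applied later by btrfs_run_delayed_refs().
 */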

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
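
/*
 * Worked example (editorial): suppose a head has two pending refs for the
 * same extent, +1 from a new snapshot and -1 from an unlink.  Running the
 * -1 first would momentarily free an extent that is still referenced, so
 * the two-pass scan above hands back the ADD ref first and only picks a
 * DROP once no ADDs remain.
 */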

static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
			/*
			 * there are still refs with lower seq numbers in the
			 * process of being added.  Don't run this ref yet.
			 */
			list_del_init(&locked_ref->cluster);
			mutex_unlock(&locked_ref->mutex);
			locked_ref = NULL;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			cond_resched();
			spin_lock(&delayed_refs->lock);
			continue;
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* all delayed refs have been processed, go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				goto next;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		/*
		 * we modified num_entries, but as we're currently running
		 * delayed refs, skip
		 *     wake_up(&delayed_refs->seq_wait);
		 * here.
		 */
		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;
next:
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       2 * 1024 * 1024,
			       btrfs_get_alloc_profile(root, 0),
			       CHUNK_ALLOC_NO_FORCE);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}

static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
			       unsigned long num_refs)
{
	struct list_head *first_seq = delayed_refs->seq_head.next;

	spin_unlock(&delayed_refs->lock);
	pr_debug("waiting for more refs (num %ld, first %p)\n",
		 num_refs, first_seq);
	wait_event(delayed_refs->seq_wait,
		   num_refs != delayed_refs->num_entries ||
		   delayed_refs->seq_head.next != first_seq);
	pr_debug("done waiting for more refs (num %ld, first %p)\n",
		 delayed_refs->num_entries, delayed_refs->seq_head.next);
	spin_lock(&delayed_refs->lock);
}

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	u64 delayed_start;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;
	unsigned long num_refs = 0;
	int consider_waiting;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	do_chunk_alloc(trans, root->fs_info->extent_root,
		       2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
		       CHUNK_ALLOC_NO_FORCE);

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	consider_waiting = 0;
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		delayed_start = delayed_refs->run_delayed_start;
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		if (delayed_start >= delayed_refs->run_delayed_start) {
			if (consider_waiting == 0) {
				/*
				 * btrfs_find_ref_cluster looped.  let's do one
				 * more cycle.  if we don't run any delayed ref
				 * during that cycle (because we can't because
				 * all of them are blocked) and if the number of
				 * refs doesn't change, we avoid busy waiting.
				 */
				consider_waiting = 1;
				num_refs = delayed_refs->num_entries;
			} else {
				wait_for_more_refs(delayed_refs, num_refs);
				/*
				 * after waiting, things have changed.  we
				 * dropped the lock and someone else might have
				 * run some refs, built new clusters and so on.
				 * therefore, we restart staleness detection.
				 */
				consider_waiting = 0;
			}
		}

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;

		if (ret || delayed_refs->run_delayed_start == 0) {
			/* refs were run, let's reset staleness detection */
			consider_waiting = 0;
		}
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
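
/*
 * Usage sketch (editorial): the transaction commit path flushes everything
 * with
 *
 *	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *
 * while other callers pass a finite count to bound how much extent tree
 * work they do on someone else's behalf.
 */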

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
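
/*
 * Usage sketch (editorial): marking a tree block's extent with the full
 * backref flag without touching its key:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 *
 * The flag change rides in a delayed extent op, so it is applied together
 * with the other delayed ref updates for that extent.
 */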

static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
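
/*
 * Usage sketch (editorial): the nodatacow write path uses this check to
 * decide whether an extent may be overwritten in place:
 *
 *	if (btrfs_cross_ref_exist(trans, root, ino, file_offset, bytenr)) {
 *		// some other root references the extent, fall back to COW
 *	}
 *
 * A return of 0 means only this (objectid, offset) pair in this root holds
 * the extent, in both the committed tree and the pending delayed refs.
 */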

static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc, int for_cow)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, int);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset, for_cow);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0,
					   for_cow);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
}

static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
fail:
	if (ret)
		return ret;
	return 0;
}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;
	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}

static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path,
						      inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED) {
		/* We're not cached, don't bother trying to write stuff out */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	/*
	 * Just to make absolutely sure we have enough space, we're going to
	 * preallocate 16 pages worth of space for each block group.  In
	 * practice we ought to use at most 8, but we need extra space so we
	 * can add our header and have a terminator between the extents and
	 * the bitmaps.
	 */
	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but it can't hurt just in
		 * case.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;

	/* chunk -> extended profile */
	if (extra_flags == 0)
		extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format.  If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	u64 num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	/* pick restriper's target profile if it's available */
	spin_lock(&root->fs_info->balance_lock);
	if (root->fs_info->balance_ctl) {
		struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
		u64 tgt = 0;

		if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
		    (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		    (flags & bctl->data.target)) {
			tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
		} else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
			   (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
			   (flags & bctl->sys.target)) {
			tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
		} else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
			   (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
			   (flags & bctl->meta.target)) {
			tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
		}

		if (tgt) {
			spin_unlock(&root->fs_info->balance_lock);
			flags = tgt;
			goto out;
		}
	}
	spin_unlock(&root->fs_info->balance_lock);

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP))) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	}

out:
	/* extended -> chunk profile */
	flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	return flags;
}
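
/*
 * Worked example (editorial): on a two-device filesystem with one device
 * missing, num_devices is still 2 (rw + missing), so a caller asking for
 * (RAID10 | RAID1 | RAID0) ends up with RAID1: the num_devices < 4 check
 * strips RAID10, and the last rule drops RAID0 whenever RAID1 survives.
 */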

static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		flags |= root->fs_info->avail_data_alloc_bits;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		flags |= root->fs_info->avail_system_alloc_bits;
	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
		flags |= root->fs_info->avail_metadata_alloc_bits;

	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(root, flags);
}

void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       BTRFS_BLOCK_GROUP_DATA);
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	if (root == root->fs_info->tree_root ||
	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
		alloc_chunk = 0;
		committed = 1;
	}

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}

		/*
		 * If we have less pinned bytes than we want to allocate then
		 * don't bother committing the transaction, it won't help us.
		 */
		if (data_sinfo->bytes_pinned < bytes)
			committed = 1;
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			committed = 1;
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      (u64)data_sinfo, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return 0;
}
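
/*
 * Usage sketch (editorial): reservations taken here are paired with
 * btrfs_free_reserved_data_space() below when the write never happens:
 *
 *	ret = btrfs_check_data_free_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	...
 *	if (write_failed)
 *		btrfs_free_reserved_data_space(inode, num_bytes);
 *
 * Both helpers round num_bytes up to the sectorsize, so the accounting
 * stays balanced.
 */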

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      (u64)data_sinfo, bytes, 0);
	spin_unlock(&data_sinfo->lock);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, u64 alloc_bytes,
			      int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space.  Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	num_allocated += global_rsv->size;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}
	thresh = btrfs_super_total_bytes(root->fs_info->super_copy);

	/* 256MB or 2% of the FS */
	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));

	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
		return 0;
	return 1;
}
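
/*
 * Worked example (editorial): with CHUNK_ALLOC_LIMITED on a 1TiB
 * filesystem, div_factor_fine(thresh, 1) is about 10GiB (1%), so a chunk
 * is allocated once free space in this space_info drops below that.  In
 * the default case allocation is refused only when the space_info already
 * spans more than max(256MiB, 2% of the FS) and less than 80% of it is
 * used.
 */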

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	BUG_ON(!profile_is_valid(flags, 0));

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

again:
	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		return 0;
	}

	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	if (ret < 0 && ret != -ENOSPC)
		goto out;

	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}
9ed74f2d 3482
9ed74f2d 3483/*
5da9d01b 3484 * shrink metadata reservation for delalloc
9ed74f2d 3485 */
663350ac 3486static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
f104d044 3487 bool wait_ordered)
5da9d01b 3488{
0ca1f7ce 3489 struct btrfs_block_rsv *block_rsv;
0019f10d 3490 struct btrfs_space_info *space_info;
663350ac 3491 struct btrfs_trans_handle *trans;
5da9d01b
YZ
3492 u64 reserved;
3493 u64 max_reclaim;
3494 u64 reclaimed = 0;
b1953bce 3495 long time_left;
877da174 3496 unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
b1953bce 3497 int loops = 0;
36e39c40 3498 unsigned long progress;
5da9d01b 3499
663350ac 3500 trans = (struct btrfs_trans_handle *)current->journal_info;
0ca1f7ce 3501 block_rsv = &root->fs_info->delalloc_block_rsv;
0019f10d 3502 space_info = block_rsv->space_info;
bf9022e0
CM
3503
3504 smp_mb();
fb25e914 3505 reserved = space_info->bytes_may_use;
36e39c40 3506 progress = space_info->reservation_progress;
3507
3508 if (reserved == 0)
3509 return 0;
c4f675cd 3510
3511 smp_mb();
3512 if (root->fs_info->delalloc_bytes == 0) {
3513 if (trans)
3514 return 0;
3515 btrfs_wait_ordered_extents(root, 0, 0);
3516 return 0;
3517 }
3518
5da9d01b 3519 max_reclaim = min(reserved, to_reclaim);
3520 nr_pages = max_t(unsigned long, nr_pages,
3521 max_reclaim >> PAGE_CACHE_SHIFT);
b1953bce 3522 while (loops < 1024) {
3523 /* have the flusher threads jump in and do some IO */
3524 smp_mb();
3525 nr_pages = min_t(unsigned long, nr_pages,
3526 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3527 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3528 WB_REASON_FS_FREE_SPACE);
5da9d01b 3529
0019f10d 3530 spin_lock(&space_info->lock);
3531 if (reserved > space_info->bytes_may_use)
3532 reclaimed += reserved - space_info->bytes_may_use;
3533 reserved = space_info->bytes_may_use;
0019f10d 3534 spin_unlock(&space_info->lock);
5da9d01b 3535
3536 loops++;
3537
3538 if (reserved == 0 || reclaimed >= max_reclaim)
3539 break;
3540
3541 if (trans && trans->transaction->blocked)
3542 return -EAGAIN;
bf9022e0 3543
3544 if (wait_ordered && !trans) {
3545 btrfs_wait_ordered_extents(root, 0, 0);
3546 } else {
3547 time_left = schedule_timeout_interruptible(1);
b1953bce 3548
3549 /* We were interrupted, exit */
3550 if (time_left)
3551 break;
3552 }
b1953bce 3553
3554 /* we've kicked the IO a few times, if anything has been freed,
3555 * exit. There is no sense in looping here for a long time
3556 * when we really need to commit the transaction, or there are
3557 * just too many writers without enough free space
3558 */
3559
3560 if (loops > 3) {
3561 smp_mb();
3562 if (progress != space_info->reservation_progress)
3563 break;
3564 }
bf9022e0 3565
5da9d01b 3566 }
f104d044 3567
3568 return reclaimed >= to_reclaim;
3569}
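/*
 * Editorial sketch (not part of the original file): shrink_delalloc()
 * returns nonzero once at least to_reclaim bytes came back, 0 if the
 * loop gave up, and -EAGAIN when a blocked transaction means the caller
 * must back off rather than sleep here.  A caller wanting 2MB back:
 *
 *	ret = shrink_delalloc(root, 2 * 1024 * 1024, false);
 *	if (ret < 0)
 *		return ret;
 *	if (!ret)
 *		...fall back to committing the transaction...
 */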
3570
3571/**
3572 * may_commit_transaction - possibly commit the transaction if it's ok to
3573 * @root - the root we're allocating for
3574 * @bytes - the number of bytes we want to reserve
3575 * @force - force the commit
8bb8ab2e 3576 *
3577 * This will check to make sure that committing the transaction will actually
3578 * get us somewhere and then commit the transaction if it does. Otherwise it
3579 * will return -ENOSPC.
8bb8ab2e 3580 */
3581static int may_commit_transaction(struct btrfs_root *root,
3582 struct btrfs_space_info *space_info,
3583 u64 bytes, int force)
3584{
3585 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3586 struct btrfs_trans_handle *trans;
3587
3588 trans = (struct btrfs_trans_handle *)current->journal_info;
3589 if (trans)
3590 return -EAGAIN;
3591
3592 if (force)
3593 goto commit;
3594
3595 /* See if there is enough pinned space to make this reservation */
3596 spin_lock(&space_info->lock);
3597 if (space_info->bytes_pinned >= bytes) {
3598 spin_unlock(&space_info->lock);
3599 goto commit;
3600 }
3601 spin_unlock(&space_info->lock);
3602
3603 /*
3604 * See if there is some space in the delayed insertion reservation for
3605 * this reservation.
3606 */
3607 if (space_info != delayed_rsv->space_info)
3608 return -ENOSPC;
3609
3610 spin_lock(&delayed_rsv->lock);
3611 if (delayed_rsv->size < bytes) {
3612 spin_unlock(&delayed_rsv->lock);
3613 return -ENOSPC;
3614 }
3615 spin_unlock(&delayed_rsv->lock);
3616
3617commit:
3618 trans = btrfs_join_transaction(root);
3619 if (IS_ERR(trans))
3620 return -ENOSPC;
3621
3622 return btrfs_commit_transaction(trans, root);
3623}
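/*
 * Editorial sketch (not part of the original file): with force == 0 the
 * commit only happens when enough pinned or delayed-insertion space
 * exists to cover the request, so a reservation path can retry cheaply:
 *
 *	ret = may_commit_transaction(root, space_info, orig_bytes, 0);
 *	if (!ret)
 *		goto again;
 */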
3624
3625/**
3626 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3627 * @root - the root we're allocating for
3628 * @block_rsv - the block_rsv we're allocating for
3629 * @orig_bytes - the number of bytes we want
3630 * @flush - whether or not we can flush to make our reservation
8bb8ab2e 3631 *
3632 * This will reserve orig_bytes number of bytes from the space info associated
3633 * with the block_rsv. If there is not enough space it will make an attempt to
3634 * flush out space to make room. It will do this by flushing delalloc if
3635 * possible or committing the transaction. If flush is 0 then no attempts to
3636 * regain reservations will be made and this will fail if there is not enough
3637 * space already.
8bb8ab2e 3638 */
4a92b1b8 3639static int reserve_metadata_bytes(struct btrfs_root *root,
3640 struct btrfs_block_rsv *block_rsv,
3641 u64 orig_bytes, int flush)
9ed74f2d 3642{
f0486c68 3643 struct btrfs_space_info *space_info = block_rsv->space_info;
2bf64758 3644 u64 used;
3645 u64 num_bytes = orig_bytes;
3646 int retries = 0;
3647 int ret = 0;
38227933 3648 bool committed = false;
fdb5effd 3649 bool flushing = false;
f104d044 3650 bool wait_ordered = false;
9ed74f2d 3651
8bb8ab2e 3652again:
fdb5effd 3653 ret = 0;
8bb8ab2e 3654 spin_lock(&space_info->lock);
3655 /*
3656 * We only want to wait if somebody other than us is flushing and we are
3657 * actually allowed to flush.
3658 */
3659 while (flush && !flushing && space_info->flush) {
3660 spin_unlock(&space_info->lock);
3661 /*
3662 * If we have a trans handle we can't wait because the flusher
3663 * may have to commit the transaction, which would mean we would
3664 * deadlock since we are waiting for the flusher to finish, but
3665 * hold the current transaction open.
3666 */
663350ac 3667 if (current->journal_info)
3668 return -EAGAIN;
3669 ret = wait_event_interruptible(space_info->wait,
3670 !space_info->flush);
3671 /* Must have been interrupted, return */
3672 if (ret)
3673 return -EINTR;
3674
3675 spin_lock(&space_info->lock);
3676 }
3677
3678 ret = -ENOSPC;
3679 used = space_info->bytes_used + space_info->bytes_reserved +
3680 space_info->bytes_pinned + space_info->bytes_readonly +
3681 space_info->bytes_may_use;
9ed74f2d 3682
3683 /*
3684 * The idea here is that if we've not already over-reserved the block group
3685 * then we can go ahead and save our reservation first and then start
3686 * flushing if we need to. Otherwise if we've already overcommitted
3687 * lets start flushing stuff first and then come back and try to make
3688 * our reservation.
3689 */
3690 if (used <= space_info->total_bytes) {
3691 if (used + orig_bytes <= space_info->total_bytes) {
fb25e914 3692 space_info->bytes_may_use += orig_bytes;
3693 trace_btrfs_space_reservation(root->fs_info,
3694 "space_info",
3695 (u64)space_info,
3696 orig_bytes, 1);
3697 ret = 0;
3698 } else {
3699 /*
3700 * Ok set num_bytes to orig_bytes since we aren't
3701 * overcommitted, this way we only try and reclaim what
3702 * we need.
3703 */
3704 num_bytes = orig_bytes;
3705 }
3706 } else {
3707 /*
3708 * Ok we're over committed, set num_bytes to the overcommitted
3709 * amount plus the amount of bytes that we need for this
3710 * reservation.
3711 */
f104d044 3712 wait_ordered = true;
2bf64758 3713 num_bytes = used - space_info->total_bytes +
3714 (orig_bytes * (retries + 1));
3715 }
9ed74f2d 3716
36ba022a 3717 if (ret) {
3718 u64 profile = btrfs_get_alloc_profile(root, 0);
3719 u64 avail;
3720
3721 /*
3722 * If we have a lot of space that's pinned, don't bother doing
3723 * the overcommit dance yet and just commit the transaction.
3724 */
3725 avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3726 do_div(avail, 10);
663350ac 3727 if (space_info->bytes_pinned >= avail && flush && !committed) {
3728 space_info->flush = 1;
3729 flushing = true;
3730 spin_unlock(&space_info->lock);
3731 ret = may_commit_transaction(root, space_info,
3732 orig_bytes, 1);
3733 if (ret)
3734 goto out;
3735 committed = true;
3736 goto again;
3737 }
3738
3739 spin_lock(&root->fs_info->free_chunk_lock);
3740 avail = root->fs_info->free_chunk_space;
3741
3742 /*
3743 * If we have dup, raid1 or raid10 then only half of the free
3744 * space is actually usable.
3745 */
3746 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3747 BTRFS_BLOCK_GROUP_RAID1 |
3748 BTRFS_BLOCK_GROUP_RAID10))
3749 avail >>= 1;
3750
3751 /*
3752 * If we aren't flushing don't let us overcommit too much, say
3753 * 1/8th of the space. If we can flush, let it overcommit up to
3754 * 1/2 of the space.
3755 */
3756 if (flush)
3757 avail >>= 3;
3758 else
3759 avail >>= 1;
3760 spin_unlock(&root->fs_info->free_chunk_lock);
3761
9a82ca65 3762 if (used + num_bytes < space_info->total_bytes + avail) {
2bf64758 3763 space_info->bytes_may_use += orig_bytes;
3764 trace_btrfs_space_reservation(root->fs_info,
3765 "space_info",
3766 (u64)space_info,
3767 orig_bytes, 1);
2bf64758 3768 ret = 0;
3769 } else {
3770 wait_ordered = true;
3771 }
3772 }
3773
3774 /*
3775 * Couldn't make our reservation, save our place so while we're trying
3776 * to reclaim space we can actually use it instead of somebody else
3777 * stealing it from us.
3778 */
3779 if (ret && flush) {
3780 flushing = true;
3781 space_info->flush = 1;
8bb8ab2e 3782 }
9ed74f2d 3783
f0486c68 3784 spin_unlock(&space_info->lock);
9ed74f2d 3785
fdb5effd 3786 if (!ret || !flush)
8bb8ab2e 3787 goto out;
f0486c68 3788
3789 /*
3790 * We do synchronous shrinking since we don't actually unreserve
3791 * metadata until after the IO is completed.
3792 */
663350ac 3793 ret = shrink_delalloc(root, num_bytes, wait_ordered);
fdb5effd 3794 if (ret < 0)
8bb8ab2e 3795 goto out;
f0486c68 3796
3797 ret = 0;
3798
3799 /*
3800 * So if we were overcommitted it's possible that somebody else flushed
3801 * out enough space and we simply didn't have enough space to reclaim,
3802 * so go back around and try again.
3803 */
3804 if (retries < 2) {
f104d044 3805 wait_ordered = true;
3806 retries++;
3807 goto again;
3808 }
f0486c68 3809
8bb8ab2e 3810 ret = -ENOSPC;
3811 if (committed)
3812 goto out;
3813
663350ac 3814 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
38227933 3815 if (!ret) {
38227933 3816 committed = true;
8bb8ab2e 3817 goto again;
38227933 3818 }
3819
3820out:
fdb5effd 3821 if (flushing) {
8bb8ab2e 3822 spin_lock(&space_info->lock);
3823 space_info->flush = 0;
3824 wake_up_all(&space_info->wait);
8bb8ab2e 3825 spin_unlock(&space_info->lock);
f0486c68 3826 }
3827 return ret;
3828}
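/*
 * Editorial sketch (not part of the original file, size hypothetical):
 * callers normally reach this through the block_rsv helpers below, which
 * pair a successful reservation with block_rsv accounting:
 *
 *	ret = reserve_metadata_bytes(root, block_rsv, 64 * 1024, 1);
 *	if (!ret)
 *		block_rsv_add_bytes(block_rsv, 64 * 1024, 1);
 */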
3829
3830static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3831 struct btrfs_root *root)
3832{
3833 struct btrfs_block_rsv *block_rsv = NULL;
3834
3835 if (root->ref_cows || root == root->fs_info->csum_root)
f0486c68 3836 block_rsv = trans->block_rsv;
3837
3838 if (!block_rsv)
3839 block_rsv = root->block_rsv;
3840
3841 if (!block_rsv)
3842 block_rsv = &root->fs_info->empty_block_rsv;
3843
3844 return block_rsv;
3845}
3846
3847static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3848 u64 num_bytes)
3849{
3850 int ret = -ENOSPC;
3851 spin_lock(&block_rsv->lock);
3852 if (block_rsv->reserved >= num_bytes) {
3853 block_rsv->reserved -= num_bytes;
3854 if (block_rsv->reserved < block_rsv->size)
3855 block_rsv->full = 0;
3856 ret = 0;
3857 }
3858 spin_unlock(&block_rsv->lock);
3859 return ret;
3860}
3861
3862static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3863 u64 num_bytes, int update_size)
3864{
3865 spin_lock(&block_rsv->lock);
3866 block_rsv->reserved += num_bytes;
3867 if (update_size)
3868 block_rsv->size += num_bytes;
3869 else if (block_rsv->reserved >= block_rsv->size)
3870 block_rsv->full = 1;
3871 spin_unlock(&block_rsv->lock);
3872}
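/*
 * Editorial worked example (hypothetical numbers): with size == 100 and
 * reserved == 90, block_rsv_add_bytes(rsv, 20, 0) leaves size at 100,
 * raises reserved to 110 and marks the rsv full; the same call with
 * update_size == 1 would instead grow size to 120 and leave full clear.
 */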
3873
3874static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
3875 struct btrfs_block_rsv *block_rsv,
62a45b60 3876 struct btrfs_block_rsv *dest, u64 num_bytes)
3877{
3878 struct btrfs_space_info *space_info = block_rsv->space_info;
3879
3880 spin_lock(&block_rsv->lock);
3881 if (num_bytes == (u64)-1)
3882 num_bytes = block_rsv->size;
3883 block_rsv->size -= num_bytes;
3884 if (block_rsv->reserved >= block_rsv->size) {
3885 num_bytes = block_rsv->reserved - block_rsv->size;
3886 block_rsv->reserved = block_rsv->size;
3887 block_rsv->full = 1;
3888 } else {
3889 num_bytes = 0;
3890 }
3891 spin_unlock(&block_rsv->lock);
3892
3893 if (num_bytes > 0) {
3894 if (dest) {
3895 spin_lock(&dest->lock);
3896 if (!dest->full) {
3897 u64 bytes_to_add;
3898
3899 bytes_to_add = dest->size - dest->reserved;
3900 bytes_to_add = min(num_bytes, bytes_to_add);
3901 dest->reserved += bytes_to_add;
3902 if (dest->reserved >= dest->size)
3903 dest->full = 1;
3904 num_bytes -= bytes_to_add;
3905 }
3906 spin_unlock(&dest->lock);
3907 }
3908 if (num_bytes) {
f0486c68 3909 spin_lock(&space_info->lock);
fb25e914 3910 space_info->bytes_may_use -= num_bytes;
3911 trace_btrfs_space_reservation(fs_info, "space_info",
3912 (u64)space_info,
3913 num_bytes, 0);
36e39c40 3914 space_info->reservation_progress++;
f0486c68 3915 spin_unlock(&space_info->lock);
4e06bdd6 3916 }
9ed74f2d 3917 }
f0486c68 3918}
4e06bdd6 3919
3920static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3921 struct btrfs_block_rsv *dst, u64 num_bytes)
3922{
3923 int ret;
9ed74f2d 3924
3925 ret = block_rsv_use_bytes(src, num_bytes);
3926 if (ret)
3927 return ret;
9ed74f2d 3928
f0486c68 3929 block_rsv_add_bytes(dst, num_bytes, 1);
3930 return 0;
3931}
3932
f0486c68 3933void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
9ed74f2d 3934{
3935 memset(rsv, 0, sizeof(*rsv));
3936 spin_lock_init(&rsv->lock);
3937}
3938
3939struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3940{
3941 struct btrfs_block_rsv *block_rsv;
3942 struct btrfs_fs_info *fs_info = root->fs_info;
9ed74f2d 3943
3944 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3945 if (!block_rsv)
3946 return NULL;
9ed74f2d 3947
f0486c68 3948 btrfs_init_block_rsv(block_rsv);
3949 block_rsv->space_info = __find_space_info(fs_info,
3950 BTRFS_BLOCK_GROUP_METADATA);
3951 return block_rsv;
3952}
9ed74f2d 3953
3954void btrfs_free_block_rsv(struct btrfs_root *root,
3955 struct btrfs_block_rsv *rsv)
3956{
3957 btrfs_block_rsv_release(root, rsv, (u64)-1);
3958 kfree(rsv);
3959}
3960
3961static inline int __block_rsv_add(struct btrfs_root *root,
3962 struct btrfs_block_rsv *block_rsv,
3963 u64 num_bytes, int flush)
9ed74f2d 3964{
f0486c68 3965 int ret;
9ed74f2d 3966
3967 if (num_bytes == 0)
3968 return 0;
8bb8ab2e 3969
61b520a9 3970 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3971 if (!ret) {
3972 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3973 return 0;
3974 }
9ed74f2d 3975
f0486c68 3976 return ret;
f0486c68 3977}
9ed74f2d 3978
3979int btrfs_block_rsv_add(struct btrfs_root *root,
3980 struct btrfs_block_rsv *block_rsv,
3981 u64 num_bytes)
3982{
3983 return __block_rsv_add(root, block_rsv, num_bytes, 1);
3984}
3985
3986int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3987 struct btrfs_block_rsv *block_rsv,
3988 u64 num_bytes)
f0486c68 3989{
61b520a9 3990 return __block_rsv_add(root, block_rsv, num_bytes, 0);
f0486c68 3991}
9ed74f2d 3992
4a92b1b8 3993int btrfs_block_rsv_check(struct btrfs_root *root,
36ba022a 3994 struct btrfs_block_rsv *block_rsv, int min_factor)
3995{
3996 u64 num_bytes = 0;
f0486c68 3997 int ret = -ENOSPC;
9ed74f2d 3998
3999 if (!block_rsv)
4000 return 0;
9ed74f2d 4001
f0486c68 4002 spin_lock(&block_rsv->lock);
4003 num_bytes = div_factor(block_rsv->size, min_factor);
4004 if (block_rsv->reserved >= num_bytes)
4005 ret = 0;
4006 spin_unlock(&block_rsv->lock);
9ed74f2d 4007
4008 return ret;
4009}
4010
4011static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
4012 struct btrfs_block_rsv *block_rsv,
4013 u64 min_reserved, int flush)
4014{
4015 u64 num_bytes = 0;
4016 int ret = -ENOSPC;
4017
4018 if (!block_rsv)
4019 return 0;
4020
4021 spin_lock(&block_rsv->lock);
4022 num_bytes = min_reserved;
13553e52 4023 if (block_rsv->reserved >= num_bytes)
f0486c68 4024 ret = 0;
13553e52 4025 else
f0486c68 4026 num_bytes -= block_rsv->reserved;
f0486c68 4027 spin_unlock(&block_rsv->lock);
13553e52 4028
4029 if (!ret)
4030 return 0;
4031
aa38a711 4032 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4033 if (!ret) {
4034 block_rsv_add_bytes(block_rsv, num_bytes, 0);
f0486c68 4035 return 0;
6a63209f 4036 }
9ed74f2d 4037
13553e52 4038 return ret;
4039}
4040
4041int btrfs_block_rsv_refill(struct btrfs_root *root,
4042 struct btrfs_block_rsv *block_rsv,
4043 u64 min_reserved)
4044{
4045 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4046}
4047
4048int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4049 struct btrfs_block_rsv *block_rsv,
4050 u64 min_reserved)
4051{
4052 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4053}
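/*
 * Editorial note (not part of the original file): btrfs_block_rsv_add()
 * grows the rsv's size by num_bytes, while the refill variants only top
 * reserved back up to min_reserved without touching size, so repeated
 * refills do not inflate the reservation:
 *
 *	ret = btrfs_block_rsv_refill(root, rsv, min_size);
 *	if (ret)
 *		return ret;
 */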
4054
4055int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4056 struct btrfs_block_rsv *dst_rsv,
4057 u64 num_bytes)
4058{
4059 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4060}
4061
4062void btrfs_block_rsv_release(struct btrfs_root *root,
4063 struct btrfs_block_rsv *block_rsv,
4064 u64 num_bytes)
4065{
4066 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4067 if (global_rsv->full || global_rsv == block_rsv ||
4068 block_rsv->space_info != global_rsv->space_info)
4069 global_rsv = NULL;
4070 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4071 num_bytes);
4072}
4073
4074/*
4075 * helper to calculate size of global block reservation.
4076 * the desired value is sum of space used by extent tree,
4077 * checksum tree and root tree
6a63209f 4078 */
8929ecfa 4079static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
6a63209f 4080{
4081 struct btrfs_space_info *sinfo;
4082 u64 num_bytes;
4083 u64 meta_used;
4084 u64 data_used;
6c41761f 4085 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
6a63209f 4086
4087 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4088 spin_lock(&sinfo->lock);
4089 data_used = sinfo->bytes_used;
4090 spin_unlock(&sinfo->lock);
33b4d47f 4091
4092 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4093 spin_lock(&sinfo->lock);
4094 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4095 data_used = 0;
4096 meta_used = sinfo->bytes_used;
4097 spin_unlock(&sinfo->lock);
ab6e2410 4098
4099 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4100 csum_size * 2;
4101 num_bytes += div64_u64(data_used + meta_used, 50);
4e06bdd6 4102
4103 if (num_bytes * 3 > meta_used)
4104 num_bytes = div64_u64(meta_used, 3);
ab6e2410 4105
4106 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4107}
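/*
 * Editorial worked example (hypothetical numbers: 4K blocks, 4-byte
 * crc32c csums): data_used == 100GB and meta_used == 1GB give
 * (100GB >> 12) * 4 * 2 == 200MB of csum items plus 101GB / 50 ~= 2GB,
 * and the final clamp then caps the result at meta_used / 3 ~= 341MB
 * before it is rounded up to a multiple of the leaf size.
 */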
6a63209f 4108
4109static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4110{
4111 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4112 struct btrfs_space_info *sinfo = block_rsv->space_info;
4113 u64 num_bytes;
6a63209f 4114
8929ecfa 4115 num_bytes = calc_global_metadata_size(fs_info);
33b4d47f 4116
4117 spin_lock(&block_rsv->lock);
4118 spin_lock(&sinfo->lock);
4e06bdd6 4119
8929ecfa 4120 block_rsv->size = num_bytes;
4e06bdd6 4121
8929ecfa 4122 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4123 sinfo->bytes_reserved + sinfo->bytes_readonly +
4124 sinfo->bytes_may_use;
4125
4126 if (sinfo->total_bytes > num_bytes) {
4127 num_bytes = sinfo->total_bytes - num_bytes;
4128 block_rsv->reserved += num_bytes;
fb25e914 4129 sinfo->bytes_may_use += num_bytes;
4130 trace_btrfs_space_reservation(fs_info, "space_info",
4131 (u64)sinfo, num_bytes, 1);
6a63209f 4132 }
6a63209f 4133
4134 if (block_rsv->reserved >= block_rsv->size) {
4135 num_bytes = block_rsv->reserved - block_rsv->size;
fb25e914 4136 sinfo->bytes_may_use -= num_bytes;
4137 trace_btrfs_space_reservation(fs_info, "space_info",
4138 (u64)sinfo, num_bytes, 0);
36e39c40 4139 sinfo->reservation_progress++;
4140 block_rsv->reserved = block_rsv->size;
4141 block_rsv->full = 1;
4142 }
182608c8 4143
4144 spin_unlock(&sinfo->lock);
4145 spin_unlock(&block_rsv->lock);
4146}
4147
f0486c68 4148static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 4149{
f0486c68 4150 struct btrfs_space_info *space_info;
6a63209f 4151
4152 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4153 fs_info->chunk_block_rsv.space_info = space_info;
6a63209f 4154
f0486c68 4155 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
8929ecfa 4156 fs_info->global_block_rsv.space_info = space_info;
8929ecfa 4157 fs_info->delalloc_block_rsv.space_info = space_info;
4158 fs_info->trans_block_rsv.space_info = space_info;
4159 fs_info->empty_block_rsv.space_info = space_info;
6d668dda 4160 fs_info->delayed_block_rsv.space_info = space_info;
f0486c68 4161
4162 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4163 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4164 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4165 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
f0486c68 4166 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
8929ecfa 4167
8929ecfa 4168 update_global_block_rsv(fs_info);
4169}
4170
8929ecfa 4171static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
6a63209f 4172{
4173 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4174 (u64)-1);
4175 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4176 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4177 WARN_ON(fs_info->trans_block_rsv.size > 0);
4178 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4179 WARN_ON(fs_info->chunk_block_rsv.size > 0);
4180 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4181 WARN_ON(fs_info->delayed_block_rsv.size > 0);
4182 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4183}
4184
4185void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4186 struct btrfs_root *root)
6a63209f 4187{
4188 if (!trans->bytes_reserved)
4189 return;
6a63209f 4190
4191 trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
4192 trans->bytes_reserved, 0);
b24e03db 4193 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4194 trans->bytes_reserved = 0;
4195}
6a63209f 4196
4197int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4198 struct inode *inode)
4199{
4200 struct btrfs_root *root = BTRFS_I(inode)->root;
4201 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4202 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4203
4204 /*
4205 * We need to hold space in order to delete our orphan item once we've
4206 * added it, so this takes the reservation so we can release it later
4207 * when we are truly done with the orphan item.
d68fc57b 4208 */
ff5714cc 4209 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4210 trace_btrfs_space_reservation(root->fs_info, "orphan",
4211 btrfs_ino(inode), num_bytes, 1);
d68fc57b 4212 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4213}
4214
d68fc57b 4215void btrfs_orphan_release_metadata(struct inode *inode)
97e728d4 4216{
d68fc57b 4217 struct btrfs_root *root = BTRFS_I(inode)->root;
ff5714cc 4218 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4219 trace_btrfs_space_reservation(root->fs_info, "orphan",
4220 btrfs_ino(inode), num_bytes, 0);
4221 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4222}
97e728d4 4223
4224int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4225 struct btrfs_pending_snapshot *pending)
4226{
4227 struct btrfs_root *root = pending->root;
4228 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4229 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4230 /*
4231 * two for root back/forward refs, two for directory entries
4232 * and one for root of the snapshot.
4233 */
16cdcec7 4234 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4235 dst_rsv->space_info = src_rsv->space_info;
4236 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4237}
4238
4239/**
4240 * drop_outstanding_extent - drop an outstanding extent
4241 * @inode: the inode we're dropping the extent for
4242 *
4243 * This is called when we are freeing up an outstanding extent, either
4244 * after an error or after an extent is written. This will return the number of
4245 * reserved extents that need to be freed. This must be called with
4246 * BTRFS_I(inode)->lock held.
4247 */
4248static unsigned drop_outstanding_extent(struct inode *inode)
4249{
7fd2ae21 4250 unsigned drop_inode_space = 0;
4251 unsigned dropped_extents = 0;
4252
4253 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4254 BTRFS_I(inode)->outstanding_extents--;
4255
4256 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4257 BTRFS_I(inode)->delalloc_meta_reserved) {
4258 drop_inode_space = 1;
4259 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4260 }
4261
4262 /*
4263 * If we have at least as many outstanding extents as we have
4264 * reserved then we need to leave the reserved extents count alone.
4265 */
4266 if (BTRFS_I(inode)->outstanding_extents >=
4267 BTRFS_I(inode)->reserved_extents)
7fd2ae21 4268 return drop_inode_space;
4269
4270 dropped_extents = BTRFS_I(inode)->reserved_extents -
4271 BTRFS_I(inode)->outstanding_extents;
4272 BTRFS_I(inode)->reserved_extents -= dropped_extents;
7fd2ae21 4273 return dropped_extents + drop_inode_space;
4274}
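/*
 * Editorial worked example (hypothetical counts): outstanding_extents
 * dropping 3 -> 2 with reserved_extents == 4 releases 4 - 2 == 2
 * reserved extents; if the count instead hits 0 while
 * delalloc_meta_reserved is set, one extra unit comes back for the
 * pending inode update reservation.
 */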
4275
4276/**
4277 * calc_csum_metadata_size - return the amount of metadata space that must be
4278 * reserved/freed for the given bytes.
4279 * @inode: the inode we're manipulating
4280 * @num_bytes: the number of bytes in question
4281 * @reserve: 1 if we are reserving space, 0 if we are freeing space
4282 *
4283 * This adjusts the number of csum_bytes in the inode and then returns the
4284 * correct amount of metadata that must either be reserved or freed. We
4285 * calculate how many checksums we can fit into one leaf and then divide the
4286 * number of bytes that will need to be checksummed by this value to figure out
4287 * how many checksums will be required. If we are adding bytes then the number
4288 * may go up and we will return the number of additional bytes that must be
4289 * reserved. If it is going down we will return the number of bytes that must
4290 * be freed.
4291 *
4292 * This must be called with BTRFS_I(inode)->lock held.
4293 */
4294static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4295 int reserve)
6324fbf3 4296{
4297 struct btrfs_root *root = BTRFS_I(inode)->root;
4298 u64 csum_size;
4299 int num_csums_per_leaf;
4300 int num_csums;
4301 int old_csums;
4302
4303 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4304 BTRFS_I(inode)->csum_bytes == 0)
4305 return 0;
4306
4307 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4308 if (reserve)
4309 BTRFS_I(inode)->csum_bytes += num_bytes;
4310 else
4311 BTRFS_I(inode)->csum_bytes -= num_bytes;
4312 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4313 num_csums_per_leaf = (int)div64_u64(csum_size,
4314 sizeof(struct btrfs_csum_item) +
4315 sizeof(struct btrfs_disk_key));
4316 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4317 num_csums = num_csums + num_csums_per_leaf - 1;
4318 num_csums = num_csums / num_csums_per_leaf;
4319
4320 old_csums = old_csums + num_csums_per_leaf - 1;
4321 old_csums = old_csums / num_csums_per_leaf;
4322
4323 /* No change, no need to reserve more */
4324 if (old_csums == num_csums)
4325 return 0;
4326
4327 if (reserve)
4328 return btrfs_calc_trans_metadata_size(root,
4329 num_csums - old_csums);
4330
4331 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
0ca1f7ce 4332}
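/*
 * Editorial worked example (hypothetical: 4K sectors, ~100 csum items
 * per leaf): an inode holding 1MB of csum_bytes needs 256 csums, i.e.
 * 3 leaves; reserving another 1MB doubles that to 512 csums and 6
 * leaves, so the call returns btrfs_calc_trans_metadata_size(root, 3).
 */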
c146afad 4333
4334int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4335{
4336 struct btrfs_root *root = BTRFS_I(inode)->root;
4337 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
9e0baf60 4338 u64 to_reserve = 0;
660d3f6c 4339 u64 csum_bytes;
9e0baf60 4340 unsigned nr_extents = 0;
660d3f6c 4341 int extra_reserve = 0;
c09544e0 4342 int flush = 1;
0ca1f7ce 4343 int ret;
6324fbf3 4344
660d3f6c 4345 /* Need to be holding the i_mutex here if we aren't free space cache */
4346 if (btrfs_is_free_space_inode(root, inode))
4347 flush = 0;
4348 else
4349 WARN_ON(!mutex_is_locked(&inode->i_mutex));
4350
4351 if (flush && btrfs_transaction_in_commit(root->fs_info))
0ca1f7ce 4352 schedule_timeout(1);
ec44a35c 4353
0ca1f7ce 4354 num_bytes = ALIGN(num_bytes, root->sectorsize);
8bb8ab2e 4355
4356 spin_lock(&BTRFS_I(inode)->lock);
4357 BTRFS_I(inode)->outstanding_extents++;
4358
4359 if (BTRFS_I(inode)->outstanding_extents >
660d3f6c 4360 BTRFS_I(inode)->reserved_extents)
4361 nr_extents = BTRFS_I(inode)->outstanding_extents -
4362 BTRFS_I(inode)->reserved_extents;
57a45ced 4363
4364 /*
4365 * Add an item to reserve for updating the inode when we complete the
4366 * delalloc io.
4367 */
4368 if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4369 nr_extents++;
660d3f6c 4370 extra_reserve = 1;
593060d7 4371 }
4372
4373 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
7709cde3 4374 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
660d3f6c 4375 csum_bytes = BTRFS_I(inode)->csum_bytes;
9e0baf60 4376 spin_unlock(&BTRFS_I(inode)->lock);
57a45ced 4377
36ba022a 4378 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
9e0baf60 4379 if (ret) {
7ed49f18 4380 u64 to_free = 0;
9e0baf60 4381 unsigned dropped;
7ed49f18 4382
7709cde3 4383 spin_lock(&BTRFS_I(inode)->lock);
9e0baf60 4384 dropped = drop_outstanding_extent(inode);
9e0baf60 4385 /*
660d3f6c
JB
4386 * If the inodes csum_bytes is the same as the original
4387 * csum_bytes then we know we haven't raced with any free()ers
4388 * so we can just reduce our inodes csum bytes and carry on.
4389 * Otherwise we have to do the normal free thing to account for
4390 * the case that the free side didn't free up its reserve
4391 * because of this outstanding reservation.
9e0baf60 4392 */
4393 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4394 calc_csum_metadata_size(inode, num_bytes, 0);
4395 else
4396 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4397 spin_unlock(&BTRFS_I(inode)->lock);
4398 if (dropped)
4399 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4400
8c2a3ca2 4401 if (to_free) {
7ed49f18 4402 btrfs_block_rsv_release(root, block_rsv, to_free);
4403 trace_btrfs_space_reservation(root->fs_info,
4404 "delalloc",
4405 btrfs_ino(inode),
4406 to_free, 0);
4407 }
0ca1f7ce 4408 return ret;
9e0baf60 4409 }
25179201 4410
4411 spin_lock(&BTRFS_I(inode)->lock);
4412 if (extra_reserve) {
4413 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4414 nr_extents--;
4415 }
4416 BTRFS_I(inode)->reserved_extents += nr_extents;
4417 spin_unlock(&BTRFS_I(inode)->lock);
4418
4419 if (to_reserve)
4420 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4421 btrfs_ino(inode), to_reserve, 1);
4422 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4423
4424 return 0;
4425}
4426
4427/**
4428 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4429 * @inode: the inode to release the reservation for
4430 * @num_bytes: the number of bytes we're releasing
4431 *
4432 * This will release the metadata reservation for an inode. This can be called
4433 * once we complete IO for a given set of bytes to release their metadata
4434 * reservations.
4435 */
4436void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4437{
4438 struct btrfs_root *root = BTRFS_I(inode)->root;
4439 u64 to_free = 0;
4440 unsigned dropped;
4441
4442 num_bytes = ALIGN(num_bytes, root->sectorsize);
7709cde3 4443 spin_lock(&BTRFS_I(inode)->lock);
9e0baf60 4444 dropped = drop_outstanding_extent(inode);
97e728d4 4445
4446 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4447 spin_unlock(&BTRFS_I(inode)->lock);
4448 if (dropped > 0)
4449 to_free += btrfs_calc_trans_metadata_size(root, dropped);
0ca1f7ce 4450
4451 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4452 btrfs_ino(inode), to_free, 0);
4453 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4454 to_free);
4455}
4456
4457/**
4458 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4459 * @inode: inode we're writing to
4460 * @num_bytes: the number of bytes we want to allocate
4461 *
4462 * This will do the following things
4463 *
4464 * o reserve space in the data space info for num_bytes
4465 * o reserve space in the metadata space info based on number of outstanding
4466 * extents and how much csums will be needed
4467 * o add to the inodes ->delalloc_bytes
4468 * o add it to the fs_info's delalloc inodes list.
4469 *
4470 * This will return 0 for success and -ENOSPC if there is no space left.
4471 */
4472int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4473{
4474 int ret;
4475
4476 ret = btrfs_check_data_free_space(inode, num_bytes);
d397712b 4477 if (ret)
4478 return ret;
4479
4480 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4481 if (ret) {
4482 btrfs_free_reserved_data_space(inode, num_bytes);
4483 return ret;
4484 }
4485
4486 return 0;
4487}
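/*
 * Editorial sketch (not part of the original file): the buffered write
 * path reserves both halves before dirtying pages and unwinds on error:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	...dirty the pages; on any later failure call
 *	btrfs_delalloc_release_space(inode, num_bytes)...
 */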
4488
4489/**
4490 * btrfs_delalloc_release_space - release data and metadata space for delalloc
4491 * @inode: inode we're releasing space for
4492 * @num_bytes: the number of bytes we want to free up
4493 *
4494 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
4495 * called in the case that we don't need the metadata AND data reservations
4496 * anymore. So if there is an error or we insert an inline extent.
4497 *
4498 * This function will release the metadata space that was not used and will
4499 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4500 * list if there are no delalloc bytes left.
4501 */
4502void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4503{
4504 btrfs_delalloc_release_metadata(inode, num_bytes);
4505 btrfs_free_reserved_data_space(inode, num_bytes);
4506}
4507
4508static int update_block_group(struct btrfs_trans_handle *trans,
4509 struct btrfs_root *root,
f0486c68 4510 u64 bytenr, u64 num_bytes, int alloc)
9078a3e1 4511{
0af3d00b 4512 struct btrfs_block_group_cache *cache = NULL;
9078a3e1 4513 struct btrfs_fs_info *info = root->fs_info;
db94535d 4514 u64 total = num_bytes;
9078a3e1 4515 u64 old_val;
db94535d 4516 u64 byte_in_group;
0af3d00b 4517 int factor;
3e1ad54f 4518
4519 /* block accounting for super block */
4520 spin_lock(&info->delalloc_lock);
6c41761f 4521 old_val = btrfs_super_bytes_used(info->super_copy);
4522 if (alloc)
4523 old_val += num_bytes;
4524 else
4525 old_val -= num_bytes;
6c41761f 4526 btrfs_set_super_bytes_used(info->super_copy, old_val);
4527 spin_unlock(&info->delalloc_lock);
4528
d397712b 4529 while (total) {
db94535d 4530 cache = btrfs_lookup_block_group(info, bytenr);
f3465ca4 4531 if (!cache)
9078a3e1 4532 return -1;
4533 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4534 BTRFS_BLOCK_GROUP_RAID1 |
4535 BTRFS_BLOCK_GROUP_RAID10))
4536 factor = 2;
4537 else
4538 factor = 1;
4539 /*
4540 * If this block group has free space cache written out, we
4541 * need to make sure to load it if we are removing space. This
4542 * is because we need the unpinning stage to actually add the
4543 * space back to the block group, otherwise we will leak space.
4544 */
4545 if (!alloc && cache->cached == BTRFS_CACHE_NO)
b8399dee 4546 cache_block_group(cache, trans, NULL, 1);
0af3d00b 4547
db94535d
CM
4548 byte_in_group = bytenr - cache->key.objectid;
4549 WARN_ON(byte_in_group > cache->key.offset);
9078a3e1 4550
25179201 4551 spin_lock(&cache->space_info->lock);
c286ac48 4552 spin_lock(&cache->lock);
0af3d00b 4553
73bc1876 4554 if (btrfs_test_opt(root, SPACE_CACHE) &&
4555 cache->disk_cache_state < BTRFS_DC_CLEAR)
4556 cache->disk_cache_state = BTRFS_DC_CLEAR;
4557
0f9dd46c 4558 cache->dirty = 1;
9078a3e1 4559 old_val = btrfs_block_group_used(&cache->item);
db94535d 4560 num_bytes = min(total, cache->key.offset - byte_in_group);
cd1bc465 4561 if (alloc) {
db94535d 4562 old_val += num_bytes;
4563 btrfs_set_block_group_used(&cache->item, old_val);
4564 cache->reserved -= num_bytes;
11833d66 4565 cache->space_info->bytes_reserved -= num_bytes;
4566 cache->space_info->bytes_used += num_bytes;
4567 cache->space_info->disk_used += num_bytes * factor;
c286ac48 4568 spin_unlock(&cache->lock);
25179201 4569 spin_unlock(&cache->space_info->lock);
cd1bc465 4570 } else {
db94535d 4571 old_val -= num_bytes;
c286ac48 4572 btrfs_set_block_group_used(&cache->item, old_val);
4573 cache->pinned += num_bytes;
4574 cache->space_info->bytes_pinned += num_bytes;
6324fbf3 4575 cache->space_info->bytes_used -= num_bytes;
b742bb82 4576 cache->space_info->disk_used -= num_bytes * factor;
c286ac48 4577 spin_unlock(&cache->lock);
25179201 4578 spin_unlock(&cache->space_info->lock);
1f3c79a2 4579
4580 set_extent_dirty(info->pinned_extents,
4581 bytenr, bytenr + num_bytes - 1,
4582 GFP_NOFS | __GFP_NOFAIL);
cd1bc465 4583 }
fa9c0d79 4584 btrfs_put_block_group(cache);
4585 total -= num_bytes;
4586 bytenr += num_bytes;
4587 }
4588 return 0;
4589}
6324fbf3 4590
4591static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4592{
0f9dd46c 4593 struct btrfs_block_group_cache *cache;
d2fb3437 4594 u64 bytenr;
4595
4596 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4597 if (!cache)
a061fc8d 4598 return 0;
0f9dd46c 4599
d2fb3437 4600 bytenr = cache->key.objectid;
fa9c0d79 4601 btrfs_put_block_group(cache);
4602
4603 return bytenr;
4604}
4605
4606static int pin_down_extent(struct btrfs_root *root,
4607 struct btrfs_block_group_cache *cache,
4608 u64 bytenr, u64 num_bytes, int reserved)
324ae4df 4609{
4610 spin_lock(&cache->space_info->lock);
4611 spin_lock(&cache->lock);
4612 cache->pinned += num_bytes;
4613 cache->space_info->bytes_pinned += num_bytes;
4614 if (reserved) {
4615 cache->reserved -= num_bytes;
4616 cache->space_info->bytes_reserved -= num_bytes;
4617 }
4618 spin_unlock(&cache->lock);
4619 spin_unlock(&cache->space_info->lock);
68b38550 4620
4621 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4622 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4623 return 0;
4624}
68b38550 4625
4626/*
4627 * this function must be called within transaction
4628 */
4629int btrfs_pin_extent(struct btrfs_root *root,
4630 u64 bytenr, u64 num_bytes, int reserved)
4631{
4632 struct btrfs_block_group_cache *cache;
68b38550 4633
4634 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4635 BUG_ON(!cache);
4636
4637 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4638
4639 btrfs_put_block_group(cache);
4640 return 0;
4641}
4642
f0486c68 4643/*
4644 * this function must be called within transaction
4645 */
4646int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4647 struct btrfs_root *root,
4648 u64 bytenr, u64 num_bytes)
4649{
4650 struct btrfs_block_group_cache *cache;
4651
4652 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4653 BUG_ON(!cache);
4654
4655 /*
4656 * pull in the free space cache (if any) so that our pin
4657 * removes the free space from the cache. We have load_only set
4658 * to one because the slow code to read in the free extents does check
4659 * the pinned extents.
4660 */
4661 cache_block_group(cache, trans, root, 1);
4662
4663 pin_down_extent(root, cache, bytenr, num_bytes, 0);
4664
4665 /* remove us from the free space cache (if we're there at all) */
4666 btrfs_remove_free_space(cache, bytenr, num_bytes);
4667 btrfs_put_block_group(cache);
4668 return 0;
4669}
4670
4671/**
4672 * btrfs_update_reserved_bytes - update the block_group and space info counters
4673 * @cache: The cache we are manipulating
4674 * @num_bytes: The number of bytes in question
4675 * @reserve: One of the reservation enums
4676 *
4677 * This is called by the allocator when it reserves space, or by somebody who is
4678 * freeing space that was never actually used on disk. For example if you
4679 * reserve some space for a new leaf in transaction A and before transaction A
4680 * commits you free that leaf, you call this with reserve set to 0 in order to
4681 * clear the reservation.
4682 *
4683 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4684 * ENOSPC accounting. For data we handle the reservation through clearing the
4685 * delalloc bits in the io_tree. We have to do this since we could end up
4686 * allocating less disk space for the amount of data we have reserved in the
4687 * case of compression.
4688 *
4689 * If this is a reservation and the block group has become read only we cannot
4690 * make the reservation and return -EAGAIN, otherwise this function always
4691 * succeeds.
f0486c68 4692 */
4693static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4694 u64 num_bytes, int reserve)
11833d66 4695{
fb25e914 4696 struct btrfs_space_info *space_info = cache->space_info;
f0486c68 4697 int ret = 0;
4698 spin_lock(&space_info->lock);
4699 spin_lock(&cache->lock);
4700 if (reserve != RESERVE_FREE) {
4701 if (cache->ro) {
4702 ret = -EAGAIN;
4703 } else {
4704 cache->reserved += num_bytes;
4705 space_info->bytes_reserved += num_bytes;
4706 if (reserve == RESERVE_ALLOC) {
4707 trace_btrfs_space_reservation(cache->fs_info,
4708 "space_info",
4709 (u64)space_info,
4710 num_bytes, 0);
4711 space_info->bytes_may_use -= num_bytes;
4712 }
f0486c68 4713 }
4714 } else {
4715 if (cache->ro)
4716 space_info->bytes_readonly += num_bytes;
4717 cache->reserved -= num_bytes;
4718 space_info->bytes_reserved -= num_bytes;
4719 space_info->reservation_progress++;
324ae4df 4720 }
4721 spin_unlock(&cache->lock);
4722 spin_unlock(&space_info->lock);
f0486c68 4723 return ret;
324ae4df 4724}
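/*
 * Editorial note (not part of the original file): the intended lifecycle
 * is reserve -> use -> free.  RESERVE_ALLOC moves bytes from
 * bytes_may_use into bytes_reserved, update_block_group() later moves
 * them into bytes_used, and a reservation that never reaches disk is
 * undone with:
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */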
9078a3e1 4725
4726int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4727 struct btrfs_root *root)
e8569813 4728{
e8569813 4729 struct btrfs_fs_info *fs_info = root->fs_info;
4730 struct btrfs_caching_control *next;
4731 struct btrfs_caching_control *caching_ctl;
4732 struct btrfs_block_group_cache *cache;
e8569813 4733
11833d66 4734 down_write(&fs_info->extent_commit_sem);
25179201 4735
4736 list_for_each_entry_safe(caching_ctl, next,
4737 &fs_info->caching_block_groups, list) {
4738 cache = caching_ctl->block_group;
4739 if (block_group_cache_done(cache)) {
4740 cache->last_byte_to_unpin = (u64)-1;
4741 list_del_init(&caching_ctl->list);
4742 put_caching_control(caching_ctl);
e8569813 4743 } else {
11833d66 4744 cache->last_byte_to_unpin = caching_ctl->progress;
e8569813 4745 }
e8569813 4746 }
4747
4748 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4749 fs_info->pinned_extents = &fs_info->freed_extents[1];
4750 else
4751 fs_info->pinned_extents = &fs_info->freed_extents[0];
4752
4753 up_write(&fs_info->extent_commit_sem);
4754
4755 update_global_block_rsv(fs_info);
4756 return 0;
4757}
4758
11833d66 4759static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
ccd467d6 4760{
4761 struct btrfs_fs_info *fs_info = root->fs_info;
4762 struct btrfs_block_group_cache *cache = NULL;
4763 u64 len;
ccd467d6 4764
4765 while (start <= end) {
4766 if (!cache ||
4767 start >= cache->key.objectid + cache->key.offset) {
4768 if (cache)
4769 btrfs_put_block_group(cache);
4770 cache = btrfs_lookup_block_group(fs_info, start);
4771 BUG_ON(!cache);
4772 }
4773
4774 len = cache->key.objectid + cache->key.offset - start;
4775 len = min(len, end + 1 - start);
4776
4777 if (start < cache->last_byte_to_unpin) {
4778 len = min(len, cache->last_byte_to_unpin - start);
4779 btrfs_add_free_space(cache, start, len);
4780 }
4781
4782 start += len;
4783
4784 spin_lock(&cache->space_info->lock);
4785 spin_lock(&cache->lock);
4786 cache->pinned -= len;
4787 cache->space_info->bytes_pinned -= len;
37be25bc 4788 if (cache->ro)
f0486c68 4789 cache->space_info->bytes_readonly += len;
4790 spin_unlock(&cache->lock);
4791 spin_unlock(&cache->space_info->lock);
ccd467d6 4792 }
4793
4794 if (cache)
4795 btrfs_put_block_group(cache);
4796 return 0;
4797}
4798
4799int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
11833d66 4800 struct btrfs_root *root)
a28ec197 4801{
4802 struct btrfs_fs_info *fs_info = root->fs_info;
4803 struct extent_io_tree *unpin;
4804 u64 start;
4805 u64 end;
a28ec197 4806 int ret;
a28ec197 4807
4808 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4809 unpin = &fs_info->freed_extents[1];
4810 else
4811 unpin = &fs_info->freed_extents[0];
4812
d397712b 4813 while (1) {
4814 ret = find_first_extent_bit(unpin, 0, &start, &end,
4815 EXTENT_DIRTY);
4816 if (ret)
a28ec197 4817 break;
1f3c79a2 4818
4819 if (btrfs_test_opt(root, DISCARD))
4820 ret = btrfs_discard_extent(root, start,
4821 end + 1 - start, NULL);
1f3c79a2 4822
1a5bc167 4823 clear_extent_dirty(unpin, start, end, GFP_NOFS);
11833d66 4824 unpin_extent_range(root, start, end);
b9473439 4825 cond_resched();
a28ec197 4826 }
817d52f8 4827
4828 return 0;
4829}
4830
4831static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4832 struct btrfs_root *root,
4833 u64 bytenr, u64 num_bytes, u64 parent,
4834 u64 root_objectid, u64 owner_objectid,
4835 u64 owner_offset, int refs_to_drop,
4836 struct btrfs_delayed_extent_op *extent_op)
a28ec197 4837{
e2fa7227 4838 struct btrfs_key key;
5d4f98a2 4839 struct btrfs_path *path;
4840 struct btrfs_fs_info *info = root->fs_info;
4841 struct btrfs_root *extent_root = info->extent_root;
5f39d397 4842 struct extent_buffer *leaf;
4843 struct btrfs_extent_item *ei;
4844 struct btrfs_extent_inline_ref *iref;
a28ec197 4845 int ret;
5d4f98a2 4846 int is_data;
4847 int extent_slot = 0;
4848 int found_extent = 0;
4849 int num_to_del = 1;
4850 u32 item_size;
4851 u64 refs;
037e6390 4852
5caf2a00 4853 path = btrfs_alloc_path();
4854 if (!path)
4855 return -ENOMEM;
5f26f772 4856
3c12ac72 4857 path->reada = 1;
b9473439 4858 path->leave_spinning = 1;
4859
4860 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4861 BUG_ON(!is_data && refs_to_drop != 1);
4862
4863 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4864 bytenr, num_bytes, parent,
4865 root_objectid, owner_objectid,
4866 owner_offset);
7bb86316 4867 if (ret == 0) {
952fccac 4868 extent_slot = path->slots[0];
4869 while (extent_slot >= 0) {
4870 btrfs_item_key_to_cpu(path->nodes[0], &key,
952fccac 4871 extent_slot);
5d4f98a2 4872 if (key.objectid != bytenr)
952fccac 4873 break;
4874 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4875 key.offset == num_bytes) {
952fccac
CM
4876 found_extent = 1;
4877 break;
4878 }
4879 if (path->slots[0] - extent_slot > 5)
4880 break;
5d4f98a2 4881 extent_slot--;
952fccac 4882 }
4883#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4884 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4885 if (found_extent && item_size < sizeof(*ei))
4886 found_extent = 0;
4887#endif
31840ae1 4888 if (!found_extent) {
5d4f98a2 4889 BUG_ON(iref);
56bec294 4890 ret = remove_extent_backref(trans, extent_root, path,
4891 NULL, refs_to_drop,
4892 is_data);
31840ae1 4893 BUG_ON(ret);
b3b4aa74 4894 btrfs_release_path(path);
b9473439 4895 path->leave_spinning = 1;
4896
4897 key.objectid = bytenr;
4898 key.type = BTRFS_EXTENT_ITEM_KEY;
4899 key.offset = num_bytes;
4900
4901 ret = btrfs_search_slot(trans, extent_root,
4902 &key, path, -1, 1);
4903 if (ret) {
4904 printk(KERN_ERR "umm, got %d back from search"
4905 ", was looking for %llu\n", ret,
4906 (unsigned long long)bytenr);
4907 if (ret > 0)
4908 btrfs_print_leaf(extent_root,
4909 path->nodes[0]);
f3465ca4 4910 }
4911 BUG_ON(ret);
4912 extent_slot = path->slots[0];
4913 }
4914 } else {
4915 btrfs_print_leaf(extent_root, path->nodes[0]);
4916 WARN_ON(1);
d397712b 4917 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5d4f98a2 4918 "parent %llu root %llu owner %llu offset %llu\n",
d397712b 4919 (unsigned long long)bytenr,
56bec294 4920 (unsigned long long)parent,
d397712b 4921 (unsigned long long)root_objectid,
4922 (unsigned long long)owner_objectid,
4923 (unsigned long long)owner_offset);
7bb86316 4924 }
4925
4926 leaf = path->nodes[0];
4927 item_size = btrfs_item_size_nr(leaf, extent_slot);
4928#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4929 if (item_size < sizeof(*ei)) {
4930 BUG_ON(found_extent || extent_slot != path->slots[0]);
4931 ret = convert_extent_item_v0(trans, extent_root, path,
4932 owner_objectid, 0);
4933 BUG_ON(ret < 0);
4934
b3b4aa74 4935 btrfs_release_path(path);
4936 path->leave_spinning = 1;
4937
4938 key.objectid = bytenr;
4939 key.type = BTRFS_EXTENT_ITEM_KEY;
4940 key.offset = num_bytes;
4941
4942 ret = btrfs_search_slot(trans, extent_root, &key, path,
4943 -1, 1);
4944 if (ret) {
4945 printk(KERN_ERR "umm, got %d back from search"
4946 ", was looking for %llu\n", ret,
4947 (unsigned long long)bytenr);
4948 btrfs_print_leaf(extent_root, path->nodes[0]);
4949 }
4950 BUG_ON(ret);
4951 extent_slot = path->slots[0];
4952 leaf = path->nodes[0];
4953 item_size = btrfs_item_size_nr(leaf, extent_slot);
4954 }
4955#endif
4956 BUG_ON(item_size < sizeof(*ei));
952fccac 4957 ei = btrfs_item_ptr(leaf, extent_slot,
123abc88 4958 struct btrfs_extent_item);
4959 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4960 struct btrfs_tree_block_info *bi;
4961 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4962 bi = (struct btrfs_tree_block_info *)(ei + 1);
4963 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4964 }
56bec294 4965
5d4f98a2 4966 refs = btrfs_extent_refs(leaf, ei);
4967 BUG_ON(refs < refs_to_drop);
4968 refs -= refs_to_drop;
5f39d397 4969
4970 if (refs > 0) {
4971 if (extent_op)
4972 __run_delayed_extent_op(extent_op, leaf, ei);
4973 /*
4974 * In the case of inline back ref, reference count will
4975 * be updated by remove_extent_backref
952fccac 4976 */
4977 if (iref) {
4978 BUG_ON(!found_extent);
4979 } else {
4980 btrfs_set_extent_refs(leaf, ei, refs);
4981 btrfs_mark_buffer_dirty(leaf);
4982 }
4983 if (found_extent) {
4984 ret = remove_extent_backref(trans, extent_root, path,
4985 iref, refs_to_drop,
4986 is_data);
4987 BUG_ON(ret);
4988 }
5d4f98a2 4989 } else {
4990 if (found_extent) {
4991 BUG_ON(is_data && refs_to_drop !=
4992 extent_data_ref_count(root, path, iref));
4993 if (iref) {
4994 BUG_ON(path->slots[0] != extent_slot);
4995 } else {
4996 BUG_ON(path->slots[0] != extent_slot + 1);
4997 path->slots[0] = extent_slot;
4998 num_to_del = 2;
4999 }
78fae27e 5000 }
b9473439 5001
5002 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5003 num_to_del);
31840ae1 5004 BUG_ON(ret);
b3b4aa74 5005 btrfs_release_path(path);
21af804c 5006
5d4f98a2 5007 if (is_data) {
5008 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5009 BUG_ON(ret);
5010 } else {
5011 invalidate_mapping_pages(info->btree_inode->i_mapping,
5012 bytenr >> PAGE_CACHE_SHIFT,
5013 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
5014 }
5015
f0486c68 5016 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
dcbdd4dc 5017 BUG_ON(ret);
a28ec197 5018 }
5caf2a00 5019 btrfs_free_path(path);
5020 return ret;
5021}
5022
1887be66 5023/*
f0486c68 5024 * when we free a block, it is possible (and likely) that we free the last
5025 * delayed ref for that extent as well. This searches the delayed ref tree for
5026 * a given extent, and if there are no other delayed refs to be processed, it
5027 * removes it from the tree.
5028 */
5029static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5030 struct btrfs_root *root, u64 bytenr)
5031{
5032 struct btrfs_delayed_ref_head *head;
5033 struct btrfs_delayed_ref_root *delayed_refs;
5034 struct btrfs_delayed_ref_node *ref;
5035 struct rb_node *node;
f0486c68 5036 int ret = 0;
5037
5038 delayed_refs = &trans->transaction->delayed_refs;
5039 spin_lock(&delayed_refs->lock);
5040 head = btrfs_find_delayed_ref_head(trans, bytenr);
5041 if (!head)
5042 goto out;
5043
5044 node = rb_prev(&head->node.rb_node);
5045 if (!node)
5046 goto out;
5047
5048 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5049
5050 /* there are still entries for this ref, we can't drop it */
5051 if (ref->bytenr == bytenr)
5052 goto out;
5053
5054 if (head->extent_op) {
5055 if (!head->must_insert_reserved)
5056 goto out;
5057 kfree(head->extent_op);
5058 head->extent_op = NULL;
5059 }
5060
5061 /*
5062 * waiting for the lock here would deadlock. If someone else has it
5063 * locked they are already in the process of dropping it anyway
5064 */
5065 if (!mutex_trylock(&head->mutex))
5066 goto out;
5067
5068 /*
5069 * at this point we have a head with no other entries. Go
5070 * ahead and process it.
5071 */
5072 head->node.in_tree = 0;
5073 rb_erase(&head->node.rb_node, &delayed_refs->root);
c3e69d58 5074
1887be66 5075 delayed_refs->num_entries--;
5076 if (waitqueue_active(&delayed_refs->seq_wait))
5077 wake_up(&delayed_refs->seq_wait);
5078
5079 /*
5080 * we don't take a ref on the node because we're removing it from the
5081 * tree, so we just steal the ref the tree was holding.
5082 */
5083 delayed_refs->num_heads--;
5084 if (list_empty(&head->cluster))
5085 delayed_refs->num_heads_ready--;
5086
5087 list_del_init(&head->cluster);
5088 spin_unlock(&delayed_refs->lock);
5089
5090 BUG_ON(head->extent_op);
5091 if (head->must_insert_reserved)
5092 ret = 1;
5093
5094 mutex_unlock(&head->mutex);
1887be66 5095 btrfs_put_delayed_ref(&head->node);
f0486c68 5096 return ret;
5097out:
5098 spin_unlock(&delayed_refs->lock);
5099 return 0;
5100}
5101
5102void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5103 struct btrfs_root *root,
5104 struct extent_buffer *buf,
66d7e7f0 5105 u64 parent, int last_ref, int for_cow)
f0486c68 5106{
5107 struct btrfs_block_group_cache *cache = NULL;
5108 int ret;
5109
5110 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5111 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5112 buf->start, buf->len,
5113 parent, root->root_key.objectid,
5114 btrfs_header_level(buf),
5115 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5116 BUG_ON(ret);
5117 }
5118
5119 if (!last_ref)
5120 return;
5121
f0486c68 5122 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5123
5124 if (btrfs_header_generation(buf) == trans->transid) {
5125 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5126 ret = check_ref_cleanup(trans, root, buf->start);
5127 if (!ret)
37be25bc 5128 goto out;
f0486c68
YZ
5129 }
5130
5131 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5132 pin_down_extent(root, cache, buf->start, buf->len, 1);
37be25bc 5133 goto out;
f0486c68
YZ
5134 }
5135
5136 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5137
5138 btrfs_add_free_space(cache, buf->start, buf->len);
fb25e914 5139 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
f0486c68
YZ
5140 }
5141out:
a826d6dc
JB
5142 /*
5143 * Deleting the buffer, clear the corrupt flag since it doesn't matter
5144 * anymore.
5145 */
5146 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
f0486c68
YZ
5147 btrfs_put_block_group(cache);
5148}
5149
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						 num_bytes,
						 parent, root_objectid, owner,
						 offset, BTRFS_DROP_DELAYED_REF,
						 NULL, for_cow);
		BUG_ON(ret);
	}
	return ret;
}

static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}

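/*
 * Illustrative sketch (not part of this file): stripe_align() is the classic
 * power-of-two round-up, (val + mask) & ~mask, which only works when the
 * alignment is a power of two.  A quick standalone check, with hypothetical
 * demo_* names:
 */
#include <assert.h>

static unsigned long long demo_align_up(unsigned long long val,
					unsigned long long align)
{
	unsigned long long mask = align - 1;	/* align must be a power of two */

	return (val + mask) & ~mask;
}

static void demo_align_check(void)
{
	assert(demo_align_up(0, 4096) == 0);
	assert(demo_align_up(1, 4096) == 4096);
	assert(demo_align_up(4096, 4096) == 4096);	/* already aligned */
	assert(demo_align_up(4097, 4096) == 8192);
}
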
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}

static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	int index;
	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
		index = 0;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
		index = 1;
	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
		index = 2;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
		index = 3;
	else
		index = 4;
	return index;
}

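/*
 * Illustrative sketch (not part of this file): the index returned above
 * buckets block groups into per-profile lists, and find_free_extent() below
 * scans index 0 (RAID10) through index 4 (SINGLE) until an allocation
 * succeeds, i.e. most redundant profile first.  A minimal model of that
 * outer scan; the demo_* names are hypothetical:
 */
#define DEMO_NR_RAID_TYPES 5

static int demo_free_in_profile[DEMO_NR_RAID_TYPES];	/* fake free counts */

static int demo_scan_all_profiles(void)
{
	int index;

	/* most redundant profile first (RAID10), plain SINGLE last */
	for (index = 0; index < DEMO_NR_RAID_TYPES; index++) {
		if (demo_free_in_profile[index] > 0)
			return index;	/* allocate from this bucket */
	}
	return -1;	/* no bucket had room: ENOSPC */
}
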
enum btrfs_loop_type {
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};

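/*
 * Illustrative sketch (not part of this file): find_free_extent() retries
 * its whole block-group scan with progressively more expensive fallbacks,
 * one loop stage at a time, until it either succeeds or exhausts
 * LOOP_NO_EMPTY_SIZE.  The retry skeleton reduced to its control flow;
 * demo_* names are hypothetical:
 */
static int demo_try_alloc_pass(int stage)
{
	/* stand-in: pretend only the chunk-allocation stage can succeed */
	return stage >= 3 ? 0 : -1;
}

static int demo_find_free_extent(void)
{
	int stage;

	for (stage = 0; stage <= 4; stage++) {
		/*
		 * 0 LOOP_FIND_IDEAL:     ideal/cached groups only
		 * 1 LOOP_CACHING_NOWAIT: partially cached groups, kick caching
		 * 2 LOOP_CACHING_WAIT:   wait for caching progress
		 * 3 LOOP_ALLOC_CHUNK:    allocate a brand new chunk
		 * 4 LOOP_NO_EMPTY_SIZE:  drop empty_size/empty_cluster padding
		 */
		if (demo_try_alloc_pass(stage) == 0)
			return 0;
	}
	return -1;	/* models -ENOSPC */
}
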
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	struct btrfs_block_group_cache *used_block_group;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = 0;
	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(orig_root, num_bytes, empty_size, data);

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %llu\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
ideal_cache:
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		used_block_group = block_group;
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However, if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		used_block_group = block_group;
		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, data)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((data & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			u64 free_percent;

			found_uncached_bg = true;
			ret = cache_block_group(block_group, trans,
						orig_root, 1);
			if (block_group->cached == BTRFS_CACHE_FINISHED)
				goto alloc;

			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * The caching workers are limited to 2 threads, so we
			 * can queue as much work as we care to.
			 */
			if (loop > LOOP_FIND_IDEAL) {
				ret = cache_block_group(block_group, trans,
							orig_root, 0);
				BUG_ON(ret);
			}

			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}

alloc:
		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			used_block_group = last_ptr->block_group;
			if (used_block_group != block_group &&
			    (!used_block_group ||
			     used_block_group->ro ||
			     !block_group_bits(used_block_group, data))) {
				used_block_group = block_group;
				goto refill_cluster;
			}

			if (used_block_group != block_group)
				btrfs_get_block_group(used_block_group);

			offset = btrfs_alloc_from_cluster(used_block_group,
					last_ptr, num_bytes,
					used_block_group->key.objectid);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(root,
					block_group, search_start, num_bytes);
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
			if (used_block_group != block_group) {
				btrfs_put_block_group(used_block_group);
				used_block_group = block_group;
			}
refill_cluster:
			BUG_ON(used_block_group != block_group);
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find.  If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation. */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    last_ptr->block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       search_start, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(root,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

unclustered_alloc:
		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);
		/* move on to the next group */
		if (search_start + num_bytes >= search_end) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* move on to the next group */
		if (search_start + num_bytes >
		    used_block_group->key.objectid + used_block_group->key.offset) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
						  alloc_type);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(orig_root, block_group,
					   search_start, num_bytes);
		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			loop++;
			if (!ideal_cache_percent)
				goto search;

			/*
			 * 1 of the following 2 things has happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find an uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		loop++;

		if (loop == LOOP_ALLOC_CHUNK) {
			if (allowed_chunk_alloc) {
				ret = do_chunk_alloc(trans, root, num_bytes +
						     2 * 1024 * 1024, data,
						     CHUNK_ALLOC_LIMITED);
				allowed_chunk_alloc = 0;
				if (ret == 1)
					done_chunk_alloc = 1;
			} else if (!done_chunk_alloc &&
				   space_info->force_alloc ==
				   CHUNK_ALLOC_NO_FORCE) {
				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
			}

			/*
			 * We didn't allocate a chunk, go ahead and drop the
			 * empty size and loop again.
			 */
			if (!done_chunk_alloc)
				loop = LOOP_NO_EMPTY_SIZE;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		ret = 0;
	}

	return ret;
}

static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
	       (unsigned long long)info->flags,
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

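/*
 * Illustrative sketch (not part of this file): the "free" figure printed by
 * dump_space_info() is derived, not stored -- it is whatever remains of
 * total_bytes after the used, pinned, reserved and readonly counters are
 * subtracted.  A standalone model of that bookkeeping; the demo_* names are
 * hypothetical:
 */
struct demo_space_info {
	unsigned long long total_bytes;
	unsigned long long bytes_used;
	unsigned long long bytes_pinned;
	unsigned long long bytes_reserved;
	unsigned long long bytes_readonly;
};

static unsigned long long demo_space_free(const struct demo_space_info *info)
{
	return info->total_bytes - info->bytes_used - info->bytes_pinned -
	       info->bytes_reserved - info->bytes_readonly;
}
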
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	u64 search_start = 0;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows)
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data,
				     CHUNK_ALLOC_NO_FORCE);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte,
			       ins, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, CHUNK_ALLOC_FORCE);
		goto again;
	}
	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes, 1);
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);

	return ret;
}

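/*
 * Illustrative sketch (not part of this file): on ENOSPC,
 * btrfs_reserve_extent() halves the request, rounds it back down to a
 * sector boundary and clamps it at min_alloc_size before retrying, so a
 * caller that can live with a smaller extent still gets one.  The shrink
 * step in isolation; demo_* names are hypothetical:
 */
static unsigned long long demo_shrink_request(unsigned long long num_bytes,
					      unsigned long long min_alloc_size,
					      unsigned long long sectorsize)
{
	num_bytes >>= 1;			/* try half as much */
	num_bytes &= ~(sectorsize - 1);		/* keep it sector aligned */
	if (num_bytes < min_alloc_size)		/* but never below the floor */
		num_bytes = min_alloc_size;
	return num_bytes;
}
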
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len, int pin)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);

	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
	}
	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
			       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 0);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 1);
}

static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}

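/*
 * Illustrative sketch (not part of this file): the extent item written above
 * is followed in the same leaf item by an inline backref, so the item size
 * is computed up front as sizeof(extent_item) plus the size of the chosen
 * ref type, and the ref payload is reached by pointer arithmetic past the
 * header.  The standalone model below shows only that layout idea; it
 * ignores the on-disk packing/endianness that the real btrfs_set_* accessors
 * handle, and all demo_* names are hypothetical:
 */
#include <string.h>

struct demo_extent_item { unsigned long long refs, generation, flags; };
struct demo_inline_ref  { unsigned char type; unsigned long long offset; };

static size_t demo_item_size(void)
{
	/* one fixed header followed immediately by one inline ref */
	return sizeof(struct demo_extent_item) + sizeof(struct demo_inline_ref);
}

static void demo_fill_item(unsigned char *item, unsigned char ref_type,
			   unsigned long long ref_offset)
{
	struct demo_extent_item hdr = { .refs = 1 };
	struct demo_inline_ref ref = { .type = ref_type, .offset = ref_offset };

	memcpy(item, &hdr, sizeof(hdr));		/* header first */
	memcpy(item + sizeof(hdr), &ref, sizeof(ref));	/* inline ref after it */
}
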
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}

int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group, trans, NULL, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT);
	BUG_ON(ret);
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}

struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}

static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;

	block_rsv = get_block_rsv(trans, root);

	if (block_rsv->size == 0) {
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		/*
		 * If we couldn't reserve metadata bytes try and use some from
		 * the global reserve.
		 */
		if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
			return ERR_PTR(ret);
		} else if (ret) {
			return ERR_PTR(ret);
		}
		return block_rsv;
	}

	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;
	if (ret) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL,
				/*DEFAULT_RATELIMIT_BURST*/ 2);
		if (__ratelimit(&_rs)) {
			printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
			WARN_ON(1);
		}
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		if (!ret) {
			return block_rsv;
		} else if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
		}
	}

	return ERR_PTR(-ENOSPC);
}

static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}

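/*
 * Illustrative sketch (not part of this file): use_block_rsv() implements a
 * two-tier reservation -- try the transaction's own reserve first and fall
 * back to the shared global reserve only when that fails.  The policy in
 * isolation, over plain counters; all demo_* names are hypothetical:
 */
#include <stddef.h>

struct demo_rsv { unsigned long long reserved; };

static int demo_rsv_use(struct demo_rsv *rsv, unsigned long long bytes)
{
	if (rsv->reserved < bytes)
		return -1;		/* models -ENOSPC */
	rsv->reserved -= bytes;
	return 0;
}

static struct demo_rsv *demo_pick_rsv(struct demo_rsv *own,
				      struct demo_rsv *global,
				      unsigned long long bytes)
{
	if (demo_rsv_use(own, bytes) == 0)
		return own;		/* common case: our own reserve */
	if (own != global && demo_rsv_use(global, bytes) == 0)
		return global;		/* emergency: dip into the global pool */
	return NULL;			/* caller must handle ENOSPC */
}
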
/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or an ERR_PTR on failure.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size, int for_cow)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
				   empty_size, hint, (u64)-1, &ins, 0);
	if (ret) {
		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf));

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op, for_cow);
		BUG_ON(ret);
	}
	return buf;
}

struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2

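/*
 * Illustrative sketch (not part of this file): the walker below runs in two
 * stages.  DROP_REFERENCE frees blocks owned solely by the tree being
 * deleted; when it meets a shared subtree whose backrefs must be rewritten,
 * it switches to UPDATE_BACKREF for that subtree and falls back to
 * DROP_REFERENCE once the subtree is done.  The stage transitions in
 * isolation; demo_* names are hypothetical:
 */
enum demo_stage { DEMO_DROP_REFERENCE = 1, DEMO_UPDATE_BACKREF = 2 };

struct demo_wc { enum demo_stage stage; int shared_level; };

static void demo_enter_shared_subtree(struct demo_wc *wc, int level)
{
	wc->stage = DEMO_UPDATE_BACKREF;	/* rewrite backrefs below here */
	wc->shared_level = level - 1;
}

static void demo_leave_shared_subtree(struct demo_wc *wc)
{
	wc->stage = DEMO_DROP_REFERENCE;	/* resume freeing blocks */
	wc->shared_level = -1;
}
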
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}

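/*
 * Illustrative sketch (not part of this file): reada_walk_down() sizes its
 * readahead window adaptively -- it shrinks the window by a third when the
 * walker is still behind the last window (readahead got ahead of
 * consumption) and grows it by half when the walker has caught up, with a
 * floor of 2 and a ceiling of one node's worth of pointers.  The window
 * update alone; demo_* names are hypothetical:
 */
static int demo_update_reada_window(int count, int caught_up,
				    int floor, int ceiling)
{
	if (!caught_up) {
		count = count * 2 / 3;		/* we over-read; back off */
		if (count < floor)
			count = floor;
	} else {
		count = count * 3 / 2;		/* keep ahead of the walker */
		if (count > ceiling)
			count = ceiling;
	}
	return count;
}
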
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again.  once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret);
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
		BUG_ON(ret);
		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
		BUG_ON(ret);
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret);
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to.  if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF.  if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	BUG_ON(ret);
	BUG_ON(wc->refs[level - 1] == 0);
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next)
			return -EIO;
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
					root->root_key.objectid, level - 1, 0, 0);
		BUG_ON(ret);
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1,
						    wc->for_reloc);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0,
						    wc->for_reloc);
			BUG_ON(ret);
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}

static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}

static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one.  if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
void btrfs_drop_snapshot(struct btrfs_root *root,
			 struct btrfs_block_rsv *block_rsv, int update_ref,
			 int for_reloc)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	BUG_ON(IS_ERR(trans));

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_free;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						path->nodes[level]->len,
						&wc->refs[level],
						&wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root)) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			BUG_ON(ret);

			btrfs_end_transaction_throttle(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 0);
			BUG_ON(IS_ERR(trans));
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	BUG_ON(err);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
					   NULL, NULL);
		BUG_ON(ret < 0);
		if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (root->in_radix) {
		btrfs_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		kfree(root);
	}
out_free:
	btrfs_end_transaction_throttle(trans, tree_root);
	kfree(wc);
	btrfs_free_path(path);
out:
	if (err)
		btrfs_std_error(root->fs_info, err);
6894 return;
20524f02 6895}
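
/*
 * Illustration: how a dead subvolume root might be handed to
 * btrfs_drop_snapshot().  This hypothetical helper is modeled on the
 * btrfs_clean_old_snapshots() pattern in transaction.c: roots that
 * predate mixed backrefs cannot have their backrefs rewritten, so
 * update_ref is only set for mixed-backref roots, and for_reloc stays
 * zero outside the relocation code.
 */
static void drop_one_dead_root(struct btrfs_root *root)
{
	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
		btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		btrfs_drop_snapshot(root, NULL, 1, 0);
}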
9078a3e1 6896
2c47e605
YZ
6897/*
6898 * drop subtree rooted at tree block 'node'.
6899 *
6900 * NOTE: this function will unlock and release tree block 'node'
66d7e7f0 6901 * it is only used by the relocation code.
2c47e605 6902 */
f82d02d9
YZ
6903int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6904 struct btrfs_root *root,
6905 struct extent_buffer *node,
6906 struct extent_buffer *parent)
6907{
6908 struct btrfs_path *path;
2c47e605 6909 struct walk_control *wc;
f82d02d9
YZ
6910 int level;
6911 int parent_level;
6912 int ret = 0;
6913 int wret;
6914
2c47e605
YZ
6915 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6916
f82d02d9 6917 path = btrfs_alloc_path();
db5b493a
TI
6918 if (!path)
6919 return -ENOMEM;
f82d02d9 6920
2c47e605 6921 wc = kzalloc(sizeof(*wc), GFP_NOFS);
db5b493a
TI
6922 if (!wc) {
6923 btrfs_free_path(path);
6924 return -ENOMEM;
6925 }
2c47e605 6926
b9447ef8 6927 btrfs_assert_tree_locked(parent);
f82d02d9
YZ
6928 parent_level = btrfs_header_level(parent);
6929 extent_buffer_get(parent);
6930 path->nodes[parent_level] = parent;
6931 path->slots[parent_level] = btrfs_header_nritems(parent);
6932
b9447ef8 6933 btrfs_assert_tree_locked(node);
f82d02d9 6934 level = btrfs_header_level(node);
f82d02d9
YZ
6935 path->nodes[level] = node;
6936 path->slots[level] = 0;
bd681513 6937 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
2c47e605
YZ
6938
6939 wc->refs[parent_level] = 1;
6940 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6941 wc->level = level;
6942 wc->shared_level = -1;
6943 wc->stage = DROP_REFERENCE;
6944 wc->update_ref = 0;
6945 wc->keep_locks = 1;
66d7e7f0 6946 wc->for_reloc = 1;
1c4850e2 6947 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
f82d02d9
YZ
6948
6949 while (1) {
2c47e605
YZ
6950 wret = walk_down_tree(trans, root, path, wc);
6951 if (wret < 0) {
f82d02d9 6952 ret = wret;
f82d02d9 6953 break;
2c47e605 6954 }
f82d02d9 6955
2c47e605 6956 wret = walk_up_tree(trans, root, path, wc, parent_level);
f82d02d9
YZ
6957 if (wret < 0)
6958 ret = wret;
6959 if (wret != 0)
6960 break;
6961 }
6962
2c47e605 6963 kfree(wc);
f82d02d9
YZ
6964 btrfs_free_path(path);
6965 return ret;
6966}
6967
ec44a35c
CM
6968static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6969{
6970 u64 num_devices;
6971 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6972 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6973
e4d8ec0f
ID
6974 if (root->fs_info->balance_ctl) {
6975 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
6976 u64 tgt = 0;
6977
6978 /* pick restriper's target profile and return */
6979 if (flags & BTRFS_BLOCK_GROUP_DATA &&
6980 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6981 tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
6982 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
6983 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6984 tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
6985 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
6986 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6987 tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
6988 }
6989
6990 if (tgt) {
6991 /* extended -> chunk profile */
6992 tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
6993 return tgt;
6994 }
6995 }
6996
cd02dca5
CM
6997 /*
6998 * we add in the count of missing devices because we want
6999 * to make sure that any RAID levels on a degraded FS
7000 * continue to be honored.
7001 */
7002 num_devices = root->fs_info->fs_devices->rw_devices +
7003 root->fs_info->fs_devices->missing_devices;
7004
ec44a35c
CM
7005 if (num_devices == 1) {
7006 stripped |= BTRFS_BLOCK_GROUP_DUP;
7007 stripped = flags & ~stripped;
7008
7009 /* turn raid0 into single device chunks */
7010 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7011 return stripped;
7012
7013 /* turn mirroring into duplication */
7014 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7015 BTRFS_BLOCK_GROUP_RAID10))
7016 return stripped | BTRFS_BLOCK_GROUP_DUP;
7017 return flags;
7018 } else {
7019 /* they already had raid on here, just return */
ec44a35c
CM
7020 if (flags & stripped)
7021 return flags;
7022
7023 stripped |= BTRFS_BLOCK_GROUP_DUP;
7024 stripped = flags & ~stripped;
7025
7026 /* switch duplicated blocks with raid1 */
7027 if (flags & BTRFS_BLOCK_GROUP_DUP)
7028 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7029
7030 /* turn single device chunks into raid0 */
7031 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7032 }
7033 return flags;
7034}
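
/*
 * Worked example of the reduction above, assuming a RAID1 metadata
 * block group on a filesystem that is down to one usable device
 * (rw_devices + missing_devices == 1) and no restriper target:
 *
 *   flags             = METADATA | RAID1
 *   stripped          = RAID0 | RAID1 | RAID10 | DUP
 *   flags & ~stripped = METADATA
 *   returned value    = METADATA | DUP
 *
 * i.e. mirroring across devices degrades to duplication on the single
 * remaining device while the type bits are preserved.
 */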
7035
199c36ea 7036static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
0ef3e66b 7037{
f0486c68
YZ
7038 struct btrfs_space_info *sinfo = cache->space_info;
7039 u64 num_bytes;
199c36ea 7040 u64 min_allocable_bytes;
f0486c68 7041 int ret = -ENOSPC;
0ef3e66b 7042
c286ac48 7043
199c36ea
MX
7044 /*
7045 * We need some metadata space and system metadata space for
7046 * allocating chunks in some corner cases, so keep a minimum
7047 * reserve unless we are forced to set the group read-only.
7048 */
7049 if ((sinfo->flags &
7050 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7051 !force)
7052 min_allocable_bytes = 1 * 1024 * 1024;
7053 else
7054 min_allocable_bytes = 0;
7055
f0486c68
YZ
7056 spin_lock(&sinfo->lock);
7057 spin_lock(&cache->lock);
61cfea9b
W
7058
7059 if (cache->ro) {
7060 ret = 0;
7061 goto out;
7062 }
7063
f0486c68
YZ
7064 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7065 cache->bytes_super - btrfs_block_group_used(&cache->item);
7066
7067 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
37be25bc
JB
7068 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7069 min_allocable_bytes <= sinfo->total_bytes) {
f0486c68 7070 sinfo->bytes_readonly += num_bytes;
f0486c68
YZ
7071 cache->ro = 1;
7072 ret = 0;
7073 }
61cfea9b 7074out:
f0486c68
YZ
7075 spin_unlock(&cache->lock);
7076 spin_unlock(&sinfo->lock);
7077 return ret;
7078}
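
/*
 * Worked example of the admission check above, with hypothetical
 * numbers for a 1024MiB metadata block group:
 *
 *   num_bytes = 1024 (key.offset) - 10 (reserved) - 4 (pinned)
 *             - 2 (bytes_super) - 600 (used) = 408MiB
 *
 * That 408MiB of unused space would become read-only, so the group is
 * only flipped if the space_info as a whole can absorb it:
 *
 *   used + reserved + pinned + may_use + readonly + num_bytes
 *        + min_allocable_bytes <= total_bytes
 *
 * with min_allocable_bytes = 1MiB for metadata/system groups unless
 * the caller forces the transition.
 */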
7d9eb12c 7079
f0486c68
YZ
7080int btrfs_set_block_group_ro(struct btrfs_root *root,
7081 struct btrfs_block_group_cache *cache)
c286ac48 7082
f0486c68
YZ
7083{
7084 struct btrfs_trans_handle *trans;
7085 u64 alloc_flags;
7086 int ret;
7d9eb12c 7087
f0486c68 7088 BUG_ON(cache->ro);
0ef3e66b 7089
ff5714cc 7090 trans = btrfs_join_transaction(root);
f0486c68 7091 BUG_ON(IS_ERR(trans));
5d4f98a2 7092
f0486c68
YZ
7093 alloc_flags = update_block_group_flags(root, cache->flags);
7094 if (alloc_flags != cache->flags)
0e4f8f88
CM
7095 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7096 CHUNK_ALLOC_FORCE);
5d4f98a2 7097
199c36ea 7098 ret = set_block_group_ro(cache, 0);
f0486c68
YZ
7099 if (!ret)
7100 goto out;
7101 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
0e4f8f88
CM
7102 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7103 CHUNK_ALLOC_FORCE);
f0486c68
YZ
7104 if (ret < 0)
7105 goto out;
199c36ea 7106 ret = set_block_group_ro(cache, 0);
f0486c68
YZ
7107out:
7108 btrfs_end_transaction(trans, root);
7109 return ret;
7110}
5d4f98a2 7111
c87f08ca
CM
7112int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7113 struct btrfs_root *root, u64 type)
7114{
7115 u64 alloc_flags = get_alloc_profile(root, type);
0e4f8f88
CM
7116 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7117 CHUNK_ALLOC_FORCE);
c87f08ca
CM
7118}
7119
6d07bcec
MX
7120/*
7121 * helper to account the unused space of all the readonly block groups in the
7122 * list. takes mirrors into account.
7123 */
7124static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7125{
7126 struct btrfs_block_group_cache *block_group;
7127 u64 free_bytes = 0;
7128 int factor;
7129
7130 list_for_each_entry(block_group, groups_list, list) {
7131 spin_lock(&block_group->lock);
7132
7133 if (!block_group->ro) {
7134 spin_unlock(&block_group->lock);
7135 continue;
7136 }
7137
7138 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7139 BTRFS_BLOCK_GROUP_RAID10 |
7140 BTRFS_BLOCK_GROUP_DUP))
7141 factor = 2;
7142 else
7143 factor = 1;
7144
7145 free_bytes += (block_group->key.offset -
7146 btrfs_block_group_used(&block_group->item)) *
7147 factor;
7148
7149 spin_unlock(&block_group->lock);
7150 }
7151
7152 return free_bytes;
7153}
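
/*
 * Worked example of the factor above, with hypothetical sizes: a
 * read-only RAID1 block group with key.offset = 1GiB and 256MiB used
 * contributes (1GiB - 256MiB) * 2 = 1536MiB to free_bytes, because
 * every logical byte in a RAID1/RAID10/DUP group consumes two bytes
 * of raw device space.
 */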
7154
7155/*
7156 * helper to account the unused space of all the readonly block groups in the
7157 * space_info. takes mirrors into account.
7158 */
7159u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7160{
7161 int i;
7162 u64 free_bytes = 0;
7163
7164 spin_lock(&sinfo->lock);
7165
7166 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7167 if (!list_empty(&sinfo->block_groups[i]))
7168 free_bytes += __btrfs_get_ro_block_group_free_space(
7169 &sinfo->block_groups[i]);
7170
7171 spin_unlock(&sinfo->lock);
7172
7173 return free_bytes;
7174}
7175
f0486c68
YZ
7176int btrfs_set_block_group_rw(struct btrfs_root *root,
7177 struct btrfs_block_group_cache *cache)
5d4f98a2 7178{
f0486c68
YZ
7179 struct btrfs_space_info *sinfo = cache->space_info;
7180 u64 num_bytes;
7181
7182 BUG_ON(!cache->ro);
7183
7184 spin_lock(&sinfo->lock);
7185 spin_lock(&cache->lock);
7186 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7187 cache->bytes_super - btrfs_block_group_used(&cache->item);
7188 sinfo->bytes_readonly -= num_bytes;
7189 cache->ro = 0;
7190 spin_unlock(&cache->lock);
7191 spin_unlock(&sinfo->lock);
5d4f98a2
YZ
7192 return 0;
7193}
7194
ba1bf481
JB
7195/*
7196 * checks to see if it's even possible to relocate this block group.
7197 *
7198 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7199 * ok to go ahead and try.
7200 */
7201int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
1a40e23b 7202{
ba1bf481
JB
7203 struct btrfs_block_group_cache *block_group;
7204 struct btrfs_space_info *space_info;
7205 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7206 struct btrfs_device *device;
cdcb725c 7207 u64 min_free;
6719db6a
JB
7208 u64 dev_min = 1;
7209 u64 dev_nr = 0;
cdcb725c 7210 int index;
ba1bf481
JB
7211 int full = 0;
7212 int ret = 0;
1a40e23b 7213
ba1bf481 7214 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1a40e23b 7215
ba1bf481
JB
7216 /* odd, couldn't find the block group, leave it alone */
7217 if (!block_group)
7218 return -1;
1a40e23b 7219
cdcb725c 7220 min_free = btrfs_block_group_used(&block_group->item);
7221
ba1bf481 7222 /* no bytes used, we're good */
cdcb725c 7223 if (!min_free)
1a40e23b
ZY
7224 goto out;
7225
ba1bf481
JB
7226 space_info = block_group->space_info;
7227 spin_lock(&space_info->lock);
17d217fe 7228
ba1bf481 7229 full = space_info->full;
17d217fe 7230
ba1bf481
JB
7231 /*
7232 * if this is the last block group we have in this space, we can't
7ce618db
CM
7233 * relocate it unless we're able to allocate a new chunk below.
7234 *
7235 * Otherwise, we need to make sure we have room in the space to handle
7236 * all of the extents from this block group. If we can, we're good.
ba1bf481 7237 */
7ce618db 7238 if ((space_info->total_bytes != block_group->key.offset) &&
cdcb725c 7239 (space_info->bytes_used + space_info->bytes_reserved +
7240 space_info->bytes_pinned + space_info->bytes_readonly +
7241 min_free < space_info->total_bytes)) {
ba1bf481
JB
7242 spin_unlock(&space_info->lock);
7243 goto out;
17d217fe 7244 }
ba1bf481 7245 spin_unlock(&space_info->lock);
ea8c2819 7246
ba1bf481
JB
7247 /*
7248 * ok we don't have enough space, but maybe we have free space on our
7249 * devices to allocate new chunks for relocation, so loop through our
7250 * alloc devices and guess if we have enough space. However, if we
7251 * were marked as full, then we know there aren't enough chunks, and we
7252 * can just return.
7253 */
7254 ret = -1;
7255 if (full)
7256 goto out;
ea8c2819 7257
cdcb725c 7258 /*
7259 * index:
7260 * 0: raid10
7261 * 1: raid1
7262 * 2: dup
7263 * 3: raid0
7264 * 4: single
7265 */
7266 index = get_block_group_index(block_group);
7267 if (index == 0) {
7268 dev_min = 4;
6719db6a
JB
7269 /* Divide by 2 */
7270 min_free >>= 1;
cdcb725c 7271 } else if (index == 1) {
7272 dev_min = 2;
7273 } else if (index == 2) {
6719db6a
JB
7274 /* Multiply by 2 */
7275 min_free <<= 1;
cdcb725c 7276 } else if (index == 3) {
7277 dev_min = fs_devices->rw_devices;
6719db6a 7278 do_div(min_free, dev_min);
cdcb725c 7279 }
7280
ba1bf481
JB
7281 mutex_lock(&root->fs_info->chunk_mutex);
7282 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7bfc837d 7283 u64 dev_offset;
56bec294 7284
ba1bf481
JB
7285 /*
7286 * check to make sure we can actually find a chunk with enough
7287 * space to fit our block group in.
7288 */
7289 if (device->total_bytes > device->bytes_used + min_free) {
125ccb0a 7290 ret = find_free_dev_extent(device, min_free,
7bfc837d 7291 &dev_offset, NULL);
ba1bf481 7292 if (!ret)
cdcb725c 7293 dev_nr++;
7294
7295 if (dev_nr >= dev_min)
73e48b27 7296 break;
cdcb725c 7297
ba1bf481 7298 ret = -1;
725c8463 7299 }
edbd8d4e 7300 }
ba1bf481 7301 mutex_unlock(&root->fs_info->chunk_mutex);
edbd8d4e 7302out:
ba1bf481 7303 btrfs_put_block_group(block_group);
edbd8d4e
CM
7304 return ret;
7305}
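
/*
 * Worked example of the per-profile math above, with hypothetical
 * numbers: for a RAID10 block group (index 0) with 2GiB used,
 *
 *   dev_min  = 4
 *   min_free = 2GiB >> 1 = 1GiB
 *
 * so relocation is attempted only if at least four rw devices each
 * have a 1GiB free extent (each device carries half of one mirror
 * copy).  For DUP the requirement doubles instead, and for RAID0
 * min_free is divided evenly across all rw devices.
 */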
7306
b2950863
CH
7307static int find_first_block_group(struct btrfs_root *root,
7308 struct btrfs_path *path, struct btrfs_key *key)
0b86a832 7309{
925baedd 7310 int ret = 0;
0b86a832
CM
7311 struct btrfs_key found_key;
7312 struct extent_buffer *leaf;
7313 int slot;
edbd8d4e 7314
0b86a832
CM
7315 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7316 if (ret < 0)
925baedd
CM
7317 goto out;
7318
d397712b 7319 while (1) {
0b86a832 7320 slot = path->slots[0];
edbd8d4e 7321 leaf = path->nodes[0];
0b86a832
CM
7322 if (slot >= btrfs_header_nritems(leaf)) {
7323 ret = btrfs_next_leaf(root, path);
7324 if (ret == 0)
7325 continue;
7326 if (ret < 0)
925baedd 7327 goto out;
0b86a832 7328 break;
edbd8d4e 7329 }
0b86a832 7330 btrfs_item_key_to_cpu(leaf, &found_key, slot);
edbd8d4e 7331
0b86a832 7332 if (found_key.objectid >= key->objectid &&
925baedd
CM
7333 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7334 ret = 0;
7335 goto out;
7336 }
0b86a832 7337 path->slots[0]++;
edbd8d4e 7338 }
925baedd 7339out:
0b86a832 7340 return ret;
edbd8d4e
CM
7341}
7342
0af3d00b
JB
7343void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7344{
7345 struct btrfs_block_group_cache *block_group;
7346 u64 last = 0;
7347
7348 while (1) {
7349 struct inode *inode;
7350
7351 block_group = btrfs_lookup_first_block_group(info, last);
7352 while (block_group) {
7353 spin_lock(&block_group->lock);
7354 if (block_group->iref)
7355 break;
7356 spin_unlock(&block_group->lock);
7357 block_group = next_block_group(info->tree_root,
7358 block_group);
7359 }
7360 if (!block_group) {
7361 if (last == 0)
7362 break;
7363 last = 0;
7364 continue;
7365 }
7366
7367 inode = block_group->inode;
7368 block_group->iref = 0;
7369 block_group->inode = NULL;
7370 spin_unlock(&block_group->lock);
7371 iput(inode);
7372 last = block_group->key.objectid + block_group->key.offset;
7373 btrfs_put_block_group(block_group);
7374 }
7375}
7376
1a40e23b
ZY
7377int btrfs_free_block_groups(struct btrfs_fs_info *info)
7378{
7379 struct btrfs_block_group_cache *block_group;
4184ea7f 7380 struct btrfs_space_info *space_info;
11833d66 7381 struct btrfs_caching_control *caching_ctl;
1a40e23b
ZY
7382 struct rb_node *n;
7383
11833d66
YZ
7384 down_write(&info->extent_commit_sem);
7385 while (!list_empty(&info->caching_block_groups)) {
7386 caching_ctl = list_entry(info->caching_block_groups.next,
7387 struct btrfs_caching_control, list);
7388 list_del(&caching_ctl->list);
7389 put_caching_control(caching_ctl);
7390 }
7391 up_write(&info->extent_commit_sem);
7392
1a40e23b
ZY
7393 spin_lock(&info->block_group_cache_lock);
7394 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7395 block_group = rb_entry(n, struct btrfs_block_group_cache,
7396 cache_node);
1a40e23b
ZY
7397 rb_erase(&block_group->cache_node,
7398 &info->block_group_cache_tree);
d899e052
YZ
7399 spin_unlock(&info->block_group_cache_lock);
7400
80eb234a 7401 down_write(&block_group->space_info->groups_sem);
1a40e23b 7402 list_del(&block_group->list);
80eb234a 7403 up_write(&block_group->space_info->groups_sem);
d2fb3437 7404
817d52f8 7405 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7406 wait_block_group_cache_done(block_group);
817d52f8 7407
3c14874a
JB
7408 /*
7409 * We haven't cached this block group, which means we could
7410 * possibly have excluded extents on this block group.
7411 */
7412 if (block_group->cached == BTRFS_CACHE_NO)
7413 free_excluded_extents(info->extent_root, block_group);
7414
817d52f8 7415 btrfs_remove_free_space_cache(block_group);
11dfe35a 7416 btrfs_put_block_group(block_group);
d899e052
YZ
7417
7418 spin_lock(&info->block_group_cache_lock);
1a40e23b
ZY
7419 }
7420 spin_unlock(&info->block_group_cache_lock);
4184ea7f
CM
7421
7422 /* now that all the block groups are freed, go through and
7423 * free all the space_info structs. This is only called during
7424 * the final stages of unmount, and so we know nobody is
7425 * using them. We call synchronize_rcu() once before we start,
7426 * just to be on the safe side.
7427 */
7428 synchronize_rcu();
7429
8929ecfa
YZ
7430 release_global_block_rsv(info);
7431
4184ea7f
CM
7432 while (!list_empty(&info->space_info)) {
7433 space_info = list_entry(info->space_info.next,
7434 struct btrfs_space_info,
7435 list);
f0486c68 7436 if (space_info->bytes_pinned > 0 ||
fb25e914
JB
7437 space_info->bytes_reserved > 0 ||
7438 space_info->bytes_may_use > 0) {
f0486c68
YZ
7439 WARN_ON(1);
7440 dump_space_info(space_info, 0, 0);
7441 }
4184ea7f
CM
7442 list_del(&space_info->list);
7443 kfree(space_info);
7444 }
1a40e23b
ZY
7445 return 0;
7446}
7447
b742bb82
YZ
7448static void __link_block_group(struct btrfs_space_info *space_info,
7449 struct btrfs_block_group_cache *cache)
7450{
7451 int index = get_block_group_index(cache);
7452
7453 down_write(&space_info->groups_sem);
7454 list_add_tail(&cache->list, &space_info->block_groups[index]);
7455 up_write(&space_info->groups_sem);
7456}
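
/*
 * Sketch of the profile -> list index mapping that __link_block_group()
 * relies on; this restates the contract of get_block_group_index()
 * (defined earlier in this file) rather than introducing a new helper.
 */
static int block_group_index_sketch(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return 0;
	if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return 1;
	if (flags & BTRFS_BLOCK_GROUP_DUP)
		return 2;
	if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return 3;
	return 4;	/* single */
}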
7457
9078a3e1
CM
7458int btrfs_read_block_groups(struct btrfs_root *root)
7459{
7460 struct btrfs_path *path;
7461 int ret;
9078a3e1 7462 struct btrfs_block_group_cache *cache;
be744175 7463 struct btrfs_fs_info *info = root->fs_info;
6324fbf3 7464 struct btrfs_space_info *space_info;
9078a3e1
CM
7465 struct btrfs_key key;
7466 struct btrfs_key found_key;
5f39d397 7467 struct extent_buffer *leaf;
0af3d00b
JB
7468 int need_clear = 0;
7469 u64 cache_gen;
96b5179d 7470
be744175 7471 root = info->extent_root;
9078a3e1 7472 key.objectid = 0;
0b86a832 7473 key.offset = 0;
9078a3e1 7474 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
9078a3e1
CM
7475 path = btrfs_alloc_path();
7476 if (!path)
7477 return -ENOMEM;
026fd317 7478 path->reada = 1;
9078a3e1 7479
6c41761f 7480 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
73bc1876 7481 if (btrfs_test_opt(root, SPACE_CACHE) &&
6c41761f 7482 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
0af3d00b 7483 need_clear = 1;
88c2ba3b
JB
7484 if (btrfs_test_opt(root, CLEAR_CACHE))
7485 need_clear = 1;
0af3d00b 7486
d397712b 7487 while (1) {
0b86a832 7488 ret = find_first_block_group(root, path, &key);
b742bb82
YZ
7489 if (ret > 0)
7490 break;
0b86a832
CM
7491 if (ret != 0)
7492 goto error;
5f39d397
CM
7493 leaf = path->nodes[0];
7494 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8f18cf13 7495 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9078a3e1 7496 if (!cache) {
0b86a832 7497 ret = -ENOMEM;
f0486c68 7498 goto error;
9078a3e1 7499 }
34d52cb6
LZ
7500 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7501 GFP_NOFS);
7502 if (!cache->free_space_ctl) {
7503 kfree(cache);
7504 ret = -ENOMEM;
7505 goto error;
7506 }
3e1ad54f 7507
d2fb3437 7508 atomic_set(&cache->count, 1);
c286ac48 7509 spin_lock_init(&cache->lock);
817d52f8 7510 cache->fs_info = info;
0f9dd46c 7511 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7512 INIT_LIST_HEAD(&cache->cluster_list);
96303081 7513
0af3d00b
JB
7514 if (need_clear)
7515 cache->disk_cache_state = BTRFS_DC_CLEAR;
7516
5f39d397
CM
7517 read_extent_buffer(leaf, &cache->item,
7518 btrfs_item_ptr_offset(leaf, path->slots[0]),
7519 sizeof(cache->item));
9078a3e1 7520 memcpy(&cache->key, &found_key, sizeof(found_key));
0b86a832 7521
9078a3e1 7522 key.objectid = found_key.objectid + found_key.offset;
b3b4aa74 7523 btrfs_release_path(path);
0b86a832 7524 cache->flags = btrfs_block_group_flags(&cache->item);
817d52f8
JB
7525 cache->sectorsize = root->sectorsize;
7526
34d52cb6
LZ
7527 btrfs_init_free_space_ctl(cache);
7528
3c14874a
JB
7529 /*
7530 * We need to exclude the super stripes now so that the space
7531 * info has super bytes accounted for, otherwise we'll think
7532 * we have more space than we actually do.
7533 */
7534 exclude_super_stripes(root, cache);
7535
817d52f8
JB
7536 /*
7537 * check for two cases, either we are full, and therefore
7538 * don't need to bother with the caching work since we won't
7539 * find any space, or we are empty, and we can just add all
7540 * the space in and be done with it. This saves us _alot_ of
7541 * time, particularly in the full case.
7542 */
7543 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
11833d66 7544 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7545 cache->cached = BTRFS_CACHE_FINISHED;
1b2da372 7546 free_excluded_extents(root, cache);
817d52f8 7547 } else if (btrfs_block_group_used(&cache->item) == 0) {
11833d66 7548 cache->last_byte_to_unpin = (u64)-1;
817d52f8
JB
7549 cache->cached = BTRFS_CACHE_FINISHED;
7550 add_new_free_space(cache, root->fs_info,
7551 found_key.objectid,
7552 found_key.objectid +
7553 found_key.offset);
11833d66 7554 free_excluded_extents(root, cache);
817d52f8 7555 }
96b5179d 7556
6324fbf3
CM
7557 ret = update_space_info(info, cache->flags, found_key.offset,
7558 btrfs_block_group_used(&cache->item),
7559 &space_info);
7560 BUG_ON(ret);
7561 cache->space_info = space_info;
1b2da372 7562 spin_lock(&cache->space_info->lock);
f0486c68 7563 cache->space_info->bytes_readonly += cache->bytes_super;
1b2da372
JB
7564 spin_unlock(&cache->space_info->lock);
7565
b742bb82 7566 __link_block_group(space_info, cache);
0f9dd46c
JB
7567
7568 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7569 BUG_ON(ret);
75ccf47d
CM
7570
7571 set_avail_alloc_bits(root->fs_info, cache->flags);
2b82032c 7572 if (btrfs_chunk_readonly(root, cache->key.objectid))
199c36ea 7573 set_block_group_ro(cache, 1);
9078a3e1 7574 }
b742bb82
YZ
7575
7576 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7577 if (!(get_alloc_profile(root, space_info->flags) &
7578 (BTRFS_BLOCK_GROUP_RAID10 |
7579 BTRFS_BLOCK_GROUP_RAID1 |
7580 BTRFS_BLOCK_GROUP_DUP)))
7581 continue;
7582 /*
7583 * avoid allocating from un-mirrored block group if there are
7584 * mirrored block groups.
7585 */
7586 list_for_each_entry(cache, &space_info->block_groups[3], list)
199c36ea 7587 set_block_group_ro(cache, 1);
b742bb82 7588 list_for_each_entry(cache, &space_info->block_groups[4], list)
199c36ea 7589 set_block_group_ro(cache, 1);
9078a3e1 7590 }
f0486c68
YZ
7591
7592 init_global_block_rsv(info);
0b86a832
CM
7593 ret = 0;
7594error:
9078a3e1 7595 btrfs_free_path(path);
0b86a832 7596 return ret;
9078a3e1 7597}
6324fbf3
CM
7598
7599int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7600 struct btrfs_root *root, u64 bytes_used,
e17cade2 7601 u64 type, u64 chunk_objectid, u64 chunk_offset,
6324fbf3
CM
7602 u64 size)
7603{
7604 int ret;
6324fbf3
CM
7605 struct btrfs_root *extent_root;
7606 struct btrfs_block_group_cache *cache;
6324fbf3
CM
7607
7608 extent_root = root->fs_info->extent_root;
6324fbf3 7609
12fcfd22 7610 root->fs_info->last_trans_log_full_commit = trans->transid;
e02119d5 7611
8f18cf13 7612 cache = kzalloc(sizeof(*cache), GFP_NOFS);
0f9dd46c
JB
7613 if (!cache)
7614 return -ENOMEM;
34d52cb6
LZ
7615 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7616 GFP_NOFS);
7617 if (!cache->free_space_ctl) {
7618 kfree(cache);
7619 return -ENOMEM;
7620 }
0f9dd46c 7621
e17cade2 7622 cache->key.objectid = chunk_offset;
6324fbf3 7623 cache->key.offset = size;
d2fb3437 7624 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
96303081 7625 cache->sectorsize = root->sectorsize;
0af3d00b 7626 cache->fs_info = root->fs_info;
96303081 7627
d2fb3437 7628 atomic_set(&cache->count, 1);
c286ac48 7629 spin_lock_init(&cache->lock);
0f9dd46c 7630 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7631 INIT_LIST_HEAD(&cache->cluster_list);
0ef3e66b 7632
34d52cb6
LZ
7633 btrfs_init_free_space_ctl(cache);
7634
6324fbf3 7635 btrfs_set_block_group_used(&cache->item, bytes_used);
6324fbf3
CM
7636 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7637 cache->flags = type;
7638 btrfs_set_block_group_flags(&cache->item, type);
7639
11833d66 7640 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7641 cache->cached = BTRFS_CACHE_FINISHED;
11833d66 7642 exclude_super_stripes(root, cache);
96303081 7643
817d52f8
JB
7644 add_new_free_space(cache, root->fs_info, chunk_offset,
7645 chunk_offset + size);
7646
11833d66
YZ
7647 free_excluded_extents(root, cache);
7648
6324fbf3
CM
7649 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7650 &cache->space_info);
7651 BUG_ON(ret);
c7c144db 7652 update_global_block_rsv(root->fs_info);
1b2da372
JB
7653
7654 spin_lock(&cache->space_info->lock);
f0486c68 7655 cache->space_info->bytes_readonly += cache->bytes_super;
1b2da372
JB
7656 spin_unlock(&cache->space_info->lock);
7657
b742bb82 7658 __link_block_group(cache->space_info, cache);
6324fbf3 7659
0f9dd46c
JB
7660 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7661 BUG_ON(ret);
c286ac48 7662
6324fbf3
CM
7663 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7664 sizeof(cache->item));
7665 BUG_ON(ret);
7666
d18a2c44 7667 set_avail_alloc_bits(extent_root->fs_info, type);
925baedd 7668
6324fbf3
CM
7669 return 0;
7670}
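
/*
 * Sketch (not the exact volumes.c code) of the call shape the chunk
 * allocator uses to register a freshly allocated chunk as a block
 * group: bytes_used starts at zero and the chunk objectid is always
 * BTRFS_FIRST_CHUNK_TREE_OBJECTID.  chunk_offset/chunk_size are the
 * logical start and length chosen by the allocator.
 */
static int register_new_chunk_sketch(struct btrfs_trans_handle *trans,
				     struct btrfs_root *extent_root, u64 type,
				     u64 chunk_offset, u64 chunk_size)
{
	return btrfs_make_block_group(trans, extent_root, 0, type,
				      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				      chunk_offset, chunk_size);
}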
1a40e23b 7671
10ea00f5
ID
7672static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7673{
7674 u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
7675
7676 /* chunk -> extended profile */
7677 if (extra_flags == 0)
7678 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
7679
7680 if (flags & BTRFS_BLOCK_GROUP_DATA)
7681 fs_info->avail_data_alloc_bits &= ~extra_flags;
7682 if (flags & BTRFS_BLOCK_GROUP_METADATA)
7683 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7684 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7685 fs_info->avail_system_alloc_bits &= ~extra_flags;
7686}
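
/*
 * Worked example of the extended-profile handling above: removing the
 * last RAID1 data block group gives
 *
 *   extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK = RAID1
 *   avail_data_alloc_bits &= ~RAID1
 *
 * while a plain "single" group has no profile bits set, so
 * BTRFS_AVAIL_ALLOC_BIT_SINGLE is substituted and cleared instead;
 * that bit exists only in the in-memory extended representation,
 * never on disk.
 */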
7687
1a40e23b
ZY
7688int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7689 struct btrfs_root *root, u64 group_start)
7690{
7691 struct btrfs_path *path;
7692 struct btrfs_block_group_cache *block_group;
44fb5511 7693 struct btrfs_free_cluster *cluster;
0af3d00b 7694 struct btrfs_root *tree_root = root->fs_info->tree_root;
1a40e23b 7695 struct btrfs_key key;
0af3d00b 7696 struct inode *inode;
1a40e23b 7697 int ret;
10ea00f5 7698 int index;
89a55897 7699 int factor;
1a40e23b 7700
1a40e23b
ZY
7701 root = root->fs_info->extent_root;
7702
7703 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7704 BUG_ON(!block_group);
c146afad 7705 BUG_ON(!block_group->ro);
1a40e23b 7706
9f7c43c9 7707 /*
7708 * Free the reserved super bytes from this block group before
7709 * removing it.
7710 */
7711 free_excluded_extents(root, block_group);
7712
1a40e23b 7713 memcpy(&key, &block_group->key, sizeof(key));
10ea00f5 7714 index = get_block_group_index(block_group);
89a55897
JB
7715 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7716 BTRFS_BLOCK_GROUP_RAID1 |
7717 BTRFS_BLOCK_GROUP_RAID10))
7718 factor = 2;
7719 else
7720 factor = 1;
1a40e23b 7721
44fb5511
CM
7722 /* make sure this block group isn't part of an allocation cluster */
7723 cluster = &root->fs_info->data_alloc_cluster;
7724 spin_lock(&cluster->refill_lock);
7725 btrfs_return_cluster_to_free_space(block_group, cluster);
7726 spin_unlock(&cluster->refill_lock);
7727
7728 /*
7729 * make sure this block group isn't part of a metadata
7730 * allocation cluster
7731 */
7732 cluster = &root->fs_info->meta_alloc_cluster;
7733 spin_lock(&cluster->refill_lock);
7734 btrfs_return_cluster_to_free_space(block_group, cluster);
7735 spin_unlock(&cluster->refill_lock);
7736
1a40e23b 7737 path = btrfs_alloc_path();
d8926bb3
MF
7738 if (!path) {
7739 ret = -ENOMEM;
7740 goto out;
7741 }
1a40e23b 7742
10b2f34d 7743 inode = lookup_free_space_inode(tree_root, block_group, path);
0af3d00b 7744 if (!IS_ERR(inode)) {
b532402e
TI
7745 ret = btrfs_orphan_add(trans, inode);
7746 BUG_ON(ret);
0af3d00b
JB
7747 clear_nlink(inode);
7748 /* One for the block groups ref */
7749 spin_lock(&block_group->lock);
7750 if (block_group->iref) {
7751 block_group->iref = 0;
7752 block_group->inode = NULL;
7753 spin_unlock(&block_group->lock);
7754 iput(inode);
7755 } else {
7756 spin_unlock(&block_group->lock);
7757 }
7758 /* One for our lookup ref */
455757c3 7759 btrfs_add_delayed_iput(inode);
0af3d00b
JB
7760 }
7761
7762 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7763 key.offset = block_group->key.objectid;
7764 key.type = 0;
7765
7766 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7767 if (ret < 0)
7768 goto out;
7769 if (ret > 0)
b3b4aa74 7770 btrfs_release_path(path);
0af3d00b
JB
7771 if (ret == 0) {
7772 ret = btrfs_del_item(trans, tree_root, path);
7773 if (ret)
7774 goto out;
b3b4aa74 7775 btrfs_release_path(path);
0af3d00b
JB
7776 }
7777
3dfdb934 7778 spin_lock(&root->fs_info->block_group_cache_lock);
1a40e23b
ZY
7779 rb_erase(&block_group->cache_node,
7780 &root->fs_info->block_group_cache_tree);
3dfdb934 7781 spin_unlock(&root->fs_info->block_group_cache_lock);
817d52f8 7782
80eb234a 7783 down_write(&block_group->space_info->groups_sem);
44fb5511
CM
7784 /*
7785 * we must use list_del_init so people can check to see if they
7786 * are still on the list after taking the semaphore
7787 */
7788 list_del_init(&block_group->list);
10ea00f5
ID
7789 if (list_empty(&block_group->space_info->block_groups[index]))
7790 clear_avail_alloc_bits(root->fs_info, block_group->flags);
80eb234a 7791 up_write(&block_group->space_info->groups_sem);
1a40e23b 7792
817d52f8 7793 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7794 wait_block_group_cache_done(block_group);
817d52f8
JB
7795
7796 btrfs_remove_free_space_cache(block_group);
7797
c146afad
YZ
7798 spin_lock(&block_group->space_info->lock);
7799 block_group->space_info->total_bytes -= block_group->key.offset;
7800 block_group->space_info->bytes_readonly -= block_group->key.offset;
89a55897 7801 block_group->space_info->disk_total -= block_group->key.offset * factor;
c146afad 7802 spin_unlock(&block_group->space_info->lock);
283bb197 7803
0af3d00b
JB
7804 memcpy(&key, &block_group->key, sizeof(key));
7805
283bb197 7806 btrfs_clear_space_info_full(root->fs_info);
c146afad 7807
fa9c0d79
CM
7808 btrfs_put_block_group(block_group);
7809 btrfs_put_block_group(block_group);
1a40e23b
ZY
7810
7811 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7812 if (ret > 0)
7813 ret = -EIO;
7814 if (ret < 0)
7815 goto out;
7816
7817 ret = btrfs_del_item(trans, root, path);
7818out:
7819 btrfs_free_path(path);
7820 return ret;
7821}
acce952b 7822
c59021f8 7823int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7824{
7825 struct btrfs_space_info *space_info;
1aba86d6 7826 struct btrfs_super_block *disk_super;
7827 u64 features;
7828 u64 flags;
7829 int mixed = 0;
c59021f8 7830 int ret;
7831
6c41761f 7832 disk_super = fs_info->super_copy;
1aba86d6 7833 if (!btrfs_super_root(disk_super))
7834 return 1;
c59021f8 7835
1aba86d6 7836 features = btrfs_super_incompat_flags(disk_super);
7837 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7838 mixed = 1;
c59021f8 7839
1aba86d6 7840 flags = BTRFS_BLOCK_GROUP_SYSTEM;
7841 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
c59021f8 7842 if (ret)
1aba86d6 7843 goto out;
c59021f8 7844
1aba86d6 7845 if (mixed) {
7846 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7847 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7848 } else {
7849 flags = BTRFS_BLOCK_GROUP_METADATA;
7850 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7851 if (ret)
7852 goto out;
7853
7854 flags = BTRFS_BLOCK_GROUP_DATA;
7855 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7856 }
7857out:
c59021f8 7858 return ret;
7859}
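
/*
 * Note: a filesystem made with mkfs.btrfs --mixed sets
 * BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS, so data and metadata share a
 * single space_info above (SYSTEM plus DATA|METADATA); otherwise
 * SYSTEM, METADATA and DATA each get their own.
 */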
7860
acce952b 7861int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7862{
7863 return unpin_extent_range(root, start, end);
7864}
7865
7866int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
5378e607 7867 u64 num_bytes, u64 *actual_bytes)
acce952b 7868{
5378e607 7869 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
acce952b 7870}
f7039b1d
LD
7871
7872int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7873{
7874 struct btrfs_fs_info *fs_info = root->fs_info;
7875 struct btrfs_block_group_cache *cache = NULL;
7876 u64 group_trimmed;
7877 u64 start;
7878 u64 end;
7879 u64 trimmed = 0;
7880 int ret = 0;
7881
7882 cache = btrfs_lookup_block_group(fs_info, range->start);
7883
7884 while (cache) {
7885 if (cache->key.objectid >= (range->start + range->len)) {
7886 btrfs_put_block_group(cache);
7887 break;
7888 }
7889
7890 start = max(range->start, cache->key.objectid);
7891 end = min(range->start + range->len,
7892 cache->key.objectid + cache->key.offset);
7893
7894 if (end - start >= range->minlen) {
7895 if (!block_group_cache_done(cache)) {
7896 ret = cache_block_group(cache, NULL, root, 0);
7897 if (!ret)
7898 wait_block_group_cache_done(cache);
7899 }
7900 ret = btrfs_trim_block_group(cache,
7901 &group_trimmed,
7902 start,
7903 end,
7904 range->minlen);
7905
7906 trimmed += group_trimmed;
7907 if (ret) {
7908 btrfs_put_block_group(cache);
7909 break;
7910 }
7911 }
7912
7913 cache = next_block_group(fs_info->tree_root, cache);
7914 }
7915
7916 range->len = trimmed;
7917 return ret;
7918}
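
/*
 * btrfs_trim_fs() is the backend of the generic FITRIM ioctl.  A
 * minimal userspace sketch that exercises it; FITRIM and struct
 * fstrim_range come from <linux/fs.h>.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int trim_whole_fs(const char *mountpoint)
{
	struct fstrim_range range = {
		.start = 0,
		.len = UINT64_MAX,	/* cover every block group */
		.minlen = 0,		/* let the fs pick its minimum */
	};
	int fd = open(mountpoint, O_RDONLY | O_DIRECTORY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, FITRIM, &range);
	close(fd);
	/* on success, range.len reports the bytes actually trimmed */
	return ret;
}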