/* fs/btrfs/disk-io.c */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads. They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->locks in this root. For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets. As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid. This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock. As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked. It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}
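
/*
 * For example, the keyset for the extent tree ends up with the names
 * "btrfs-extent-00" through "btrfs-extent-08", one per possible level,
 * so a lockdep report identifies both the tree and the level of a
 * contended extent_buffer lock.
 */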

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}
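
/*
 * A minimal usage sketch of the two helpers above (this mirrors what
 * csum_tree_block() below does): seed the crc32c with ~0, feed it the
 * payload, then invert and store the result in little-endian order:
 *
 *	u32 crc = ~(u32)0;
 *	u8 csum[BTRFS_CSUM_SIZE];
 *
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, csum);
 */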

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);
			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer. This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated. So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}
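
/*
 * In other words, the on-disk superblock layout checked above is:
 *
 *	offset 0:		the stored csum (BTRFS_CSUM_SIZE bytes;
 *				crc32c only uses the first 4)
 *	offset BTRFS_CSUM_SIZE:	the checksummed region, running up to
 *				BTRFS_SUPER_INFO_SIZE, zero-padded past the
 *				end of struct btrfs_super_block
 */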

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}
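
/*
 * For example, on a RAID1 chunk (num_copies == 2) where the first attempt
 * is served by mirror 1 and fails its transid check, the loop records
 * failed_mirror = 1, retries with mirror_num == 2, and if that copy passes
 * verification, the good data is written back over mirror 1 via
 * repair_eb_io_failure().
 */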

/*
 * checksum a dirty tree block before IO. This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#define CORRUPT(reason, eb, root, slot)					\
	btrfs_crit(root->fs_info,					\
		   "corrupt %s, %s: block=%llu, root=%llu, slot=%d",	\
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",	\
		   reason, btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	/*
	 * Extent buffers from a relocation tree have an owner field that
	 * corresponds to the subvolume tree they are based on. So just from an
	 * extent buffer alone we cannot find out the id of the corresponding
	 * subvolume tree, and therefore cannot figure out if the extent
	 * buffer corresponds to the root of the relocation tree or not. So
	 * skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		struct btrfs_root *check_root;

		key.objectid = btrfs_header_owner(leaf);
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		check_root = btrfs_get_fs_root(fs_info, &key, false);
		/*
		 * The only reason we also check NULL here is that during
		 * open_ctree() some roots have not yet been set up.
		 */
		if (!IS_ERR_OR_NULL(check_root)) {
			struct extent_buffer *eb;

			eb = btrfs_root_node(check_root);
			/* if leaf is the root, then it's fine */
			if (leaf != eb) {
				CORRUPT("non-root leaf's nritems is 0",
					leaf, check_root, 0);
				free_extent_buffer(eb);
				return -EIO;
			}
			free_extent_buffer(eb);
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(fs_info)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense. We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies that the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offsets and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards
		 * the front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other,
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
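
/*
 * A sketch of the leaf layout the checks above enforce: item headers grow
 * forward from the leaf header while item data grows backward from the end
 * of the leaf, e.g. for a three item leaf:
 *
 *	[header][item 0][item 1][item 2] ... [data 2][data 1][data 0]
 *
 * so item 0's data ends exactly at BTRFS_LEAF_DATA_SIZE, and for each
 * consecutive pair of slots, offset_nr(slot) == end_nr(slot + 1).
 */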

static int check_node(struct btrfs_root *root, struct extent_buffer *node)
{
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	u64 bytenr;
	int ret = 0;

	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
		btrfs_crit(root->fs_info,
			   "corrupt node: block %llu root %llu nritems %lu",
			   node->start, root->objectid, nr);
		return -EIO;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
			CORRUPT("invalid item slot", node, root, slot);
			ret = -EIO;
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
			CORRUPT("bad key order", node, root, slot);
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/*
	 * the pending IO might have been the only thing that kept this buffer
	 * in memory. Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && check_node(root, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
		enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
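
/*
 * Usage sketch (this is the pattern btree_submit_bio_hook() below uses for
 * metadata reads): hook the bio before mapping it, so its completion is
 * bounced to a worker thread where checksums can be verified in task
 * context:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
 *	if (ret)
 *		return ret;
 *	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 */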

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = async->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_hook_t *submit_bio_start,
				 extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->status = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
					     int mirror_num, unsigned long bio_flags,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context. Just checksum here; the bio is mapped and
	 * submitted by the done hook below.
	 */
	return btree_csum_one_bio(bio);
}

static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
					    int mirror_num, unsigned long bio_flags,
					    u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context. Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

static int check_async_write(unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}
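
/*
 * Rationale: X86_FEATURE_XMM4_2 is SSE4.2, which includes a hardware crc32c
 * instruction, so checksumming inline is cheap enough that the async
 * offload is not worth it. Tree log blocks are always checksummed inline,
 * presumably to keep fsync latency low.
 */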

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(bio_flags);
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			     int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic64_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);

	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}
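
/*
 * Illustrative call (quota enable is one in-tree user of this helper): a
 * transaction handle is required because the new root's node is a freshly
 * COWed block and the root item lands in the tree of tree roots:
 *
 *	quota_root = btrfs_create_tree(trans, fs_info,
 *				       BTRFS_QUOTA_TREE_OBJECTID);
 *	if (IS_ERR(quota_root))
 *		return PTR_ERR(quota_root);
 */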

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done. They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					  &root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* the caller is responsible for calling free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}
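
/*
 * Note the preload pattern above: radix_tree_preload() pre-allocates the
 * nodes radix_tree_insert() may need while we are still allowed to sleep
 * (GFP_NOFS), so the insert itself can run under the fs_roots_radix_lock
 * spinlock without allocating; radix_tree_preload_end() then re-enables
 * preemption.
 */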

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}
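
/*
 * Usage sketch (check_leaf() above does exactly this): look up a root by
 * owner id, with offset (u64)-1 meaning "the latest root item for this
 * objectid":
 *
 *	struct btrfs_key key = {
 *		.objectid = btrfs_header_owner(leaf),
 *		.type = BTRFS_ROOT_ITEM_KEY,
 *		.offset = (u64)-1,
 *	};
 *	struct btrfs_root *check_root = btrfs_get_fs_root(fs_info, &key,
 *							  false);
 */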

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions. This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;
	struct btrfs_trans_handle *trans;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Recheck: the status of the fs may have changed between the
		 * check above and the trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(fs_info);
		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example. So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans);
		if (ret)
			btrfs_err(fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}
1924
1925 static int transaction_kthread(void *arg)
1926 {
1927 struct btrfs_root *root = arg;
1928 struct btrfs_fs_info *fs_info = root->fs_info;
1929 struct btrfs_trans_handle *trans;
1930 struct btrfs_transaction *cur;
1931 u64 transid;
1932 unsigned long now;
1933 unsigned long delay;
1934 bool cannot_commit;
1935
1936 do {
1937 cannot_commit = false;
1938 delay = HZ * fs_info->commit_interval;
1939 mutex_lock(&fs_info->transaction_kthread_mutex);
1940
1941 spin_lock(&fs_info->trans_lock);
1942 cur = fs_info->running_transaction;
1943 if (!cur) {
1944 spin_unlock(&fs_info->trans_lock);
1945 goto sleep;
1946 }
1947
1948 now = get_seconds();
1949 if (cur->state < TRANS_STATE_BLOCKED &&
1950 (now < cur->start_time ||
1951 now - cur->start_time < fs_info->commit_interval)) {
1952 spin_unlock(&fs_info->trans_lock);
1953 delay = HZ * 5;
1954 goto sleep;
1955 }
1956 transid = cur->transid;
1957 spin_unlock(&fs_info->trans_lock);
1958
1959 /* If the file system is aborted, this will always fail. */
1960 trans = btrfs_attach_transaction(root);
1961 if (IS_ERR(trans)) {
1962 if (PTR_ERR(trans) != -ENOENT)
1963 cannot_commit = true;
1964 goto sleep;
1965 }
1966 if (transid == trans->transid) {
1967 btrfs_commit_transaction(trans);
1968 } else {
1969 btrfs_end_transaction(trans);
1970 }
1971 sleep:
1972 wake_up_process(fs_info->cleaner_kthread);
1973 mutex_unlock(&fs_info->transaction_kthread_mutex);
1974
1975 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1976 &fs_info->fs_state)))
1977 btrfs_cleanup_transaction(fs_info);
1978 set_current_state(TASK_INTERRUPTIBLE);
1979 if (!kthread_should_stop() &&
1980 (!btrfs_transaction_blocked(fs_info) ||
1981 cannot_commit))
1982 schedule_timeout(delay);
1983 __set_current_state(TASK_RUNNING);
1984 } while (!kthread_should_stop());
1985 return 0;
1986 }
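
/*
 * Worked example (assuming the default commit_interval of 30 seconds):
 * on each wakeup, if the running transaction is younger than 30s and not
 * blocked, the loop above re-arms a short HZ * 5 sleep; otherwise it
 * attaches to the transaction and commits it:
 *
 *	if (cur->state < TRANS_STATE_BLOCKED &&
 *	    now - cur->start_time < fs_info->commit_interval)
 *		delay = HZ * 5;		(poll again soon)
 *	else
 *		btrfs_commit_transaction(trans);
 */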
1987
1988 /*
1989 * This finds the highest generation in the array of root backups.
1990 * The index of the newest entry is returned, or -1 if we can't
1991 * find anything.
1992 *
1993 * We check that the array is valid by comparing the generation of
1994 * the latest root in the array with the generation in the super
1995 * block. If they don't match, we pitch it.
1996 */
1997 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1998 {
1999 u64 cur;
2000 int newest_index = -1;
2001 struct btrfs_root_backup *root_backup;
2002 int i;
2003
2004 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2005 root_backup = info->super_copy->super_roots + i;
2006 cur = btrfs_backup_tree_root_gen(root_backup);
2007 if (cur == newest_gen)
2008 newest_index = i;
2009 }
2010
2011 /* check to see if we actually wrapped around */
2012 if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
2013 root_backup = info->super_copy->super_roots;
2014 cur = btrfs_backup_tree_root_gen(root_backup);
2015 if (cur == newest_gen)
2016 newest_index = 0;
2017 }
2018 return newest_index;
2019 }
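
/*
 * Worked example (hedged; BTRFS_NUM_BACKUP_ROOTS is 4): with backup tree
 * root generations { 96, 97, 98, 99 } and a super block generation of
 * 99, slot 3 is the newest. The wrap check above additionally prefers
 * slot 0 when it holds the same generation, i.e. when the ring has
 * wrapped around.
 */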
2020
2021
2022 /*
2023 * find the oldest backup so we know where to store new entries
2024 * in the backup array. This will set the backup_root_index
2025 * field in the fs_info struct
2026 */
2027 static void find_oldest_super_backup(struct btrfs_fs_info *info,
2028 u64 newest_gen)
2029 {
2030 int newest_index = -1;
2031
2032 newest_index = find_newest_super_backup(info, newest_gen);
2033 /* if there was garbage in there, just move along */
2034 if (newest_index == -1) {
2035 info->backup_root_index = 0;
2036 } else {
2037 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
2038 }
2039 }
2040
2041 /*
2042 * copy all the root pointers into the super backup array.
2043 * this will bump the backup pointer by one when it is
2044 * done
2045 */
2046 static void backup_super_roots(struct btrfs_fs_info *info)
2047 {
2048 int next_backup;
2049 struct btrfs_root_backup *root_backup;
2050 int last_backup;
2051
2052 next_backup = info->backup_root_index;
2053 last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
2054 BTRFS_NUM_BACKUP_ROOTS;
2055
2056 /*
2057 * Just overwrite the last backup if we're at the same generation;
2058 * this happens only at umount.
2059 */
2060 root_backup = info->super_for_commit->super_roots + last_backup;
2061 if (btrfs_backup_tree_root_gen(root_backup) ==
2062 btrfs_header_generation(info->tree_root->node))
2063 next_backup = last_backup;
2064
2065 root_backup = info->super_for_commit->super_roots + next_backup;
2066
2067 /*
2068 * make sure all of our padding and empty slots get zero filled
2069 * regardless of which ones we use today
2070 */
2071 memset(root_backup, 0, sizeof(*root_backup));
2072
2073 info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
2074
2075 btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
2076 btrfs_set_backup_tree_root_gen(root_backup,
2077 btrfs_header_generation(info->tree_root->node));
2078
2079 btrfs_set_backup_tree_root_level(root_backup,
2080 btrfs_header_level(info->tree_root->node));
2081
2082 btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
2083 btrfs_set_backup_chunk_root_gen(root_backup,
2084 btrfs_header_generation(info->chunk_root->node));
2085 btrfs_set_backup_chunk_root_level(root_backup,
2086 btrfs_header_level(info->chunk_root->node));
2087
2088 btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
2089 btrfs_set_backup_extent_root_gen(root_backup,
2090 btrfs_header_generation(info->extent_root->node));
2091 btrfs_set_backup_extent_root_level(root_backup,
2092 btrfs_header_level(info->extent_root->node));
2093
2094 /*
2095 * we might commit during log recovery, which happens before we set
2096 * the fs_root. Make sure it is valid before we fill it in.
2097 */
2098 if (info->fs_root && info->fs_root->node) {
2099 btrfs_set_backup_fs_root(root_backup,
2100 info->fs_root->node->start);
2101 btrfs_set_backup_fs_root_gen(root_backup,
2102 btrfs_header_generation(info->fs_root->node));
2103 btrfs_set_backup_fs_root_level(root_backup,
2104 btrfs_header_level(info->fs_root->node));
2105 }
2106
2107 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
2108 btrfs_set_backup_dev_root_gen(root_backup,
2109 btrfs_header_generation(info->dev_root->node));
2110 btrfs_set_backup_dev_root_level(root_backup,
2111 btrfs_header_level(info->dev_root->node));
2112
2113 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
2114 btrfs_set_backup_csum_root_gen(root_backup,
2115 btrfs_header_generation(info->csum_root->node));
2116 btrfs_set_backup_csum_root_level(root_backup,
2117 btrfs_header_level(info->csum_root->node));
2118
2119 btrfs_set_backup_total_bytes(root_backup,
2120 btrfs_super_total_bytes(info->super_copy));
2121 btrfs_set_backup_bytes_used(root_backup,
2122 btrfs_super_bytes_used(info->super_copy));
2123 btrfs_set_backup_num_devices(root_backup,
2124 btrfs_super_num_devices(info->super_copy));
2125
2126 /*
2127 * if we don't copy this out to the super_copy, it won't get remembered
2128 * for the next commit
2129 */
2130 memcpy(&info->super_copy->super_roots,
2131 &info->super_for_commit->super_roots,
2132 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
2133 }
2134
2135 /*
2136 * this copies info out of the root backup array and back into
2137 * the in-memory super block. It is meant to help iterate through
2138 * the array, so you send it the number of backups you've already
2139 * tried and the last backup index you used.
2140 *
2141 * this returns -1 when it has tried all the backups
2142 */
2143 static noinline int next_root_backup(struct btrfs_fs_info *info,
2144 struct btrfs_super_block *super,
2145 int *num_backups_tried, int *backup_index)
2146 {
2147 struct btrfs_root_backup *root_backup;
2148 int newest = *backup_index;
2149
2150 if (*num_backups_tried == 0) {
2151 u64 gen = btrfs_super_generation(super);
2152
2153 newest = find_newest_super_backup(info, gen);
2154 if (newest == -1)
2155 return -1;
2156
2157 *backup_index = newest;
2158 *num_backups_tried = 1;
2159 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2160 /* we've tried all the backups, all done */
2161 return -1;
2162 } else {
2163 /* jump to the next oldest backup */
2164 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2165 BTRFS_NUM_BACKUP_ROOTS;
2166 *backup_index = newest;
2167 *num_backups_tried += 1;
2168 }
2169 root_backup = super->super_roots + newest;
2170
2171 btrfs_set_super_generation(super,
2172 btrfs_backup_tree_root_gen(root_backup));
2173 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2174 btrfs_set_super_root_level(super,
2175 btrfs_backup_tree_root_level(root_backup));
2176 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2177
2178 /*
2179 * FIXME: the total bytes and num_devices need to match, or we
2180 * need a fsck.
2181 */
2182 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2183 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2184 return 0;
2185 }
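
/*
 * Usage sketch (this mirrors the retry_root_backup logic in open_ctree()
 * below): callers walk from the newest backup towards the oldest until a
 * tree root can be read:
 *
 *	while (reading the tree root fails) {
 *		ret = next_root_backup(fs_info, fs_info->super_copy,
 *				       &num_backups_tried, &backup_index);
 *		if (ret == -1)
 *			give up and fail the mount;
 *		re-read the tree root from the rewritten super_copy;
 *	}
 */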
2186
2187 /* helper to cleanup workers */
2188 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2189 {
2190 btrfs_destroy_workqueue(fs_info->fixup_workers);
2191 btrfs_destroy_workqueue(fs_info->delalloc_workers);
2192 btrfs_destroy_workqueue(fs_info->workers);
2193 btrfs_destroy_workqueue(fs_info->endio_workers);
2194 btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2195 btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2196 btrfs_destroy_workqueue(fs_info->rmw_workers);
2197 btrfs_destroy_workqueue(fs_info->endio_write_workers);
2198 btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2199 btrfs_destroy_workqueue(fs_info->submit_workers);
2200 btrfs_destroy_workqueue(fs_info->delayed_workers);
2201 btrfs_destroy_workqueue(fs_info->caching_workers);
2202 btrfs_destroy_workqueue(fs_info->readahead_workers);
2203 btrfs_destroy_workqueue(fs_info->flush_workers);
2204 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2205 btrfs_destroy_workqueue(fs_info->extent_workers);
2206 /*
2207 * Now that all other work queues are destroyed, we can safely destroy
2208 * the queues used for metadata I/O, since tasks from those other work
2209 * queues can do metadata I/O operations.
2210 */
2211 btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2212 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2213 }
2214
2215 static void free_root_extent_buffers(struct btrfs_root *root)
2216 {
2217 if (root) {
2218 free_extent_buffer(root->node);
2219 free_extent_buffer(root->commit_root);
2220 root->node = NULL;
2221 root->commit_root = NULL;
2222 }
2223 }
2224
2225 /* helper to cleanup tree roots */
2226 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2227 {
2228 free_root_extent_buffers(info->tree_root);
2229
2230 free_root_extent_buffers(info->dev_root);
2231 free_root_extent_buffers(info->extent_root);
2232 free_root_extent_buffers(info->csum_root);
2233 free_root_extent_buffers(info->quota_root);
2234 free_root_extent_buffers(info->uuid_root);
2235 if (chunk_root)
2236 free_root_extent_buffers(info->chunk_root);
2237 free_root_extent_buffers(info->free_space_root);
2238 }
2239
2240 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2241 {
2242 int ret;
2243 struct btrfs_root *gang[8];
2244 int i;
2245
2246 while (!list_empty(&fs_info->dead_roots)) {
2247 gang[0] = list_entry(fs_info->dead_roots.next,
2248 struct btrfs_root, root_list);
2249 list_del(&gang[0]->root_list);
2250
2251 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2252 btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2253 } else {
2254 free_extent_buffer(gang[0]->node);
2255 free_extent_buffer(gang[0]->commit_root);
2256 btrfs_put_fs_root(gang[0]);
2257 }
2258 }
2259
2260 while (1) {
2261 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2262 (void **)gang, 0,
2263 ARRAY_SIZE(gang));
2264 if (!ret)
2265 break;
2266 for (i = 0; i < ret; i++)
2267 btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2268 }
2269
2270 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2271 btrfs_free_log_root_tree(NULL, fs_info);
2272 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2273 }
2274 }
2275
2276 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2277 {
2278 mutex_init(&fs_info->scrub_lock);
2279 atomic_set(&fs_info->scrubs_running, 0);
2280 atomic_set(&fs_info->scrub_pause_req, 0);
2281 atomic_set(&fs_info->scrubs_paused, 0);
2282 atomic_set(&fs_info->scrub_cancel_req, 0);
2283 init_waitqueue_head(&fs_info->scrub_pause_wait);
2284 fs_info->scrub_workers_refcnt = 0;
2285 }
2286
2287 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2288 {
2289 spin_lock_init(&fs_info->balance_lock);
2290 mutex_init(&fs_info->balance_mutex);
2291 atomic_set(&fs_info->balance_running, 0);
2292 atomic_set(&fs_info->balance_pause_req, 0);
2293 atomic_set(&fs_info->balance_cancel_req, 0);
2294 fs_info->balance_ctl = NULL;
2295 init_waitqueue_head(&fs_info->balance_wait_q);
2296 }
2297
2298 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2299 {
2300 struct inode *inode = fs_info->btree_inode;
2301
2302 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2303 set_nlink(inode, 1);
2304 /*
2305 * We set the i_size on the btree inode to the maximum possible
2306 * offset. The real end of the address space is determined by all
2307 * of the devices in the system.
2308 */
2309 inode->i_size = OFFSET_MAX;
2310 inode->i_mapping->a_ops = &btree_aops;
2311
2312 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2313 extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2314 BTRFS_I(inode)->io_tree.track_uptodate = 0;
2315 extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2316
2317 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2318
2319 BTRFS_I(inode)->root = fs_info->tree_root;
2320 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2321 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2322 btrfs_insert_inode_hash(inode);
2323 }
2324
2325 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2326 {
2327 fs_info->dev_replace.lock_owner = 0;
2328 atomic_set(&fs_info->dev_replace.nesting_level, 0);
2329 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2330 rwlock_init(&fs_info->dev_replace.lock);
2331 atomic_set(&fs_info->dev_replace.read_locks, 0);
2332 atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2333 init_waitqueue_head(&fs_info->replace_wait);
2334 init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2335 }
2336
2337 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2338 {
2339 spin_lock_init(&fs_info->qgroup_lock);
2340 mutex_init(&fs_info->qgroup_ioctl_lock);
2341 fs_info->qgroup_tree = RB_ROOT;
2342 fs_info->qgroup_op_tree = RB_ROOT;
2343 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2344 fs_info->qgroup_seq = 1;
2345 fs_info->qgroup_ulist = NULL;
2346 fs_info->qgroup_rescan_running = false;
2347 mutex_init(&fs_info->qgroup_rescan_lock);
2348 }
2349
2350 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2351 struct btrfs_fs_devices *fs_devices)
2352 {
2353 int max_active = fs_info->thread_pool_size;
2354 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2355
2356 fs_info->workers =
2357 btrfs_alloc_workqueue(fs_info, "worker",
2358 flags | WQ_HIGHPRI, max_active, 16);
2359
2360 fs_info->delalloc_workers =
2361 btrfs_alloc_workqueue(fs_info, "delalloc",
2362 flags, max_active, 2);
2363
2364 fs_info->flush_workers =
2365 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2366 flags, max_active, 0);
2367
2368 fs_info->caching_workers =
2369 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2370
2371 /*
2372 * A higher idle thresh on the submit workers makes it much more
2373 * likely that bios will be sent down in a sane order to the
2374 * devices.
2375 */
2376 fs_info->submit_workers =
2377 btrfs_alloc_workqueue(fs_info, "submit", flags,
2378 min_t(u64, fs_devices->num_devices,
2379 max_active), 64);
2380
2381 fs_info->fixup_workers =
2382 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2383
2384 /*
2385 * Endios are largely parallel and should have a very
2386 * low idle thresh.
2387 */
2388 fs_info->endio_workers =
2389 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2390 fs_info->endio_meta_workers =
2391 btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2392 max_active, 4);
2393 fs_info->endio_meta_write_workers =
2394 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2395 max_active, 2);
2396 fs_info->endio_raid56_workers =
2397 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2398 max_active, 4);
2399 fs_info->endio_repair_workers =
2400 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2401 fs_info->rmw_workers =
2402 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2403 fs_info->endio_write_workers =
2404 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2405 max_active, 2);
2406 fs_info->endio_freespace_worker =
2407 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2408 max_active, 0);
2409 fs_info->delayed_workers =
2410 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2411 max_active, 0);
2412 fs_info->readahead_workers =
2413 btrfs_alloc_workqueue(fs_info, "readahead", flags,
2414 max_active, 2);
2415 fs_info->qgroup_rescan_workers =
2416 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2417 fs_info->extent_workers =
2418 btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2419 min_t(u64, fs_devices->num_devices,
2420 max_active), 8);
2421
2422 if (!(fs_info->workers && fs_info->delalloc_workers &&
2423 fs_info->submit_workers && fs_info->flush_workers &&
2424 fs_info->endio_workers && fs_info->endio_meta_workers &&
2425 fs_info->endio_meta_write_workers &&
2426 fs_info->endio_repair_workers &&
2427 fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2428 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2429 fs_info->caching_workers && fs_info->readahead_workers &&
2430 fs_info->fixup_workers && fs_info->delayed_workers &&
2431 fs_info->extent_workers &&
2432 fs_info->qgroup_rescan_workers)) {
2433 return -ENOMEM;
2434 }
2435
2436 return 0;
2437 }
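
/*
 * Sketch of the allocation pattern above (hedged): each queue comes from
 * btrfs_alloc_workqueue(fs_info, name, flags, limit_active, thresh),
 * where 'thresh' is a queue-depth threshold used to auto-tune the active
 * worker count. A hypothetical queue would be paired with
 * btrfs_destroy_workqueue() like the ones in btrfs_stop_all_workers():
 *
 *	struct btrfs_workqueue *wq;
 *
 *	wq = btrfs_alloc_workqueue(fs_info, "example", flags, max_active, 2);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	btrfs_destroy_workqueue(wq);
 */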
2438
2439 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2440 struct btrfs_fs_devices *fs_devices)
2441 {
2442 int ret;
2443 struct btrfs_root *log_tree_root;
2444 struct btrfs_super_block *disk_super = fs_info->super_copy;
2445 u64 bytenr = btrfs_super_log_root(disk_super);
2446
2447 if (fs_devices->rw_devices == 0) {
2448 btrfs_warn(fs_info, "log replay required on RO media");
2449 return -EIO;
2450 }
2451
2452 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2453 if (!log_tree_root)
2454 return -ENOMEM;
2455
2456 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2457
2458 log_tree_root->node = read_tree_block(fs_info, bytenr,
2459 fs_info->generation + 1);
2460 if (IS_ERR(log_tree_root->node)) {
2461 btrfs_warn(fs_info, "failed to read log tree");
2462 ret = PTR_ERR(log_tree_root->node);
2463 kfree(log_tree_root);
2464 return ret;
2465 } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2466 btrfs_err(fs_info, "failed to read log tree");
2467 free_extent_buffer(log_tree_root->node);
2468 kfree(log_tree_root);
2469 return -EIO;
2470 }
2471 /* returns with log_tree_root freed on success */
2472 ret = btrfs_recover_log_trees(log_tree_root);
2473 if (ret) {
2474 btrfs_handle_fs_error(fs_info, ret,
2475 "Failed to recover log tree");
2476 free_extent_buffer(log_tree_root->node);
2477 kfree(log_tree_root);
2478 return ret;
2479 }
2480
2481 if (fs_info->sb->s_flags & MS_RDONLY) {
2482 ret = btrfs_commit_super(fs_info);
2483 if (ret)
2484 return ret;
2485 }
2486
2487 return 0;
2488 }
2489
2490 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2491 {
2492 struct btrfs_root *tree_root = fs_info->tree_root;
2493 struct btrfs_root *root;
2494 struct btrfs_key location;
2495 int ret;
2496
2497 BUG_ON(!fs_info->tree_root);
2498
2499 location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2500 location.type = BTRFS_ROOT_ITEM_KEY;
2501 location.offset = 0;
2502
2503 root = btrfs_read_tree_root(tree_root, &location);
2504 if (IS_ERR(root))
2505 return PTR_ERR(root);
2506 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2507 fs_info->extent_root = root;
2508
2509 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2510 root = btrfs_read_tree_root(tree_root, &location);
2511 if (IS_ERR(root))
2512 return PTR_ERR(root);
2513 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2514 fs_info->dev_root = root;
2515 btrfs_init_devices_late(fs_info);
2516
2517 location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2518 root = btrfs_read_tree_root(tree_root, &location);
2519 if (IS_ERR(root))
2520 return PTR_ERR(root);
2521 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2522 fs_info->csum_root = root;
2523
2524 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2525 root = btrfs_read_tree_root(tree_root, &location);
2526 if (!IS_ERR(root)) {
2527 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2528 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2529 fs_info->quota_root = root;
2530 }
2531
2532 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2533 root = btrfs_read_tree_root(tree_root, &location);
2534 if (IS_ERR(root)) {
2535 ret = PTR_ERR(root);
2536 if (ret != -ENOENT)
2537 return ret;
2538 } else {
2539 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2540 fs_info->uuid_root = root;
2541 }
2542
2543 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2544 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2545 root = btrfs_read_tree_root(tree_root, &location);
2546 if (IS_ERR(root))
2547 return PTR_ERR(root);
2548 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2549 fs_info->free_space_root = root;
2550 }
2551
2552 return 0;
2553 }
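
/*
 * Sketch (hedged): each persistent tree above is looked up with the same
 * key shape in the tree of tree roots; only the objectid differs:
 *
 *	struct btrfs_key location = {
 *		.objectid = BTRFS_EXTENT_TREE_OBJECTID,	(or DEV, CSUM, ...)
 *		.type = BTRFS_ROOT_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	root = btrfs_read_tree_root(tree_root, &location);
 */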
2554
2555 int open_ctree(struct super_block *sb,
2556 struct btrfs_fs_devices *fs_devices,
2557 char *options)
2558 {
2559 u32 sectorsize;
2560 u32 nodesize;
2561 u32 stripesize;
2562 u64 generation;
2563 u64 features;
2564 struct btrfs_key location;
2565 struct buffer_head *bh;
2566 struct btrfs_super_block *disk_super;
2567 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2568 struct btrfs_root *tree_root;
2569 struct btrfs_root *chunk_root;
2570 int ret;
2571 int err = -EINVAL;
2572 int num_backups_tried = 0;
2573 int backup_index = 0;
2574 int max_active;
2575 int clear_free_space_tree = 0;
2576
2577 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2578 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2579 if (!tree_root || !chunk_root) {
2580 err = -ENOMEM;
2581 goto fail;
2582 }
2583
2584 ret = init_srcu_struct(&fs_info->subvol_srcu);
2585 if (ret) {
2586 err = ret;
2587 goto fail;
2588 }
2589
2590 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2591 if (ret) {
2592 err = ret;
2593 goto fail_srcu;
2594 }
2595 fs_info->dirty_metadata_batch = PAGE_SIZE *
2596 (1 + ilog2(nr_cpu_ids));
2597
2598 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2599 if (ret) {
2600 err = ret;
2601 goto fail_dirty_metadata_bytes;
2602 }
2603
2604 ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2605 if (ret) {
2606 err = ret;
2607 goto fail_delalloc_bytes;
2608 }
2609
2610 fs_info->btree_inode = new_inode(sb);
2611 if (!fs_info->btree_inode) {
2612 err = -ENOMEM;
2613 goto fail_bio_counter;
2614 }
2615
2616 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2617
2618 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2619 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2620 INIT_LIST_HEAD(&fs_info->trans_list);
2621 INIT_LIST_HEAD(&fs_info->dead_roots);
2622 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2623 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2624 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2625 spin_lock_init(&fs_info->delalloc_root_lock);
2626 spin_lock_init(&fs_info->trans_lock);
2627 spin_lock_init(&fs_info->fs_roots_radix_lock);
2628 spin_lock_init(&fs_info->delayed_iput_lock);
2629 spin_lock_init(&fs_info->defrag_inodes_lock);
2630 spin_lock_init(&fs_info->tree_mod_seq_lock);
2631 spin_lock_init(&fs_info->super_lock);
2632 spin_lock_init(&fs_info->qgroup_op_lock);
2633 spin_lock_init(&fs_info->buffer_lock);
2634 spin_lock_init(&fs_info->unused_bgs_lock);
2635 rwlock_init(&fs_info->tree_mod_log_lock);
2636 mutex_init(&fs_info->unused_bg_unpin_mutex);
2637 mutex_init(&fs_info->delete_unused_bgs_mutex);
2638 mutex_init(&fs_info->reloc_mutex);
2639 mutex_init(&fs_info->delalloc_root_mutex);
2640 mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2641 seqlock_init(&fs_info->profiles_lock);
2642
2643 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2644 INIT_LIST_HEAD(&fs_info->space_info);
2645 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2646 INIT_LIST_HEAD(&fs_info->unused_bgs);
2647 btrfs_mapping_init(&fs_info->mapping_tree);
2648 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2649 BTRFS_BLOCK_RSV_GLOBAL);
2650 btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2651 BTRFS_BLOCK_RSV_DELALLOC);
2652 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2653 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2654 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2655 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2656 BTRFS_BLOCK_RSV_DELOPS);
2657 atomic_set(&fs_info->nr_async_submits, 0);
2658 atomic_set(&fs_info->async_delalloc_pages, 0);
2659 atomic_set(&fs_info->async_submit_draining, 0);
2660 atomic_set(&fs_info->nr_async_bios, 0);
2661 atomic_set(&fs_info->defrag_running, 0);
2662 atomic_set(&fs_info->qgroup_op_seq, 0);
2663 atomic_set(&fs_info->reada_works_cnt, 0);
2664 atomic64_set(&fs_info->tree_mod_seq, 0);
2665 fs_info->sb = sb;
2666 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2667 fs_info->metadata_ratio = 0;
2668 fs_info->defrag_inodes = RB_ROOT;
2669 atomic64_set(&fs_info->free_chunk_space, 0);
2670 fs_info->tree_mod_log = RB_ROOT;
2671 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2672 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2673 /* readahead state */
2674 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2675 spin_lock_init(&fs_info->reada_lock);
2676
2677 fs_info->thread_pool_size = min_t(unsigned long,
2678 num_online_cpus() + 2, 8);
2679
2680 INIT_LIST_HEAD(&fs_info->ordered_roots);
2681 spin_lock_init(&fs_info->ordered_root_lock);
2682 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2683 GFP_KERNEL);
2684 if (!fs_info->delayed_root) {
2685 err = -ENOMEM;
2686 goto fail_iput;
2687 }
2688 btrfs_init_delayed_root(fs_info->delayed_root);
2689
2690 btrfs_init_scrub(fs_info);
2691 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2692 fs_info->check_integrity_print_mask = 0;
2693 #endif
2694 btrfs_init_balance(fs_info);
2695 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2696
2697 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2698 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2699
2700 btrfs_init_btree_inode(fs_info);
2701
2702 spin_lock_init(&fs_info->block_group_cache_lock);
2703 fs_info->block_group_cache_tree = RB_ROOT;
2704 fs_info->first_logical_byte = (u64)-1;
2705
2706 extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2707 extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2708 fs_info->pinned_extents = &fs_info->freed_extents[0];
2709 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2710
2711 mutex_init(&fs_info->ordered_operations_mutex);
2712 mutex_init(&fs_info->tree_log_mutex);
2713 mutex_init(&fs_info->chunk_mutex);
2714 mutex_init(&fs_info->transaction_kthread_mutex);
2715 mutex_init(&fs_info->cleaner_mutex);
2716 mutex_init(&fs_info->volume_mutex);
2717 mutex_init(&fs_info->ro_block_group_mutex);
2718 init_rwsem(&fs_info->commit_root_sem);
2719 init_rwsem(&fs_info->cleanup_work_sem);
2720 init_rwsem(&fs_info->subvol_sem);
2721 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2722
2723 btrfs_init_dev_replace_locks(fs_info);
2724 btrfs_init_qgroup(fs_info);
2725
2726 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2727 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2728
2729 init_waitqueue_head(&fs_info->transaction_throttle);
2730 init_waitqueue_head(&fs_info->transaction_wait);
2731 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2732 init_waitqueue_head(&fs_info->async_submit_wait);
2733
2734 INIT_LIST_HEAD(&fs_info->pinned_chunks);
2735
2736 /* Usable values until the real ones are cached from the superblock */
2737 fs_info->nodesize = 4096;
2738 fs_info->sectorsize = 4096;
2739 fs_info->stripesize = 4096;
2740
2741 ret = btrfs_alloc_stripe_hash_table(fs_info);
2742 if (ret) {
2743 err = ret;
2744 goto fail_alloc;
2745 }
2746
2747 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2748
2749 invalidate_bdev(fs_devices->latest_bdev);
2750
2751 /*
2752 * Read super block and check the signature bytes only
2753 */
2754 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2755 if (IS_ERR(bh)) {
2756 err = PTR_ERR(bh);
2757 goto fail_alloc;
2758 }
2759
2760 /*
2761 * We want to check superblock checksum, the type is stored inside.
2762 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2763 */
2764 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2765 btrfs_err(fs_info, "superblock checksum mismatch");
2766 err = -EINVAL;
2767 brelse(bh);
2768 goto fail_alloc;
2769 }
2770
2771 /*
2772 * super_copy is zeroed at allocation time and we never touch the
2773 * following bytes up to INFO_SIZE, the checksum is calculated from
2774 * the whole block of INFO_SIZE
2775 */
2776 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2777 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2778 sizeof(*fs_info->super_for_commit));
2779 brelse(bh);
2780
2781 memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2782
2783 ret = btrfs_check_super_valid(fs_info);
2784 if (ret) {
2785 btrfs_err(fs_info, "superblock contains fatal errors");
2786 err = -EINVAL;
2787 goto fail_alloc;
2788 }
2789
2790 disk_super = fs_info->super_copy;
2791 if (!btrfs_super_root(disk_super))
2792 goto fail_alloc;
2793
2794 /* check FS state, whether FS is broken. */
2795 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2796 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2797
2798 /*
2799 * Run through our array of backup supers and set up
2800 * our ring pointer to the oldest one.
2801 */
2802 generation = btrfs_super_generation(disk_super);
2803 find_oldest_super_backup(fs_info, generation);
2804
2805 /*
2806 * In the long term, we'll store the compression type in the super
2807 * block, and it'll be used for per file compression control.
2808 */
2809 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2810
2811 ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2812 if (ret) {
2813 err = ret;
2814 goto fail_alloc;
2815 }
2816
2817 features = btrfs_super_incompat_flags(disk_super) &
2818 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2819 if (features) {
2820 btrfs_err(fs_info,
2821 "cannot mount because of unsupported optional features (%llx)",
2822 features);
2823 err = -EINVAL;
2824 goto fail_alloc;
2825 }
2826
2827 features = btrfs_super_incompat_flags(disk_super);
2828 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2829 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2830 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2831 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2832 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2833
2834 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2835 btrfs_info(fs_info, "has skinny extents");
2836
2837 /*
2838 * flag our filesystem as having big metadata blocks if
2839 * they are bigger than the page size
2840 */
2841 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2842 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2843 btrfs_info(fs_info,
2844 "flagging fs with big metadata feature");
2845 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2846 }
2847
2848 nodesize = btrfs_super_nodesize(disk_super);
2849 sectorsize = btrfs_super_sectorsize(disk_super);
2850 stripesize = sectorsize;
2851 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2852 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2853
2854 /* Cache block sizes */
2855 fs_info->nodesize = nodesize;
2856 fs_info->sectorsize = sectorsize;
2857 fs_info->stripesize = stripesize;
2858
2859 /*
2860 * Mixed block groups end up with duplicate but slightly offset
2861 * extent buffers for the same range. This leads to corruption.
2862 */
2863 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2864 (sectorsize != nodesize)) {
2865 btrfs_err(fs_info,
2866 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2867 nodesize, sectorsize);
2868 goto fail_alloc;
2869 }
2870
2871 /*
2872 * No need to take the lock: there is no other task that will
2873 * update the flag.
2874 */
2875 btrfs_set_super_incompat_flags(disk_super, features);
2876
2877 features = btrfs_super_compat_ro_flags(disk_super) &
2878 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2879 if (!(sb->s_flags & MS_RDONLY) && features) {
2880 btrfs_err(fs_info,
2881 "cannot mount read-write because of unsupported optional features (%llx)",
2882 features);
2883 err = -EINVAL;
2884 goto fail_alloc;
2885 }
2886
2887 max_active = fs_info->thread_pool_size;
2888
2889 ret = btrfs_init_workqueues(fs_info, fs_devices);
2890 if (ret) {
2891 err = ret;
2892 goto fail_sb_buffer;
2893 }
2894
2895 sb->s_bdi->congested_fn = btrfs_congested_fn;
2896 sb->s_bdi->congested_data = fs_info;
2897 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2898 sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
2899 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2900 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2901
2902 sb->s_blocksize = sectorsize;
2903 sb->s_blocksize_bits = blksize_bits(sectorsize);
2904
2905 mutex_lock(&fs_info->chunk_mutex);
2906 ret = btrfs_read_sys_array(fs_info);
2907 mutex_unlock(&fs_info->chunk_mutex);
2908 if (ret) {
2909 btrfs_err(fs_info, "failed to read the system array: %d", ret);
2910 goto fail_sb_buffer;
2911 }
2912
2913 generation = btrfs_super_chunk_root_generation(disk_super);
2914
2915 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2916
2917 chunk_root->node = read_tree_block(fs_info,
2918 btrfs_super_chunk_root(disk_super),
2919 generation);
2920 if (IS_ERR(chunk_root->node) ||
2921 !extent_buffer_uptodate(chunk_root->node)) {
2922 btrfs_err(fs_info, "failed to read chunk root");
2923 if (!IS_ERR(chunk_root->node))
2924 free_extent_buffer(chunk_root->node);
2925 chunk_root->node = NULL;
2926 goto fail_tree_roots;
2927 }
2928 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2929 chunk_root->commit_root = btrfs_root_node(chunk_root);
2930
2931 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2932 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2933
2934 ret = btrfs_read_chunk_tree(fs_info);
2935 if (ret) {
2936 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2937 goto fail_tree_roots;
2938 }
2939
2940 /*
2941 * keep the device that is marked to be the target device for the
2942 * dev_replace procedure
2943 */
2944 btrfs_close_extra_devices(fs_devices, 0);
2945
2946 if (!fs_devices->latest_bdev) {
2947 btrfs_err(fs_info, "failed to read devices");
2948 goto fail_tree_roots;
2949 }
2950
2951 retry_root_backup:
2952 generation = btrfs_super_generation(disk_super);
2953
2954 tree_root->node = read_tree_block(fs_info,
2955 btrfs_super_root(disk_super),
2956 generation);
2957 if (IS_ERR(tree_root->node) ||
2958 !extent_buffer_uptodate(tree_root->node)) {
2959 btrfs_warn(fs_info, "failed to read tree root");
2960 if (!IS_ERR(tree_root->node))
2961 free_extent_buffer(tree_root->node);
2962 tree_root->node = NULL;
2963 goto recovery_tree_root;
2964 }
2965
2966 btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2967 tree_root->commit_root = btrfs_root_node(tree_root);
2968 btrfs_set_root_refs(&tree_root->root_item, 1);
2969
2970 mutex_lock(&tree_root->objectid_mutex);
2971 ret = btrfs_find_highest_objectid(tree_root,
2972 &tree_root->highest_objectid);
2973 if (ret) {
2974 mutex_unlock(&tree_root->objectid_mutex);
2975 goto recovery_tree_root;
2976 }
2977
2978 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2979
2980 mutex_unlock(&tree_root->objectid_mutex);
2981
2982 ret = btrfs_read_roots(fs_info);
2983 if (ret)
2984 goto recovery_tree_root;
2985
2986 fs_info->generation = generation;
2987 fs_info->last_trans_committed = generation;
2988
2989 ret = btrfs_recover_balance(fs_info);
2990 if (ret) {
2991 btrfs_err(fs_info, "failed to recover balance: %d", ret);
2992 goto fail_block_groups;
2993 }
2994
2995 ret = btrfs_init_dev_stats(fs_info);
2996 if (ret) {
2997 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2998 goto fail_block_groups;
2999 }
3000
3001 ret = btrfs_init_dev_replace(fs_info);
3002 if (ret) {
3003 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3004 goto fail_block_groups;
3005 }
3006
3007 btrfs_close_extra_devices(fs_devices, 1);
3008
3009 ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3010 if (ret) {
3011 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3012 ret);
3013 goto fail_block_groups;
3014 }
3015
3016 ret = btrfs_sysfs_add_device(fs_devices);
3017 if (ret) {
3018 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3019 ret);
3020 goto fail_fsdev_sysfs;
3021 }
3022
3023 ret = btrfs_sysfs_add_mounted(fs_info);
3024 if (ret) {
3025 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3026 goto fail_fsdev_sysfs;
3027 }
3028
3029 ret = btrfs_init_space_info(fs_info);
3030 if (ret) {
3031 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3032 goto fail_sysfs;
3033 }
3034
3035 ret = btrfs_read_block_groups(fs_info);
3036 if (ret) {
3037 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3038 goto fail_sysfs;
3039 }
3040
3041 if (!(sb->s_flags & MS_RDONLY) && !btrfs_check_rw_degradable(fs_info)) {
3042 btrfs_warn(fs_info,
3043 "writeable mount is not allowed due to too many missing devices");
3044 goto fail_sysfs;
3045 }
3046
3047 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3048 "btrfs-cleaner");
3049 if (IS_ERR(fs_info->cleaner_kthread))
3050 goto fail_sysfs;
3051
3052 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3053 tree_root,
3054 "btrfs-transaction");
3055 if (IS_ERR(fs_info->transaction_kthread))
3056 goto fail_cleaner;
3057
3058 if (!btrfs_test_opt(fs_info, NOSSD) &&
3059 !fs_info->fs_devices->rotating) {
3060 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3061 }
3062
3063 /*
3064 * Mount does not set all options immediately; we can do it now and do
3065 * not have to wait for transaction commit.
3066 */
3067 btrfs_apply_pending_changes(fs_info);
3068
3069 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3070 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3071 ret = btrfsic_mount(fs_info, fs_devices,
3072 btrfs_test_opt(fs_info,
3073 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3074 1 : 0,
3075 fs_info->check_integrity_print_mask);
3076 if (ret)
3077 btrfs_warn(fs_info,
3078 "failed to initialize integrity check module: %d",
3079 ret);
3080 }
3081 #endif
3082 ret = btrfs_read_qgroup_config(fs_info);
3083 if (ret)
3084 goto fail_trans_kthread;
3085
3086 /* Do not make disk changes in a broken FS, or if nologreplay is given. */
3087 if (btrfs_super_log_root(disk_super) != 0 &&
3088 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3089 ret = btrfs_replay_log(fs_info, fs_devices);
3090 if (ret) {
3091 err = ret;
3092 goto fail_qgroup;
3093 }
3094 }
3095
3096 ret = btrfs_find_orphan_roots(fs_info);
3097 if (ret)
3098 goto fail_qgroup;
3099
3100 if (!(sb->s_flags & MS_RDONLY)) {
3101 ret = btrfs_cleanup_fs_roots(fs_info);
3102 if (ret)
3103 goto fail_qgroup;
3104
3105 mutex_lock(&fs_info->cleaner_mutex);
3106 ret = btrfs_recover_relocation(tree_root);
3107 mutex_unlock(&fs_info->cleaner_mutex);
3108 if (ret < 0) {
3109 btrfs_warn(fs_info, "failed to recover relocation: %d",
3110 ret);
3111 err = -EINVAL;
3112 goto fail_qgroup;
3113 }
3114 }
3115
3116 location.objectid = BTRFS_FS_TREE_OBJECTID;
3117 location.type = BTRFS_ROOT_ITEM_KEY;
3118 location.offset = 0;
3119
3120 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3121 if (IS_ERR(fs_info->fs_root)) {
3122 err = PTR_ERR(fs_info->fs_root);
3123 goto fail_qgroup;
3124 }
3125
3126 if (sb->s_flags & MS_RDONLY)
3127 return 0;
3128
3129 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3130 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3131 clear_free_space_tree = 1;
3132 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3133 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3134 btrfs_warn(fs_info, "free space tree is invalid");
3135 clear_free_space_tree = 1;
3136 }
3137
3138 if (clear_free_space_tree) {
3139 btrfs_info(fs_info, "clearing free space tree");
3140 ret = btrfs_clear_free_space_tree(fs_info);
3141 if (ret) {
3142 btrfs_warn(fs_info,
3143 "failed to clear free space tree: %d", ret);
3144 close_ctree(fs_info);
3145 return ret;
3146 }
3147 }
3148
3149 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3150 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3151 btrfs_info(fs_info, "creating free space tree");
3152 ret = btrfs_create_free_space_tree(fs_info);
3153 if (ret) {
3154 btrfs_warn(fs_info,
3155 "failed to create free space tree: %d", ret);
3156 close_ctree(fs_info);
3157 return ret;
3158 }
3159 }
3160
3161 down_read(&fs_info->cleanup_work_sem);
3162 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3163 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3164 up_read(&fs_info->cleanup_work_sem);
3165 close_ctree(fs_info);
3166 return ret;
3167 }
3168 up_read(&fs_info->cleanup_work_sem);
3169
3170 ret = btrfs_resume_balance_async(fs_info);
3171 if (ret) {
3172 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3173 close_ctree(fs_info);
3174 return ret;
3175 }
3176
3177 ret = btrfs_resume_dev_replace_async(fs_info);
3178 if (ret) {
3179 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3180 close_ctree(fs_info);
3181 return ret;
3182 }
3183
3184 btrfs_qgroup_rescan_resume(fs_info);
3185
3186 if (!fs_info->uuid_root) {
3187 btrfs_info(fs_info, "creating UUID tree");
3188 ret = btrfs_create_uuid_tree(fs_info);
3189 if (ret) {
3190 btrfs_warn(fs_info,
3191 "failed to create the UUID tree: %d", ret);
3192 close_ctree(fs_info);
3193 return ret;
3194 }
3195 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3196 fs_info->generation !=
3197 btrfs_super_uuid_tree_generation(disk_super)) {
3198 btrfs_info(fs_info, "checking UUID tree");
3199 ret = btrfs_check_uuid_tree(fs_info);
3200 if (ret) {
3201 btrfs_warn(fs_info,
3202 "failed to check the UUID tree: %d", ret);
3203 close_ctree(fs_info);
3204 return ret;
3205 }
3206 } else {
3207 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3208 }
3209 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3210
3211 /*
3212 * backuproot only affects mount behavior; if open_ctree succeeded,
3213 * there is no need to keep the flag.
3214 */
3215 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3216
3217 return 0;
3218
3219 fail_qgroup:
3220 btrfs_free_qgroup_config(fs_info);
3221 fail_trans_kthread:
3222 kthread_stop(fs_info->transaction_kthread);
3223 btrfs_cleanup_transaction(fs_info);
3224 btrfs_free_fs_roots(fs_info);
3225 fail_cleaner:
3226 kthread_stop(fs_info->cleaner_kthread);
3227
3228 /*
3229 * make sure we're done with the btree inode before we stop our
3230 * kthreads
3231 */
3232 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3233
3234 fail_sysfs:
3235 btrfs_sysfs_remove_mounted(fs_info);
3236
3237 fail_fsdev_sysfs:
3238 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3239
3240 fail_block_groups:
3241 btrfs_put_block_group_cache(fs_info);
3242
3243 fail_tree_roots:
3244 free_root_pointers(fs_info, 1);
3245 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3246
3247 fail_sb_buffer:
3248 btrfs_stop_all_workers(fs_info);
3249 btrfs_free_block_groups(fs_info);
3250 fail_alloc:
3251 fail_iput:
3252 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3253
3254 iput(fs_info->btree_inode);
3255 fail_bio_counter:
3256 percpu_counter_destroy(&fs_info->bio_counter);
3257 fail_delalloc_bytes:
3258 percpu_counter_destroy(&fs_info->delalloc_bytes);
3259 fail_dirty_metadata_bytes:
3260 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3261 fail_srcu:
3262 cleanup_srcu_struct(&fs_info->subvol_srcu);
3263 fail:
3264 btrfs_free_stripe_hash_table(fs_info);
3265 btrfs_close_devices(fs_info->fs_devices);
3266 return err;
3267
3268 recovery_tree_root:
3269 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3270 goto fail_tree_roots;
3271
3272 free_root_pointers(fs_info, 0);
3273
3274 /* Don't use the log in recovery mode; it won't be valid. */
3275 btrfs_set_super_log_root(disk_super, 0);
3276
3277 /* we can't trust the free space cache either */
3278 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3279
3280 ret = next_root_backup(fs_info, fs_info->super_copy,
3281 &num_backups_tried, &backup_index);
3282 if (ret == -1)
3283 goto fail_block_groups;
3284 goto retry_root_backup;
3285 }
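
/*
 * Usage sketch (hedged): open_ctree() is driven from the mount path in
 * super.c, roughly:
 *
 *	err = open_ctree(sb, fs_devices, (char *)data);
 *	if (err) {
 *		btrfs_err(fs_info, "open_ctree failed");
 *		return err;
 *	}
 */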
3286
3287 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3288 {
3289 if (uptodate) {
3290 set_buffer_uptodate(bh);
3291 } else {
3292 struct btrfs_device *device = (struct btrfs_device *)
3293 bh->b_private;
3294
3295 btrfs_warn_rl_in_rcu(device->fs_info,
3296 "lost page write due to IO error on %s",
3297 rcu_str_deref(device->name));
3298 /* Note: we don't set_buffer_write_io_error because we have
3299 * our own ways of dealing with the IO errors.
3300 */
3301 clear_buffer_uptodate(bh);
3302 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3303 }
3304 unlock_buffer(bh);
3305 put_bh(bh);
3306 }
3307
3308 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3309 struct buffer_head **bh_ret)
3310 {
3311 struct buffer_head *bh;
3312 struct btrfs_super_block *super;
3313 u64 bytenr;
3314
3315 bytenr = btrfs_sb_offset(copy_num);
3316 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3317 return -EINVAL;
3318
3319 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3320 /*
3321 * If we fail to read from the underlying devices, as of now
3322 * the best option we have is to mark it EIO.
3323 */
3324 if (!bh)
3325 return -EIO;
3326
3327 super = (struct btrfs_super_block *)bh->b_data;
3328 if (btrfs_super_bytenr(super) != bytenr ||
3329 btrfs_super_magic(super) != BTRFS_MAGIC) {
3330 brelse(bh);
3331 return -EINVAL;
3332 }
3333
3334 *bh_ret = bh;
3335 return 0;
3336 }
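
/*
 * Worked example (hedged; offsets as returned by btrfs_sb_offset()):
 * copy 0 lives at 64KiB, copy 1 at 64MiB and copy 2 at 256GiB, so the
 * size check above skips any mirror that does not fully fit on the
 * device:
 *
 *	btrfs_sb_offset(0) == 64ULL * 1024
 *	btrfs_sb_offset(1) == 64ULL * 1024 * 1024
 *	btrfs_sb_offset(2) == 256ULL * 1024 * 1024 * 1024
 */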
3337
3338
3339 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3340 {
3341 struct buffer_head *bh;
3342 struct buffer_head *latest = NULL;
3343 struct btrfs_super_block *super;
3344 int i;
3345 u64 transid = 0;
3346 int ret = -EINVAL;
3347
3348 /* We would like to check all the supers, but that would make
3349 * a btrfs mount succeed after a mkfs from a different FS.
3350 * So we would need a special mount option to scan for the later
3351 * supers, using BTRFS_SUPER_MIRROR_MAX instead.
3352 */
3353 for (i = 0; i < 1; i++) {
3354 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3355 if (ret)
3356 continue;
3357
3358 super = (struct btrfs_super_block *)bh->b_data;
3359
3360 if (!latest || btrfs_super_generation(super) > transid) {
3361 brelse(latest);
3362 latest = bh;
3363 transid = btrfs_super_generation(super);
3364 } else {
3365 brelse(bh);
3366 }
3367 }
3368
3369 if (!latest)
3370 return ERR_PTR(ret);
3371
3372 return latest;
3373 }
3374
3375 /*
3376 * Write superblock @sb to @device. Do not wait for completion; all the
3377 * buffer heads we write are pinned.
3378 *
3379 * Write @max_mirrors copies of the superblock, where 0 means the default,
3380 * i.e. all copies that fit the expected device size at commit time. Note
3381 * that max_mirrors must be the same for the write and wait phases.
3382 *
3383 * Return number of errors when buffer head is not found or submission fails.
3384 */
3385 static int write_dev_supers(struct btrfs_device *device,
3386 struct btrfs_super_block *sb, int max_mirrors)
3387 {
3388 struct buffer_head *bh;
3389 int i;
3390 int ret;
3391 int errors = 0;
3392 u32 crc;
3393 u64 bytenr;
3394
3395 if (max_mirrors == 0)
3396 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3397
3398 for (i = 0; i < max_mirrors; i++) {
3399 bytenr = btrfs_sb_offset(i);
3400 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3401 device->commit_total_bytes)
3402 break;
3403
3404 btrfs_set_super_bytenr(sb, bytenr);
3405
3406 crc = ~(u32)0;
3407 crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
3408 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3409 btrfs_csum_final(crc, sb->csum);
3410
3411 /* One reference for us, and we leave it for the caller */
3412 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3413 BTRFS_SUPER_INFO_SIZE);
3414 if (!bh) {
3415 btrfs_err(device->fs_info,
3416 "couldn't get super buffer head for bytenr %llu",
3417 bytenr);
3418 errors++;
3419 continue;
3420 }
3421
3422 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3423
3424 /* one reference for submit_bh */
3425 get_bh(bh);
3426
3427 set_buffer_uptodate(bh);
3428 lock_buffer(bh);
3429 bh->b_end_io = btrfs_end_buffer_write_sync;
3430 bh->b_private = device;
3431
3432 /*
3433 * We FUA the first super. The others we allow
3434 * to go down lazily.
3435 */
3436 if (i == 0) {
3437 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3438 REQ_SYNC | REQ_FUA | REQ_META | REQ_PRIO, bh);
3439 } else {
3440 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3441 REQ_SYNC | REQ_META | REQ_PRIO, bh);
3442 }
3443 if (ret)
3444 errors++;
3445 }
3446 return errors < i ? 0 : -1;
3447 }
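
/*
 * Pairing sketch (see write_all_supers() below): the write and wait
 * phases iterate the same set of mirrors, which is why @max_mirrors must
 * match between the two calls:
 *
 *	ret = write_dev_supers(dev, sb, max_mirrors);	(per device)
 *	...
 *	ret = wait_dev_supers(dev, max_mirrors);	(per device)
 */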
3448
3449 /*
3450 * Wait for write completion of the superblocks written by write_dev_supers;
3451 * @max_mirrors must be the same as in the write phase.
3452 *
3453 * Return number of errors when buffer head is not found or not marked up to
3454 * date.
3455 */
3456 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3457 {
3458 struct buffer_head *bh;
3459 int i;
3460 int errors = 0;
3461 u64 bytenr;
3462
3463 if (max_mirrors == 0)
3464 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3465
3466 for (i = 0; i < max_mirrors; i++) {
3467 bytenr = btrfs_sb_offset(i);
3468 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3469 device->commit_total_bytes)
3470 break;
3471
3472 bh = __find_get_block(device->bdev,
3473 bytenr / BTRFS_BDEV_BLOCKSIZE,
3474 BTRFS_SUPER_INFO_SIZE);
3475 if (!bh) {
3476 errors++;
3477 continue;
3478 }
3479 wait_on_buffer(bh);
3480 if (!buffer_uptodate(bh))
3481 errors++;
3482
3483 /* drop our reference */
3484 brelse(bh);
3485
3486 /* drop the reference from the writing run */
3487 brelse(bh);
3488 }
3489
3490 return errors < i ? 0 : -1;
3491 }
3492
3493 /*
3494 * Endio for write_dev_flush; this will wake anyone waiting
3495 * for the barrier when it is done.
3496 */
3497 static void btrfs_end_empty_barrier(struct bio *bio)
3498 {
3499 complete(bio->bi_private);
3500 }
3501
3502 /*
3503 * Submit a flush request to the device if it supports it. Error handling is
3504 * done in the waiting counterpart.
3505 */
3506 static void write_dev_flush(struct btrfs_device *device)
3507 {
3508 struct request_queue *q = bdev_get_queue(device->bdev);
3509 struct bio *bio = device->flush_bio;
3510
3511 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3512 return;
3513
3514 bio_reset(bio);
3515 bio->bi_end_io = btrfs_end_empty_barrier;
3516 bio_set_dev(bio, device->bdev);
3517 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3518 init_completion(&device->flush_wait);
3519 bio->bi_private = &device->flush_wait;
3520
3521 btrfsic_submit_bio(bio);
3522 device->flush_bio_sent = 1;
3523 }
3524
3525 /*
3526 * If the flush bio has been submitted by write_dev_flush, wait for it.
3527 */
3528 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3529 {
3530 struct bio *bio = device->flush_bio;
3531
3532 if (!device->flush_bio_sent)
3533 return BLK_STS_OK;
3534
3535 device->flush_bio_sent = 0;
3536 wait_for_completion_io(&device->flush_wait);
3537
3538 return bio->bi_status;
3539 }
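
/*
 * Pairing sketch (see barrier_all_devices() below): the flush is first
 * submitted to every device and the results are collected afterwards, so
 * the device caches are flushed in parallel rather than one at a time:
 *
 *	list_for_each_entry_rcu(dev, head, dev_list)
 *		write_dev_flush(dev);
 *	list_for_each_entry_rcu(dev, head, dev_list)
 *		ret = wait_dev_flush(dev);
 */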
3540
3541 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3542 {
3543 if (!btrfs_check_rw_degradable(fs_info))
3544 return -EIO;
3545 return 0;
3546 }
3547
3548 /*
3549 * Send an empty flush down to each device in parallel,
3550 * then wait for them.
3551 */
3552 static int barrier_all_devices(struct btrfs_fs_info *info)
3553 {
3554 struct list_head *head;
3555 struct btrfs_device *dev;
3556 int errors_wait = 0;
3557 blk_status_t ret;
3558
3559 /* send down all the barriers */
3560 head = &info->fs_devices->devices;
3561 list_for_each_entry_rcu(dev, head, dev_list) {
3562 if (dev->missing)
3563 continue;
3564 if (!dev->bdev)
3565 continue;
3566 if (!dev->in_fs_metadata || !dev->writeable)
3567 continue;
3568
3569 write_dev_flush(dev);
3570 dev->last_flush_error = BLK_STS_OK;
3571 }
3572
3573 /* wait for all the barriers */
3574 list_for_each_entry_rcu(dev, head, dev_list) {
3575 if (dev->missing)
3576 continue;
3577 if (!dev->bdev) {
3578 errors_wait++;
3579 continue;
3580 }
3581 if (!dev->in_fs_metadata || !dev->writeable)
3582 continue;
3583
3584 ret = wait_dev_flush(dev);
3585 if (ret) {
3586 dev->last_flush_error = ret;
3587 btrfs_dev_stat_inc_and_print(dev,
3588 BTRFS_DEV_STAT_FLUSH_ERRS);
3589 errors_wait++;
3590 }
3591 }
3592
3593 if (errors_wait) {
3594 /*
3595 * We need the status of all disks to arrive at the overall
3596 * volume status, so the error checking is pushed to a
3597 * separate loop.
3598 */
3599 return check_barrier_error(info);
3600 }
3601 return 0;
3602 }
3603
3604 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3605 {
3606 int raid_type;
3607 int min_tolerated = INT_MAX;
3608
3609 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3610 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3611 min_tolerated = min(min_tolerated,
3612 btrfs_raid_array[BTRFS_RAID_SINGLE].
3613 tolerated_failures);
3614
3615 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3616 if (raid_type == BTRFS_RAID_SINGLE)
3617 continue;
3618 if (!(flags & btrfs_raid_group[raid_type]))
3619 continue;
3620 min_tolerated = min(min_tolerated,
3621 btrfs_raid_array[raid_type].
3622 tolerated_failures);
3623 }
3624
3625 if (min_tolerated == INT_MAX) {
3626 pr_warn("BTRFS: unknown raid flag: %llu", flags);
3627 min_tolerated = 0;
3628 }
3629
3630 return min_tolerated;
3631 }
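
/*
 * Worked example (hedged; tolerated_failures values as defined in
 * btrfs_raid_array): RAID1, RAID10 and RAID5 tolerate one failed device,
 * RAID6 tolerates two, while SINGLE, DUP and RAID0 tolerate none. For
 * flags naming both RAID1 and RAID6, the result is min(1, 2) = 1.
 */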
3632
3633 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3634 {
3635 struct list_head *head;
3636 struct btrfs_device *dev;
3637 struct btrfs_super_block *sb;
3638 struct btrfs_dev_item *dev_item;
3639 int ret;
3640 int do_barriers;
3641 int max_errors;
3642 int total_errors = 0;
3643 u64 flags;
3644
3645 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3646 backup_super_roots(fs_info);
3647
3648 sb = fs_info->super_for_commit;
3649 dev_item = &sb->dev_item;
3650
3651 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3652 head = &fs_info->fs_devices->devices;
3653 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3654
3655 if (do_barriers) {
3656 ret = barrier_all_devices(fs_info);
3657 if (ret) {
3658 mutex_unlock(
3659 &fs_info->fs_devices->device_list_mutex);
3660 btrfs_handle_fs_error(fs_info, ret,
3661 "errors while submitting device barriers.");
3662 return ret;
3663 }
3664 }
3665
3666 list_for_each_entry_rcu(dev, head, dev_list) {
3667 if (!dev->bdev) {
3668 total_errors++;
3669 continue;
3670 }
3671 if (!dev->in_fs_metadata || !dev->writeable)
3672 continue;
3673
3674 btrfs_set_stack_device_generation(dev_item, 0);
3675 btrfs_set_stack_device_type(dev_item, dev->type);
3676 btrfs_set_stack_device_id(dev_item, dev->devid);
3677 btrfs_set_stack_device_total_bytes(dev_item,
3678 dev->commit_total_bytes);
3679 btrfs_set_stack_device_bytes_used(dev_item,
3680 dev->commit_bytes_used);
3681 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3682 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3683 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3684 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3685 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);
3686
3687 flags = btrfs_super_flags(sb);
3688 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3689
3690 ret = write_dev_supers(dev, sb, max_mirrors);
3691 if (ret)
3692 total_errors++;
3693 }
3694 if (total_errors > max_errors) {
3695 btrfs_err(fs_info, "%d errors while writing supers",
3696 total_errors);
3697 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3698
3699 /* FUA is masked off if unsupported, so it can't be the reason */
3700 btrfs_handle_fs_error(fs_info, -EIO,
3701 "%d errors while writing supers",
3702 total_errors);
3703 return -EIO;
3704 }
3705
3706 total_errors = 0;
3707 list_for_each_entry_rcu(dev, head, dev_list) {
3708 if (!dev->bdev)
3709 continue;
3710 if (!dev->in_fs_metadata || !dev->writeable)
3711 continue;
3712
3713 ret = wait_dev_supers(dev, max_mirrors);
3714 if (ret)
3715 total_errors++;
3716 }
3717 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3718 if (total_errors > max_errors) {
3719 btrfs_handle_fs_error(fs_info, -EIO,
3720 "%d errors while writing supers",
3721 total_errors);
3722 return -EIO;
3723 }
3724 return 0;
3725 }
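
/*
 * To summarize, write_all_supers() runs in phases under
 * device_list_mutex: issue barriers (unless NOBARRIER), submit the
 * per-device superblock copies, then wait for those writes, checking
 * total_errors against max_errors (num_devices - 1) after both the
 * submit and the wait phase so a commit never claims success with too
 * few surviving super copies.
 */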
3726
3727 /* Drop a fs root from the radix tree and free it. */
3728 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3729 struct btrfs_root *root)
3730 {
3731 spin_lock(&fs_info->fs_roots_radix_lock);
3732 radix_tree_delete(&fs_info->fs_roots_radix,
3733 (unsigned long)root->root_key.objectid);
3734 spin_unlock(&fs_info->fs_roots_radix_lock);
3735
3736 if (btrfs_root_refs(&root->root_item) == 0)
3737 synchronize_srcu(&fs_info->subvol_srcu);
3738
3739 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3740 btrfs_free_log(NULL, root);
3741 if (root->reloc_root) {
3742 free_extent_buffer(root->reloc_root->node);
3743 free_extent_buffer(root->reloc_root->commit_root);
3744 btrfs_put_fs_root(root->reloc_root);
3745 root->reloc_root = NULL;
3746 }
3747 }
3748
3749 if (root->free_ino_pinned)
3750 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3751 if (root->free_ino_ctl)
3752 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3753 free_fs_root(root);
3754 }
3755
3756 static void free_fs_root(struct btrfs_root *root)
3757 {
3758 iput(root->ino_cache_inode);
3759 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3760 btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
3761 root->orphan_block_rsv = NULL;
3762 if (root->anon_dev)
3763 free_anon_bdev(root->anon_dev);
3764 if (root->subv_writers)
3765 btrfs_free_subvolume_writers(root->subv_writers);
3766 free_extent_buffer(root->node);
3767 free_extent_buffer(root->commit_root);
3768 kfree(root->free_ino_ctl);
3769 kfree(root->free_ino_pinned);
3770 kfree(root->name);
3771 btrfs_put_fs_root(root);
3772 }
3773
3774 void btrfs_free_fs_root(struct btrfs_root *root)
3775 {
3776 free_fs_root(root);
3777 }
3778
3779 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3780 {
3781 u64 root_objectid = 0;
3782 struct btrfs_root *gang[8];
3783 int i = 0;
3784 int err = 0;
3785 unsigned int ret = 0;
3786 int index;
3787
3788 while (1) {
3789 index = srcu_read_lock(&fs_info->subvol_srcu);
3790 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3791 (void **)gang, root_objectid,
3792 ARRAY_SIZE(gang));
3793 if (!ret) {
3794 srcu_read_unlock(&fs_info->subvol_srcu, index);
3795 break;
3796 }
3797 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3798
3799 for (i = 0; i < ret; i++) {
3800 /* Avoid grabbing roots in dead_roots */
3801 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3802 gang[i] = NULL;
3803 continue;
3804 }
3805 /* grab all the search results for later use */
3806 gang[i] = btrfs_grab_fs_root(gang[i]);
3807 }
3808 srcu_read_unlock(&fs_info->subvol_srcu, index);
3809
3810 for (i = 0; i < ret; i++) {
3811 if (!gang[i])
3812 continue;
3813 root_objectid = gang[i]->root_key.objectid;
3814 err = btrfs_orphan_cleanup(gang[i]);
3815 if (err)
3816 break;
3817 btrfs_put_fs_root(gang[i]);
3818 }
3819 root_objectid++;
3820 }
3821
3822 /* release any roots left unprocessed due to an error */
3823 for (; i < ret; i++) {
3824 if (gang[i])
3825 btrfs_put_fs_root(gang[i]);
3826 }
3827 return err;
3828 }
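
/*
 * The loop above is the usual SRCU-protected gang-lookup idiom,
 * roughly (a sketch of the pattern only):
 *
 *	idx = srcu_read_lock(&fs_info->subvol_srcu);
 *	n = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
 *				   (void **)gang, root_objectid,
 *				   ARRAY_SIZE(gang));
 *	// take refs on the results ...
 *	srcu_read_unlock(&fs_info->subvol_srcu, idx);
 *	// ... then do the slow orphan cleanup without SRCU held
 *
 * Advancing root_objectid past the last result each round guarantees
 * forward progress even when roots are inserted concurrently.
 */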
3829
3830 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3831 {
3832 struct btrfs_root *root = fs_info->tree_root;
3833 struct btrfs_trans_handle *trans;
3834
3835 mutex_lock(&fs_info->cleaner_mutex);
3836 btrfs_run_delayed_iputs(fs_info);
3837 mutex_unlock(&fs_info->cleaner_mutex);
3838 wake_up_process(fs_info->cleaner_kthread);
3839
3840 /* wait until the ongoing cleanup work is done */
3841 down_write(&fs_info->cleanup_work_sem);
3842 up_write(&fs_info->cleanup_work_sem);
3843
3844 trans = btrfs_join_transaction(root);
3845 if (IS_ERR(trans))
3846 return PTR_ERR(trans);
3847 return btrfs_commit_transaction(trans);
3848 }
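
/*
 * Note the join/commit idiom above: btrfs_join_transaction() attaches
 * to the currently running transaction (or starts one), and
 * btrfs_commit_transaction() always consumes the handle, so no
 * separate btrfs_end_transaction() call is needed here.
 */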
3849
3850 void close_ctree(struct btrfs_fs_info *fs_info)
3851 {
3852 struct btrfs_root *root = fs_info->tree_root;
3853 int ret;
3854
3855 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3856
3857 /* wait for the qgroup rescan worker to stop */
3858 btrfs_qgroup_wait_for_completion(fs_info, false);
3859
3860 /* wait for the uuid_scan task to finish */
3861 down(&fs_info->uuid_tree_rescan_sem);
3862 /* avoid complaints from lockdep et al., set sem back to initial state */
3863 up(&fs_info->uuid_tree_rescan_sem);
3864
3865 /* pause restriper - we want to resume on mount */
3866 btrfs_pause_balance(fs_info);
3867
3868 btrfs_dev_replace_suspend_for_unmount(fs_info);
3869
3870 btrfs_scrub_cancel(fs_info);
3871
3872 /* wait for any defraggers to finish */
3873 wait_event(fs_info->transaction_wait,
3874 (atomic_read(&fs_info->defrag_running) == 0));
3875
3876 /* clear out the rbtree of defraggable inodes */
3877 btrfs_cleanup_defrag_inodes(fs_info);
3878
3879 cancel_work_sync(&fs_info->async_reclaim_work);
3880
3881 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3882 /*
3883 * If there are block groups queued for removal, their
3884 * deletion would be skipped once we stop the cleaner
3885 * thread below, so delete them here first.
3886 */
3887 btrfs_delete_unused_bgs(fs_info);
3888
3889 ret = btrfs_commit_super(fs_info);
3890 if (ret)
3891 btrfs_err(fs_info, "commit super ret %d", ret);
3892 }
3893
3894 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3895 btrfs_error_commit_super(fs_info);
3896
3897 kthread_stop(fs_info->transaction_kthread);
3898 kthread_stop(fs_info->cleaner_kthread);
3899
3900 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3901
3902 btrfs_free_qgroup_config(fs_info);
3903
3904 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3905 btrfs_info(fs_info, "at unmount delalloc count %lld",
3906 percpu_counter_sum(&fs_info->delalloc_bytes));
3907 }
3908
3909 btrfs_sysfs_remove_mounted(fs_info);
3910 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3911
3912 btrfs_free_fs_roots(fs_info);
3913
3914 btrfs_put_block_group_cache(fs_info);
3915
3916 /*
3917 * We must make sure no read requests are submitted
3918 * after we stop all the workers.
3919 */
3920 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3921 btrfs_stop_all_workers(fs_info);
3922
3923 btrfs_free_block_groups(fs_info);
3924
3925 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3926 free_root_pointers(fs_info, 1);
3927
3928 iput(fs_info->btree_inode);
3929
3930 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3931 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3932 btrfsic_unmount(fs_info->fs_devices);
3933 #endif
3934
3935 btrfs_close_devices(fs_info->fs_devices);
3936 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3937
3938 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3939 percpu_counter_destroy(&fs_info->delalloc_bytes);
3940 percpu_counter_destroy(&fs_info->bio_counter);
3941 cleanup_srcu_struct(&fs_info->subvol_srcu);
3942
3943 btrfs_free_stripe_hash_table(fs_info);
3944
3945 __btrfs_free_block_rsv(root->orphan_block_rsv);
3946 root->orphan_block_rsv = NULL;
3947
3948 while (!list_empty(&fs_info->pinned_chunks)) {
3949 struct extent_map *em;
3950
3951 em = list_first_entry(&fs_info->pinned_chunks,
3952 struct extent_map, list);
3953 list_del_init(&em->list);
3954 free_extent_map(em);
3955 }
3956 }
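
/*
 * The ordering in close_ctree() is deliberate: background work
 * (qgroup rescan, uuid scan, balance, dev-replace, scrub, defrag) is
 * quiesced first, the final commit happens while the cleaner and
 * transaction kthreads are still alive, and only then are the
 * kthreads stopped, the workers torn down, and the remaining caches,
 * devices and counters freed.
 */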
3957
3958 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3959 int atomic)
3960 {
3961 int ret;
3962 struct inode *btree_inode = buf->pages[0]->mapping->host;
3963
3964 ret = extent_buffer_uptodate(buf);
3965 if (!ret)
3966 return ret;
3967
3968 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3969 parent_transid, atomic);
3970 if (ret == -EAGAIN)
3971 return ret;
3972 return !ret;
3973 }
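
/*
 * Return convention for btrfs_buffer_uptodate(): 1 if the buffer is
 * uptodate and its generation matches @parent_transid, 0 if either
 * check fails, and -EAGAIN when @atomic is set and the transid check
 * would have had to block.
 */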
3974
3975 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3976 {
3977 struct btrfs_fs_info *fs_info;
3978 struct btrfs_root *root;
3979 u64 transid = btrfs_header_generation(buf);
3980 int was_dirty;
3981
3982 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3983 /*
3984 * This is a fast path so only do this check if we have sanity tests
3985 * enabled. Normal people shouldn't be marking dummy buffers as dirty
3986 * outside of the sanity tests.
3987 */
3988 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3989 return;
3990 #endif
3991 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3992 fs_info = root->fs_info;
3993 btrfs_assert_tree_locked(buf);
3994 if (transid != fs_info->generation)
3995 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
3996 buf->start, transid, fs_info->generation);
3997 was_dirty = set_extent_buffer_dirty(buf);
3998 if (!was_dirty)
3999 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4000 buf->len,
4001 fs_info->dirty_metadata_batch);
4002 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4003 if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
4004 btrfs_print_leaf(buf);
4005 ASSERT(0);
4006 }
4007 #endif
4008 }
4009
4010 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4011 int flush_delayed)
4012 {
4013 /*
4014 * Looks as though older kernels can get into trouble with
4015 * this code; they end up stuck in balance_dirty_pages() forever.
4016 */
4017 int ret;
4018
4019 if (current->flags & PF_MEMALLOC)
4020 return;
4021
4022 if (flush_delayed)
4023 btrfs_balance_delayed_items(fs_info);
4024
4025 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4026 BTRFS_DIRTY_METADATA_THRESH);
4027 if (ret > 0) {
4028 balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4029 }
4030 }
4031
4032 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4033 {
4034 __btrfs_btree_balance_dirty(fs_info, 1);
4035 }
4036
4037 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4038 {
4039 __btrfs_btree_balance_dirty(fs_info, 0);
4040 }
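
/*
 * The _nodelay variant exists for callers that are themselves running
 * from the delayed-item machinery (e.g. the async delayed workers),
 * where re-entering btrfs_balance_delayed_items() would be pointless;
 * everything else should use btrfs_btree_balance_dirty().
 */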
4041
4042 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
4043 {
4044 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4045 struct btrfs_fs_info *fs_info = root->fs_info;
4046
4047 return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
4048 }
4049
4050 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
4051 {
4052 struct btrfs_super_block *sb = fs_info->super_copy;
4053 u64 nodesize = btrfs_super_nodesize(sb);
4054 u64 sectorsize = btrfs_super_sectorsize(sb);
4055 int ret = 0;
4056
4057 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
4058 btrfs_err(fs_info, "no valid FS found");
4059 ret = -EINVAL;
4060 }
4061 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
4062 btrfs_warn(fs_info, "unrecognized super flag: %llu",
4063 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
4064 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
4065 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
4066 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
4067 ret = -EINVAL;
4068 }
4069 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4070 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
4071 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4072 ret = -EINVAL;
4073 }
4074 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4075 btrfs_err(fs_info, "log_root level too big: %d >= %d",
4076 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4077 ret = -EINVAL;
4078 }
4079
4080 /*
4081 * Check sectorsize and nodesize first; other checks will need them.
4082 * Allow all possible sector sizes (4K, 8K, 16K, 32K, 64K) here.
4083 */
4084 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4085 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4086 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
4087 ret = -EINVAL;
4088 }
4089 /* Only PAGE_SIZE is supported so far */
4090 if (sectorsize != PAGE_SIZE) {
4091 btrfs_err(fs_info,
4092 "sectorsize %llu not supported yet, only support %lu",
4093 sectorsize, PAGE_SIZE);
4094 ret = -EINVAL;
4095 }
4096 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4097 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4098 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
4099 ret = -EINVAL;
4100 }
4101 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4102 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
4103 le32_to_cpu(sb->__unused_leafsize), nodesize);
4104 ret = -EINVAL;
4105 }
4106
4107 /* Root alignment check */
4108 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4109 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
4110 btrfs_super_root(sb));
4111 ret = -EINVAL;
4112 }
4113 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4114 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
4115 btrfs_super_chunk_root(sb));
4116 ret = -EINVAL;
4117 }
4118 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4119 btrfs_warn(fs_info, "log_root block unaligned: %llu",
4120 btrfs_super_log_root(sb));
4121 ret = -EINVAL;
4122 }
4123
4124 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
4125 btrfs_err(fs_info,
4126 "dev_item UUID does not match fsid: %pU != %pU",
4127 fs_info->fsid, sb->dev_item.fsid);
4128 ret = -EINVAL;
4129 }
4130
4131 /*
4132 * Hint to catch really bogus numbers, bitflips or so; more exact
4133 * checks are done later.
4134 */
4135 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4136 btrfs_err(fs_info, "bytes_used is too small %llu",
4137 btrfs_super_bytes_used(sb));
4138 ret = -EINVAL;
4139 }
4140 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4141 btrfs_err(fs_info, "invalid stripesize %u",
4142 btrfs_super_stripesize(sb));
4143 ret = -EINVAL;
4144 }
4145 if (btrfs_super_num_devices(sb) > (1UL << 31))
4146 btrfs_warn(fs_info, "suspicious number of devices: %llu",
4147 btrfs_super_num_devices(sb));
4148 if (btrfs_super_num_devices(sb) == 0) {
4149 btrfs_err(fs_info, "number of devices is 0");
4150 ret = -EINVAL;
4151 }
4152
4153 if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4154 btrfs_err(fs_info, "super offset mismatch %llu != %u",
4155 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4156 ret = -EINVAL;
4157 }
4158
4159 /*
4160 * Catch obvious sys_chunk_array corruption: it must hold at least
4161 * one key and one chunk.
4162 */
4163 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4164 btrfs_err(fs_info, "system chunk array too big %u > %u",
4165 btrfs_super_sys_array_size(sb),
4166 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4167 ret = -EINVAL;
4168 }
4169 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4170 + sizeof(struct btrfs_chunk)) {
4171 btrfs_err(fs_info, "system chunk array too small %u < %zu",
4172 btrfs_super_sys_array_size(sb),
4173 sizeof(struct btrfs_disk_key)
4174 + sizeof(struct btrfs_chunk));
4175 ret = -EINVAL;
4176 }
4177
4178 /*
4179 * The generation is a global counter; we'll trust it more than the
4180 * others, but it's still possible that it's the one that's wrong.
4181 */
4182 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4183 btrfs_warn(fs_info,
4184 "suspicious: generation < chunk_root_generation: %llu < %llu",
4185 btrfs_super_generation(sb),
4186 btrfs_super_chunk_root_generation(sb));
4187 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4188 && btrfs_super_cache_generation(sb) != (u64)-1)
4189 btrfs_warn(fs_info,
4190 "suspicious: generation < cache_generation: %llu < %llu",
4191 btrfs_super_generation(sb),
4192 btrfs_super_cache_generation(sb));
4193
4194 return ret;
4195 }
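
/*
 * Note that btrfs_check_super_valid() deliberately does not return on
 * the first problem: every check runs and logs its own message while
 * ret merely latches -EINVAL, so one failed mount reports all
 * superblock anomalies at once.
 */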
4196
4197 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4198 {
4199 mutex_lock(&fs_info->cleaner_mutex);
4200 btrfs_run_delayed_iputs(fs_info);
4201 mutex_unlock(&fs_info->cleaner_mutex);
4202
4203 down_write(&fs_info->cleanup_work_sem);
4204 up_write(&fs_info->cleanup_work_sem);
4205
4206 /* cleanup FS via transaction */
4207 btrfs_cleanup_transaction(fs_info);
4208 }
4209
4210 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4211 {
4212 struct btrfs_ordered_extent *ordered;
4213
4214 spin_lock(&root->ordered_extent_lock);
4215 /*
4216 * This will just short-circuit the ordered completion code, which
4217 * makes sure the ordered extent gets properly cleaned up.
4218 */
4219 list_for_each_entry(ordered, &root->ordered_extents,
4220 root_extent_list)
4221 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4222 spin_unlock(&root->ordered_extent_lock);
4223 }
4224
4225 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4226 {
4227 struct btrfs_root *root;
4228 struct list_head splice;
4229
4230 INIT_LIST_HEAD(&splice);
4231
4232 spin_lock(&fs_info->ordered_root_lock);
4233 list_splice_init(&fs_info->ordered_roots, &splice);
4234 while (!list_empty(&splice)) {
4235 root = list_first_entry(&splice, struct btrfs_root,
4236 ordered_root);
4237 list_move_tail(&root->ordered_root,
4238 &fs_info->ordered_roots);
4239
4240 spin_unlock(&fs_info->ordered_root_lock);
4241 btrfs_destroy_ordered_extents(root);
4242
4243 cond_resched();
4244 spin_lock(&fs_info->ordered_root_lock);
4245 }
4246 spin_unlock(&fs_info->ordered_root_lock);
4247 }
4248
4249 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4250 struct btrfs_fs_info *fs_info)
4251 {
4252 struct rb_node *node;
4253 struct btrfs_delayed_ref_root *delayed_refs;
4254 struct btrfs_delayed_ref_node *ref;
4255 int ret = 0;
4256
4257 delayed_refs = &trans->delayed_refs;
4258
4259 spin_lock(&delayed_refs->lock);
4260 if (atomic_read(&delayed_refs->num_entries) == 0) {
4261 spin_unlock(&delayed_refs->lock);
4262 btrfs_info(fs_info, "delayed_refs has NO entry");
4263 return ret;
4264 }
4265
4266 while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4267 struct btrfs_delayed_ref_head *head;
4268 struct btrfs_delayed_ref_node *tmp;
4269 bool pin_bytes = false;
4270
4271 head = rb_entry(node, struct btrfs_delayed_ref_head,
4272 href_node);
4273 if (!mutex_trylock(&head->mutex)) {
4274 refcount_inc(&head->node.refs);
4275 spin_unlock(&delayed_refs->lock);
4276
4277 mutex_lock(&head->mutex);
4278 mutex_unlock(&head->mutex);
4279 btrfs_put_delayed_ref(&head->node);
4280 spin_lock(&delayed_refs->lock);
4281 continue;
4282 }
4283 spin_lock(&head->lock);
4284 list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
4285 list) {
4286 ref->in_tree = 0;
4287 list_del(&ref->list);
4288 if (!list_empty(&ref->add_list))
4289 list_del(&ref->add_list);
4290 atomic_dec(&delayed_refs->num_entries);
4291 btrfs_put_delayed_ref(ref);
4292 }
4293 if (head->must_insert_reserved)
4294 pin_bytes = true;
4295 btrfs_free_delayed_extent_op(head->extent_op);
4296 delayed_refs->num_heads--;
4297 if (head->processing == 0)
4298 delayed_refs->num_heads_ready--;
4299 atomic_dec(&delayed_refs->num_entries);
4300 head->node.in_tree = 0;
4301 rb_erase(&head->href_node, &delayed_refs->href_root);
4302 spin_unlock(&head->lock);
4303 spin_unlock(&delayed_refs->lock);
4304 mutex_unlock(&head->mutex);
4305
4306 if (pin_bytes)
4307 btrfs_pin_extent(fs_info, head->node.bytenr,
4308 head->node.num_bytes, 1);
4309 btrfs_put_delayed_ref(&head->node);
4310 cond_resched();
4311 spin_lock(&delayed_refs->lock);
4312 }
4313
4314 spin_unlock(&delayed_refs->lock);
4315
4316 return ret;
4317 }
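
/*
 * The mutex_trylock() dance above is the standard way to drain a ref
 * head that may be in use: if the head's mutex is contended, take a
 * reference, drop delayed_refs->lock, block on the mutex until the
 * current owner is done, then put the reference and restart the scan
 * from rb_first().
 */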
4318
4319 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4320 {
4321 struct btrfs_inode *btrfs_inode;
4322 struct list_head splice;
4323
4324 INIT_LIST_HEAD(&splice);
4325
4326 spin_lock(&root->delalloc_lock);
4327 list_splice_init(&root->delalloc_inodes, &splice);
4328
4329 while (!list_empty(&splice)) {
4330 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4331 delalloc_inodes);
4332
4333 list_del_init(&btrfs_inode->delalloc_inodes);
4334 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
4335 &btrfs_inode->runtime_flags);
4336 spin_unlock(&root->delalloc_lock);
4337
4338 btrfs_invalidate_inodes(btrfs_inode->root);
4339
4340 spin_lock(&root->delalloc_lock);
4341 }
4342
4343 spin_unlock(&root->delalloc_lock);
4344 }
4345
4346 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4347 {
4348 struct btrfs_root *root;
4349 struct list_head splice;
4350
4351 INIT_LIST_HEAD(&splice);
4352
4353 spin_lock(&fs_info->delalloc_root_lock);
4354 list_splice_init(&fs_info->delalloc_roots, &splice);
4355 while (!list_empty(&splice)) {
4356 root = list_first_entry(&splice, struct btrfs_root,
4357 delalloc_root);
4358 list_del_init(&root->delalloc_root);
4359 root = btrfs_grab_fs_root(root);
4360 BUG_ON(!root);
4361 spin_unlock(&fs_info->delalloc_root_lock);
4362
4363 btrfs_destroy_delalloc_inodes(root);
4364 btrfs_put_fs_root(root);
4365
4366 spin_lock(&fs_info->delalloc_root_lock);
4367 }
4368 spin_unlock(&fs_info->delalloc_root_lock);
4369 }
4370
4371 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4372 struct extent_io_tree *dirty_pages,
4373 int mark)
4374 {
4375 int ret;
4376 struct extent_buffer *eb;
4377 u64 start = 0;
4378 u64 end;
4379
4380 while (1) {
4381 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4382 mark, NULL);
4383 if (ret)
4384 break;
4385
4386 clear_extent_bits(dirty_pages, start, end, mark);
4387 while (start <= end) {
4388 eb = find_extent_buffer(fs_info, start);
4389 start += fs_info->nodesize;
4390 if (!eb)
4391 continue;
4392 wait_on_extent_buffer_writeback(eb);
4393
4394 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4395 &eb->bflags))
4396 clear_extent_buffer_dirty(eb);
4397 free_extent_buffer_stale(eb);
4398 }
4399 }
4400
4401 return ret;
4402 }
4403
4404 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4405 struct extent_io_tree *pinned_extents)
4406 {
4407 struct extent_io_tree *unpin;
4408 u64 start;
4409 u64 end;
4410 int ret;
4411 bool loop = true;
4412
4413 unpin = pinned_extents;
4414 again:
4415 while (1) {
4416 ret = find_first_extent_bit(unpin, 0, &start, &end,
4417 EXTENT_DIRTY, NULL);
4418 if (ret)
4419 break;
4420
4421 clear_extent_dirty(unpin, start, end);
4422 btrfs_error_unpin_extent_range(fs_info, start, end);
4423 cond_resched();
4424 }
4425
4426 if (loop) {
4427 if (unpin == &fs_info->freed_extents[0])
4428 unpin = &fs_info->freed_extents[1];
4429 else
4430 unpin = &fs_info->freed_extents[0];
4431 loop = false;
4432 goto again;
4433 }
4434
4435 return 0;
4436 }
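
/*
 * fs_info->pinned_extents always points at one of the two
 * fs_info->freed_extents trees (they are swapped at commit time), so
 * the loop/again construct above just makes sure both trees are
 * drained no matter which one is currently active.
 */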
4437
4438 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4439 {
4440 struct inode *inode;
4441
4442 inode = cache->io_ctl.inode;
4443 if (inode) {
4444 invalidate_inode_pages2(inode->i_mapping);
4445 BTRFS_I(inode)->generation = 0;
4446 cache->io_ctl.inode = NULL;
4447 iput(inode);
4448 }
4449 btrfs_put_block_group(cache);
4450 }
4451
4452 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4453 struct btrfs_fs_info *fs_info)
4454 {
4455 struct btrfs_block_group_cache *cache;
4456
4457 spin_lock(&cur_trans->dirty_bgs_lock);
4458 while (!list_empty(&cur_trans->dirty_bgs)) {
4459 cache = list_first_entry(&cur_trans->dirty_bgs,
4460 struct btrfs_block_group_cache,
4461 dirty_list);
4462 if (!cache) {
4463 btrfs_err(fs_info, "orphan block group dirty_bgs list");
4464 spin_unlock(&cur_trans->dirty_bgs_lock);
4465 return;
4466 }
4467
4468 if (!list_empty(&cache->io_list)) {
4469 spin_unlock(&cur_trans->dirty_bgs_lock);
4470 list_del_init(&cache->io_list);
4471 btrfs_cleanup_bg_io(cache);
4472 spin_lock(&cur_trans->dirty_bgs_lock);
4473 }
4474
4475 list_del_init(&cache->dirty_list);
4476 spin_lock(&cache->lock);
4477 cache->disk_cache_state = BTRFS_DC_ERROR;
4478 spin_unlock(&cache->lock);
4479
4480 spin_unlock(&cur_trans->dirty_bgs_lock);
4481 btrfs_put_block_group(cache);
4482 spin_lock(&cur_trans->dirty_bgs_lock);
4483 }
4484 spin_unlock(&cur_trans->dirty_bgs_lock);
4485
4486 while (!list_empty(&cur_trans->io_bgs)) {
4487 cache = list_first_entry(&cur_trans->io_bgs,
4488 struct btrfs_block_group_cache,
4489 io_list);
4490 if (!cache) {
4491 btrfs_err(fs_info, "orphan block group on io_bgs list");
4492 return;
4493 }
4494
4495 list_del_init(&cache->io_list);
4496 spin_lock(&cache->lock);
4497 cache->disk_cache_state = BTRFS_DC_ERROR;
4498 spin_unlock(&cache->lock);
4499 btrfs_cleanup_bg_io(cache);
4500 }
4501 }
4502
4503 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4504 struct btrfs_fs_info *fs_info)
4505 {
4506 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4507 ASSERT(list_empty(&cur_trans->dirty_bgs));
4508 ASSERT(list_empty(&cur_trans->io_bgs));
4509
4510 btrfs_destroy_delayed_refs(cur_trans, fs_info);
4511
4512 cur_trans->state = TRANS_STATE_COMMIT_START;
4513 wake_up(&fs_info->transaction_blocked_wait);
4514
4515 cur_trans->state = TRANS_STATE_UNBLOCKED;
4516 wake_up(&fs_info->transaction_wait);
4517
4518 btrfs_destroy_delayed_inodes(fs_info);
4519 btrfs_assert_delayed_root_empty(fs_info);
4520
4521 btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4522 EXTENT_DIRTY);
4523 btrfs_destroy_pinned_extent(fs_info,
4524 fs_info->pinned_extents);
4525
4526 cur_trans->state = TRANS_STATE_COMPLETED;
4527 wake_up(&cur_trans->commit_wait);
4528 }
4529
4530 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4531 {
4532 struct btrfs_transaction *t;
4533
4534 mutex_lock(&fs_info->transaction_kthread_mutex);
4535
4536 spin_lock(&fs_info->trans_lock);
4537 while (!list_empty(&fs_info->trans_list)) {
4538 t = list_first_entry(&fs_info->trans_list,
4539 struct btrfs_transaction, list);
4540 if (t->state >= TRANS_STATE_COMMIT_START) {
4541 refcount_inc(&t->use_count);
4542 spin_unlock(&fs_info->trans_lock);
4543 btrfs_wait_for_commit(fs_info, t->transid);
4544 btrfs_put_transaction(t);
4545 spin_lock(&fs_info->trans_lock);
4546 continue;
4547 }
4548 if (t == fs_info->running_transaction) {
4549 t->state = TRANS_STATE_COMMIT_DOING;
4550 spin_unlock(&fs_info->trans_lock);
4551 /*
4552 * Wait for num_writers to drop to 0, since we don't currently
4553 * hold a trans handle open for this transaction.
4554 */
4555 wait_event(t->writer_wait,
4556 atomic_read(&t->num_writers) == 0);
4557 } else {
4558 spin_unlock(&fs_info->trans_lock);
4559 }
4560 btrfs_cleanup_one_transaction(t, fs_info);
4561
4562 spin_lock(&fs_info->trans_lock);
4563 if (t == fs_info->running_transaction)
4564 fs_info->running_transaction = NULL;
4565 list_del_init(&t->list);
4566 spin_unlock(&fs_info->trans_lock);
4567
4568 btrfs_put_transaction(t);
4569 trace_btrfs_transaction_commit(fs_info->tree_root);
4570 spin_lock(&fs_info->trans_lock);
4571 }
4572 spin_unlock(&fs_info->trans_lock);
4573 btrfs_destroy_all_ordered_extents(fs_info);
4574 btrfs_destroy_delayed_inodes(fs_info);
4575 btrfs_assert_delayed_root_empty(fs_info);
4576 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4577 btrfs_destroy_all_delalloc_inodes(fs_info);
4578 mutex_unlock(&fs_info->transaction_kthread_mutex);
4579
4580 return 0;
4581 }
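
/*
 * btrfs_cleanup_transaction() only runs on the error path. It drains
 * fs_info->trans_list: transactions that already reached
 * TRANS_STATE_COMMIT_START are waited for via btrfs_wait_for_commit(),
 * everything else is force-cleaned with
 * btrfs_cleanup_one_transaction() before the global lists are torn
 * down.
 */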
4582
4583 static struct btrfs_fs_info *btree_fs_info(void *private_data)
4584 {
4585 struct inode *inode = private_data;
4586 return btrfs_sb(inode->i_sb);
4587 }
4588
4589 static const struct extent_io_ops btree_extent_io_ops = {
4590 /* mandatory callbacks */
4591 .submit_bio_hook = btree_submit_bio_hook,
4592 .readpage_end_io_hook = btree_readpage_end_io_hook,
4593 /* note we're sharing with inode.c for the merge bio hook */
4594 .merge_bio_hook = btrfs_merge_bio_hook,
4595 .readpage_io_failed_hook = btree_io_failed_hook,
4596 .set_range_writeback = btrfs_set_range_writeback,
4597 .tree_fs_info = btree_fs_info,
4598
4599 /* optional callbacks */
4600 };