/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "async-thread.h"
#include "free-space-cache.h"
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				    int read_only);
static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
};
/* These are used to set the lockdep class on the extent buffer locks.
 * The class is set by the readpage_end_io_hook after the buffer has
 * passed csum validation but before the pages are unlocked.
 *
 * The lockdep class is also set by btrfs_init_new_buffer on freshly
 * allocated blocks.
 *
 * The class is based on the level in the tree block, which allows lockdep
 * to know that lower nodes nest inside the locks of higher nodes.
 *
 * We also add a check to make sure the highest level of the tree is
 * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
 * code needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
	/* highest possible level */
};
#endif
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t page_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
	}
	write_unlock(&em_tree->lock);
out:
	return em;
}
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&map_token, &kaddr,
					&map_start, &map_len, KM_USER0);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);
			read_extent_buffer(buf, &val, 0, csum_size);
			if (printk_ratelimit()) {
				printk(KERN_INFO "btrfs: %s checksum verify "
				       "failed on %llu wanted %X found %X "
				       "level %d\n",
				       root->fs_info->sb->s_id,
				       (unsigned long long)buf->start, val, found,
				       btrfs_header_level(buf));
			}
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 0, &cached_state, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	if (printk_ratelimit()) {
		printk("parent transid verify failed on %llu wanted %llu "
		       "found %llu\n",
		       (unsigned long long)eb->start,
		       (unsigned long long)parent_transid,
		       (unsigned long long)btrfs_header_generation(eb));
	}
	ret = 1;
	clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return ret;

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			return ret;

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;
	}
	return -EIO;
}
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block.
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	unsigned long len;
	struct extent_buffer *eb;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE) {
		WARN_ON(1);
		goto out;
	}
	if (!page->private) {
		WARN_ON(1);
		goto out;
	}
	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));

	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		WARN_ON(1);
		goto err;
	}
	if (eb->first_page != page) {
		WARN_ON(1);
		goto err;
	}
	if (!PageUptodate(page)) {
		WARN_ON(1);
		goto err;
	}
	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}
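
/*
 * Check the fsid recorded in a tree block's header against the fsids of
 * this filesystem's devices, walking the seed device chain, so that blocks
 * belonging to some other filesystem are rejected.
 */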
static int check_tree_block_fsid(struct btrfs_root *root,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
			   BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}
#define CORRUPT(reason, eb, root, slot)				\
	printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu,"	\
	       "root=%llu, slot=%d\n", reason,			\
	       (unsigned long long)btrfs_header_bytenr(eb),	\
	       (unsigned long long)root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards
		 * the front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
		    btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * in case all the items are consistent with each other but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
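
/*
 * Tag an extent buffer's lock with the lockdep class for its level in the
 * tree, using the per-level class table defined above.
 */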
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
	lockdep_set_class_and_name(&eb->lock,
				   &btrfs_eb_class[level],
				   btrfs_eb_name[level]);
}
#endif
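
/*
 * Validate a tree block as its read completes: the header bytenr must match
 * the location the block was read from, the fsid must belong to this
 * filesystem, and the checksum must verify.  Corrupt leaves get the corrupt
 * bit set so the other mirrors are not retried needlessly.
 */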
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;

	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "btrfs bad tree block start "
			       "%llu %llu\n",
			       (unsigned long long)found_start,
			       (unsigned long long)eb->start);
		}
		ret = -EIO;
		goto err;
	}
	if (eb->first_page != page) {
		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
		       eb->first_page->index, page->index);
		WARN_ON(1);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root, eb)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "btrfs bad fsid on block %llu\n",
			       (unsigned long long)eb->start);
		}
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	btrfs_set_buffer_lockdep_class(eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret) {
		ret = -EIO;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
err:
	free_extent_buffer(eb);
out:
	return ret;
}
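
/*
 * bio end_io callback: the real completion work (checksum verification for
 * reads, metadata insertion for writes) must run in task context, so this
 * just hands the bio off to the appropriate helper thread pool.
 */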
static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == 1)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == 2)
			btrfs_queue_worker(&fs_info->endio_freespace_worker,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}
/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;

	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
	return atomic_read(&info->nr_async_bios) >
		btrfs_async_submit_limit(info);
}
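
/*
 * The next three functions run on the worker threads for async bio
 * submission: _start does the checksumming, _done submits the bio and wakes
 * any throttled waiters, and _free releases the async_submit_bio.
 */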
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	async->submit_bio_start(async->inode, async->rw, async->bio,
				async->mirror_num, async->bio_flags,
				async->bio_offset);
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	atomic_dec(&fs_info->nr_async_submits);

	if (atomic_read(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}
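
/*
 * Hand a bio to the worker threads for async submission.  submit_bio_start
 * runs on the worker thread before the bio goes down the stack (to do the
 * checksumming), submit_bio_done runs afterwards; callers wait here while a
 * drain of pending async submits is in progress.
 */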
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & REQ_SYNC)
		btrfs_set_work_high_prio(&async->work);

	btrfs_queue_worker(&fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}
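
/*
 * Checksum every metadata page attached to this bio; called from the async
 * submission path just before the write is mapped and sent down.
 */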
static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	struct btrfs_root *root;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		csum_dirty_buffer(root, bvec->bv_page);
		bio_index++;
		bvec++;
	}
	return 0;
}
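
/*
 * The __btree_submit_bio_start/_done pair and btree_submit_bio_hook below
 * route metadata writes through the async checksumming helpers, while reads
 * go straight to btrfs_map_bio with end_io processing deferred to the endio
 * workqueues.
 */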
759 static int __btree_submit_bio_start(struct inode
*inode
, int rw
,
760 struct bio
*bio
, int mirror_num
,
761 unsigned long bio_flags
,
765 * when we're called for a write, we're already in the async
766 * submission context. Just jump into btrfs_map_bio
768 btree_csum_one_bio(bio
);
772 static int __btree_submit_bio_done(struct inode
*inode
, int rw
, struct bio
*bio
,
773 int mirror_num
, unsigned long bio_flags
,
777 * when we're called for a write, we're already in the async
778 * submission context. Just jump into btrfs_map_bio
780 return btrfs_map_bio(BTRFS_I(inode
)->root
, rw
, bio
, mirror_num
, 1);
783 static int btree_submit_bio_hook(struct inode
*inode
, int rw
, struct bio
*bio
,
784 int mirror_num
, unsigned long bio_flags
,
789 ret
= btrfs_bio_wq_end_io(BTRFS_I(inode
)->root
->fs_info
,
793 if (!(rw
& REQ_WRITE
)) {
795 * called for a read, do the setup so that checksum validation
796 * can happen in the async kernel threads
798 return btrfs_map_bio(BTRFS_I(inode
)->root
, rw
, bio
,
803 * kthread helpers are used to submit writes so that checksumming
804 * can happen in parallel across all CPUs
806 return btrfs_wq_submit_bio(BTRFS_I(inode
)->root
->fs_info
,
807 inode
, rw
, bio
, mirror_num
, 0,
809 __btree_submit_bio_start
,
810 __btree_submit_bio_done
);
813 #ifdef CONFIG_MIGRATION
814 static int btree_migratepage(struct address_space
*mapping
,
815 struct page
*newpage
, struct page
*page
)
818 * we can't safely write a btree page from here,
819 * we haven't done the locking hook
824 * Buffers may be managed in a filesystem specific way.
825 * We must have no buffers or drop them.
827 if (page_has_private(page
) &&
828 !try_to_release_page(page
, GFP_KERNEL
))
830 return migrate_page(mapping
, newpage
, page
);
834 static int btree_writepage(struct page
*page
, struct writeback_control
*wbc
)
836 struct extent_io_tree
*tree
;
837 struct btrfs_root
*root
= BTRFS_I(page
->mapping
->host
)->root
;
838 struct extent_buffer
*eb
;
841 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
842 if (!(current
->flags
& PF_MEMALLOC
)) {
843 return extent_write_full_page(tree
, page
,
844 btree_get_extent
, wbc
);
847 redirty_page_for_writepage(wbc
, page
);
848 eb
= btrfs_find_tree_block(root
, page_offset(page
), PAGE_CACHE_SIZE
);
851 was_dirty
= test_and_set_bit(EXTENT_BUFFER_DIRTY
, &eb
->bflags
);
853 spin_lock(&root
->fs_info
->delalloc_lock
);
854 root
->fs_info
->dirty_metadata_bytes
+= PAGE_CACHE_SIZE
;
855 spin_unlock(&root
->fs_info
->delalloc_lock
);
857 free_extent_buffer(eb
);
863 static int btree_writepages(struct address_space
*mapping
,
864 struct writeback_control
*wbc
)
866 struct extent_io_tree
*tree
;
867 tree
= &BTRFS_I(mapping
->host
)->io_tree
;
868 if (wbc
->sync_mode
== WB_SYNC_NONE
) {
869 struct btrfs_root
*root
= BTRFS_I(mapping
->host
)->root
;
871 unsigned long thresh
= 32 * 1024 * 1024;
873 if (wbc
->for_kupdate
)
876 /* this is a bit racy, but that's ok */
877 num_dirty
= root
->fs_info
->dirty_metadata_bytes
;
878 if (num_dirty
< thresh
)
881 return extent_writepages(tree
, mapping
, btree_get_extent
, wbc
);
884 static int btree_readpage(struct file
*file
, struct page
*page
)
886 struct extent_io_tree
*tree
;
887 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
888 return extent_read_full_page(tree
, page
, btree_get_extent
);
891 static int btree_releasepage(struct page
*page
, gfp_t gfp_flags
)
893 struct extent_io_tree
*tree
;
894 struct extent_map_tree
*map
;
897 if (PageWriteback(page
) || PageDirty(page
))
900 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
901 map
= &BTRFS_I(page
->mapping
->host
)->extent_tree
;
903 ret
= try_release_extent_state(map
, tree
, page
, gfp_flags
);
907 ret
= try_release_extent_buffer(tree
, page
);
909 ClearPagePrivate(page
);
910 set_page_private(page
, 0);
911 page_cache_release(page
);
917 static void btree_invalidatepage(struct page
*page
, unsigned long offset
)
919 struct extent_io_tree
*tree
;
920 tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
921 extent_invalidatepage(tree
, page
, offset
);
922 btree_releasepage(page
, GFP_NOFS
);
923 if (PagePrivate(page
)) {
924 printk(KERN_WARNING
"btrfs warning page private not zero "
925 "on page %llu\n", (unsigned long long)page_offset(page
));
926 ClearPagePrivate(page
);
927 set_page_private(page
, 0);
928 page_cache_release(page
);
static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
	.sync_page	= block_sync_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
};
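
/*
 * Start readahead on a tree block: pull the pages into the btree inode's
 * page cache without waiting for or verifying them.
 */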
944 int readahead_tree_block(struct btrfs_root
*root
, u64 bytenr
, u32 blocksize
,
947 struct extent_buffer
*buf
= NULL
;
948 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
951 buf
= btrfs_find_create_tree_block(root
, bytenr
, blocksize
);
954 read_extent_buffer_pages(&BTRFS_I(btree_inode
)->io_tree
,
955 buf
, 0, 0, btree_get_extent
, 0);
956 free_extent_buffer(buf
);
960 struct extent_buffer
*btrfs_find_tree_block(struct btrfs_root
*root
,
961 u64 bytenr
, u32 blocksize
)
963 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
964 struct extent_buffer
*eb
;
965 eb
= find_extent_buffer(&BTRFS_I(btree_inode
)->io_tree
,
966 bytenr
, blocksize
, GFP_NOFS
);
970 struct extent_buffer
*btrfs_find_create_tree_block(struct btrfs_root
*root
,
971 u64 bytenr
, u32 blocksize
)
973 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
974 struct extent_buffer
*eb
;
976 eb
= alloc_extent_buffer(&BTRFS_I(btree_inode
)->io_tree
,
977 bytenr
, blocksize
, NULL
, GFP_NOFS
);
982 int btrfs_write_tree_block(struct extent_buffer
*buf
)
984 return filemap_fdatawrite_range(buf
->first_page
->mapping
, buf
->start
,
985 buf
->start
+ buf
->len
- 1);
988 int btrfs_wait_tree_block_writeback(struct extent_buffer
*buf
)
990 return filemap_fdatawait_range(buf
->first_page
->mapping
,
991 buf
->start
, buf
->start
+ buf
->len
- 1);
994 struct extent_buffer
*read_tree_block(struct btrfs_root
*root
, u64 bytenr
,
995 u32 blocksize
, u64 parent_transid
)
997 struct extent_buffer
*buf
= NULL
;
1000 buf
= btrfs_find_create_tree_block(root
, bytenr
, blocksize
);
1004 ret
= btree_read_extent_buffer_pages(root
, buf
, 0, parent_transid
);
1007 set_bit(EXTENT_BUFFER_UPTODATE
, &buf
->bflags
);
1012 int clean_tree_block(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
1013 struct extent_buffer
*buf
)
1015 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
1016 if (btrfs_header_generation(buf
) ==
1017 root
->fs_info
->running_transaction
->transid
) {
1018 btrfs_assert_tree_locked(buf
);
1020 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY
, &buf
->bflags
)) {
1021 spin_lock(&root
->fs_info
->delalloc_lock
);
1022 if (root
->fs_info
->dirty_metadata_bytes
>= buf
->len
)
1023 root
->fs_info
->dirty_metadata_bytes
-= buf
->len
;
1026 spin_unlock(&root
->fs_info
->delalloc_lock
);
1029 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1030 btrfs_set_lock_blocking(buf
);
1031 clear_extent_buffer_dirty(&BTRFS_I(btree_inode
)->io_tree
,
1037 static int __setup_root(u32 nodesize
, u32 leafsize
, u32 sectorsize
,
1038 u32 stripesize
, struct btrfs_root
*root
,
1039 struct btrfs_fs_info
*fs_info
,
1043 root
->commit_root
= NULL
;
1044 root
->sectorsize
= sectorsize
;
1045 root
->nodesize
= nodesize
;
1046 root
->leafsize
= leafsize
;
1047 root
->stripesize
= stripesize
;
1049 root
->track_dirty
= 0;
1051 root
->orphan_item_inserted
= 0;
1052 root
->orphan_cleanup_state
= 0;
1054 root
->fs_info
= fs_info
;
1055 root
->objectid
= objectid
;
1056 root
->last_trans
= 0;
1057 root
->highest_objectid
= 0;
1060 root
->inode_tree
= RB_ROOT
;
1061 root
->block_rsv
= NULL
;
1062 root
->orphan_block_rsv
= NULL
;
1064 INIT_LIST_HEAD(&root
->dirty_list
);
1065 INIT_LIST_HEAD(&root
->orphan_list
);
1066 INIT_LIST_HEAD(&root
->root_list
);
1067 spin_lock_init(&root
->node_lock
);
1068 spin_lock_init(&root
->orphan_lock
);
1069 spin_lock_init(&root
->inode_lock
);
1070 spin_lock_init(&root
->accounting_lock
);
1071 mutex_init(&root
->objectid_mutex
);
1072 mutex_init(&root
->log_mutex
);
1073 init_waitqueue_head(&root
->log_writer_wait
);
1074 init_waitqueue_head(&root
->log_commit_wait
[0]);
1075 init_waitqueue_head(&root
->log_commit_wait
[1]);
1076 atomic_set(&root
->log_commit
[0], 0);
1077 atomic_set(&root
->log_commit
[1], 0);
1078 atomic_set(&root
->log_writers
, 0);
1079 root
->log_batch
= 0;
1080 root
->log_transid
= 0;
1081 root
->last_log_commit
= 0;
1082 extent_io_tree_init(&root
->dirty_log_pages
,
1083 fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
1085 memset(&root
->root_key
, 0, sizeof(root
->root_key
));
1086 memset(&root
->root_item
, 0, sizeof(root
->root_item
));
1087 memset(&root
->defrag_progress
, 0, sizeof(root
->defrag_progress
));
1088 memset(&root
->root_kobj
, 0, sizeof(root
->root_kobj
));
1089 root
->defrag_trans_start
= fs_info
->generation
;
1090 init_completion(&root
->kobj_unregister
);
1091 root
->defrag_running
= 0;
1092 root
->root_key
.objectid
= objectid
;
1093 root
->anon_super
.s_root
= NULL
;
1094 root
->anon_super
.s_dev
= 0;
1095 INIT_LIST_HEAD(&root
->anon_super
.s_list
);
1096 INIT_LIST_HEAD(&root
->anon_super
.s_instances
);
1097 init_rwsem(&root
->anon_super
.s_umount
);
1102 static int find_and_setup_root(struct btrfs_root
*tree_root
,
1103 struct btrfs_fs_info
*fs_info
,
1105 struct btrfs_root
*root
)
1111 __setup_root(tree_root
->nodesize
, tree_root
->leafsize
,
1112 tree_root
->sectorsize
, tree_root
->stripesize
,
1113 root
, fs_info
, objectid
);
1114 ret
= btrfs_find_last_root(tree_root
, objectid
,
1115 &root
->root_item
, &root
->root_key
);
1120 generation
= btrfs_root_generation(&root
->root_item
);
1121 blocksize
= btrfs_level_size(root
, btrfs_root_level(&root
->root_item
));
1122 root
->node
= read_tree_block(root
, btrfs_root_bytenr(&root
->root_item
),
1123 blocksize
, generation
);
1124 if (!root
->node
|| !btrfs_buffer_uptodate(root
->node
, generation
)) {
1125 free_extent_buffer(root
->node
);
1128 root
->commit_root
= btrfs_root_node(root
);
1132 static struct btrfs_root
*alloc_log_tree(struct btrfs_trans_handle
*trans
,
1133 struct btrfs_fs_info
*fs_info
)
1135 struct btrfs_root
*root
;
1136 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
1137 struct extent_buffer
*leaf
;
1139 root
= kzalloc(sizeof(*root
), GFP_NOFS
);
1141 return ERR_PTR(-ENOMEM
);
1143 __setup_root(tree_root
->nodesize
, tree_root
->leafsize
,
1144 tree_root
->sectorsize
, tree_root
->stripesize
,
1145 root
, fs_info
, BTRFS_TREE_LOG_OBJECTID
);
1147 root
->root_key
.objectid
= BTRFS_TREE_LOG_OBJECTID
;
1148 root
->root_key
.type
= BTRFS_ROOT_ITEM_KEY
;
1149 root
->root_key
.offset
= BTRFS_TREE_LOG_OBJECTID
;
1151 * log trees do not get reference counted because they go away
1152 * before a real commit is actually done. They do store pointers
1153 * to file data extents, and those reference counts still get
1154 * updated (along with back refs to the log tree).
1158 leaf
= btrfs_alloc_free_block(trans
, root
, root
->leafsize
, 0,
1159 BTRFS_TREE_LOG_OBJECTID
, NULL
, 0, 0, 0);
1162 return ERR_CAST(leaf
);
1165 memset_extent_buffer(leaf
, 0, 0, sizeof(struct btrfs_header
));
1166 btrfs_set_header_bytenr(leaf
, leaf
->start
);
1167 btrfs_set_header_generation(leaf
, trans
->transid
);
1168 btrfs_set_header_backref_rev(leaf
, BTRFS_MIXED_BACKREF_REV
);
1169 btrfs_set_header_owner(leaf
, BTRFS_TREE_LOG_OBJECTID
);
1172 write_extent_buffer(root
->node
, root
->fs_info
->fsid
,
1173 (unsigned long)btrfs_header_fsid(root
->node
),
1175 btrfs_mark_buffer_dirty(root
->node
);
1176 btrfs_tree_unlock(root
->node
);
1180 int btrfs_init_log_root_tree(struct btrfs_trans_handle
*trans
,
1181 struct btrfs_fs_info
*fs_info
)
1183 struct btrfs_root
*log_root
;
1185 log_root
= alloc_log_tree(trans
, fs_info
);
1186 if (IS_ERR(log_root
))
1187 return PTR_ERR(log_root
);
1188 WARN_ON(fs_info
->log_root_tree
);
1189 fs_info
->log_root_tree
= log_root
;
1193 int btrfs_add_log_tree(struct btrfs_trans_handle
*trans
,
1194 struct btrfs_root
*root
)
1196 struct btrfs_root
*log_root
;
1197 struct btrfs_inode_item
*inode_item
;
1199 log_root
= alloc_log_tree(trans
, root
->fs_info
);
1200 if (IS_ERR(log_root
))
1201 return PTR_ERR(log_root
);
1203 log_root
->last_trans
= trans
->transid
;
1204 log_root
->root_key
.offset
= root
->root_key
.objectid
;
1206 inode_item
= &log_root
->root_item
.inode
;
1207 inode_item
->generation
= cpu_to_le64(1);
1208 inode_item
->size
= cpu_to_le64(3);
1209 inode_item
->nlink
= cpu_to_le32(1);
1210 inode_item
->nbytes
= cpu_to_le64(root
->leafsize
);
1211 inode_item
->mode
= cpu_to_le32(S_IFDIR
| 0755);
1213 btrfs_set_root_node(&log_root
->root_item
, log_root
->node
);
1215 WARN_ON(root
->log_root
);
1216 root
->log_root
= log_root
;
1217 root
->log_transid
= 0;
1218 root
->last_log_commit
= 0;
1222 struct btrfs_root
*btrfs_read_fs_root_no_radix(struct btrfs_root
*tree_root
,
1223 struct btrfs_key
*location
)
1225 struct btrfs_root
*root
;
1226 struct btrfs_fs_info
*fs_info
= tree_root
->fs_info
;
1227 struct btrfs_path
*path
;
1228 struct extent_buffer
*l
;
1233 root
= kzalloc(sizeof(*root
), GFP_NOFS
);
1235 return ERR_PTR(-ENOMEM
);
1236 if (location
->offset
== (u64
)-1) {
1237 ret
= find_and_setup_root(tree_root
, fs_info
,
1238 location
->objectid
, root
);
1241 return ERR_PTR(ret
);
1246 __setup_root(tree_root
->nodesize
, tree_root
->leafsize
,
1247 tree_root
->sectorsize
, tree_root
->stripesize
,
1248 root
, fs_info
, location
->objectid
);
1250 path
= btrfs_alloc_path();
1252 ret
= btrfs_search_slot(NULL
, tree_root
, location
, path
, 0, 0);
1255 read_extent_buffer(l
, &root
->root_item
,
1256 btrfs_item_ptr_offset(l
, path
->slots
[0]),
1257 sizeof(root
->root_item
));
1258 memcpy(&root
->root_key
, location
, sizeof(*location
));
1260 btrfs_free_path(path
);
1265 return ERR_PTR(ret
);
1268 generation
= btrfs_root_generation(&root
->root_item
);
1269 blocksize
= btrfs_level_size(root
, btrfs_root_level(&root
->root_item
));
1270 root
->node
= read_tree_block(root
, btrfs_root_bytenr(&root
->root_item
),
1271 blocksize
, generation
);
1272 root
->commit_root
= btrfs_root_node(root
);
1273 BUG_ON(!root
->node
);
1275 if (location
->objectid
!= BTRFS_TREE_LOG_OBJECTID
)
1281 struct btrfs_root
*btrfs_lookup_fs_root(struct btrfs_fs_info
*fs_info
,
1284 struct btrfs_root
*root
;
1286 if (root_objectid
== BTRFS_ROOT_TREE_OBJECTID
)
1287 return fs_info
->tree_root
;
1288 if (root_objectid
== BTRFS_EXTENT_TREE_OBJECTID
)
1289 return fs_info
->extent_root
;
1291 root
= radix_tree_lookup(&fs_info
->fs_roots_radix
,
1292 (unsigned long)root_objectid
);
1296 struct btrfs_root
*btrfs_read_fs_root_no_name(struct btrfs_fs_info
*fs_info
,
1297 struct btrfs_key
*location
)
1299 struct btrfs_root
*root
;
1302 if (location
->objectid
== BTRFS_ROOT_TREE_OBJECTID
)
1303 return fs_info
->tree_root
;
1304 if (location
->objectid
== BTRFS_EXTENT_TREE_OBJECTID
)
1305 return fs_info
->extent_root
;
1306 if (location
->objectid
== BTRFS_CHUNK_TREE_OBJECTID
)
1307 return fs_info
->chunk_root
;
1308 if (location
->objectid
== BTRFS_DEV_TREE_OBJECTID
)
1309 return fs_info
->dev_root
;
1310 if (location
->objectid
== BTRFS_CSUM_TREE_OBJECTID
)
1311 return fs_info
->csum_root
;
1313 spin_lock(&fs_info
->fs_roots_radix_lock
);
1314 root
= radix_tree_lookup(&fs_info
->fs_roots_radix
,
1315 (unsigned long)location
->objectid
);
1316 spin_unlock(&fs_info
->fs_roots_radix_lock
);
1320 root
= btrfs_read_fs_root_no_radix(fs_info
->tree_root
, location
);
1324 set_anon_super(&root
->anon_super
, NULL
);
1326 if (btrfs_root_refs(&root
->root_item
) == 0) {
1331 ret
= btrfs_find_orphan_item(fs_info
->tree_root
, location
->objectid
);
1335 root
->orphan_item_inserted
= 1;
1337 ret
= radix_tree_preload(GFP_NOFS
& ~__GFP_HIGHMEM
);
1341 spin_lock(&fs_info
->fs_roots_radix_lock
);
1342 ret
= radix_tree_insert(&fs_info
->fs_roots_radix
,
1343 (unsigned long)root
->root_key
.objectid
,
1348 spin_unlock(&fs_info
->fs_roots_radix_lock
);
1349 radix_tree_preload_end();
1351 if (ret
== -EEXIST
) {
1358 ret
= btrfs_find_dead_roots(fs_info
->tree_root
,
1359 root
->root_key
.objectid
);
1364 return ERR_PTR(ret
);
1367 struct btrfs_root
*btrfs_read_fs_root(struct btrfs_fs_info
*fs_info
,
1368 struct btrfs_key
*location
,
1369 const char *name
, int namelen
)
1371 return btrfs_read_fs_root_no_name(fs_info
, location
);
1373 struct btrfs_root
*root
;
1376 root
= btrfs_read_fs_root_no_name(fs_info
, location
);
1383 ret
= btrfs_set_root_name(root
, name
, namelen
);
1385 free_extent_buffer(root
->node
);
1387 return ERR_PTR(ret
);
1390 ret
= btrfs_sysfs_add_root(root
);
1392 free_extent_buffer(root
->node
);
1395 return ERR_PTR(ret
);
1402 static int btrfs_congested_fn(void *congested_data
, int bdi_bits
)
1404 struct btrfs_fs_info
*info
= (struct btrfs_fs_info
*)congested_data
;
1406 struct btrfs_device
*device
;
1407 struct backing_dev_info
*bdi
;
1409 list_for_each_entry(device
, &info
->fs_devices
->devices
, dev_list
) {
1412 bdi
= blk_get_backing_dev_info(device
->bdev
);
1413 if (bdi
&& bdi_congested(bdi
, bdi_bits
)) {
1422 * this unplugs every device on the box, and it is only used when page
1425 static void __unplug_io_fn(struct backing_dev_info
*bdi
, struct page
*page
)
1427 struct btrfs_device
*device
;
1428 struct btrfs_fs_info
*info
;
1430 info
= (struct btrfs_fs_info
*)bdi
->unplug_io_data
;
1431 list_for_each_entry(device
, &info
->fs_devices
->devices
, dev_list
) {
1435 bdi
= blk_get_backing_dev_info(device
->bdev
);
1436 if (bdi
->unplug_io_fn
)
1437 bdi
->unplug_io_fn(bdi
, page
);
1441 static void btrfs_unplug_io_fn(struct backing_dev_info
*bdi
, struct page
*page
)
1443 struct inode
*inode
;
1444 struct extent_map_tree
*em_tree
;
1445 struct extent_map
*em
;
1446 struct address_space
*mapping
;
1449 /* the generic O_DIRECT read code does this */
1451 __unplug_io_fn(bdi
, page
);
1456 * page->mapping may change at any time. Get a consistent copy
1457 * and use that for everything below
1460 mapping
= page
->mapping
;
1464 inode
= mapping
->host
;
1467 * don't do the expensive searching for a small number of
1470 if (BTRFS_I(inode
)->root
->fs_info
->fs_devices
->open_devices
<= 2) {
1471 __unplug_io_fn(bdi
, page
);
1475 offset
= page_offset(page
);
1477 em_tree
= &BTRFS_I(inode
)->extent_tree
;
1478 read_lock(&em_tree
->lock
);
1479 em
= lookup_extent_mapping(em_tree
, offset
, PAGE_CACHE_SIZE
);
1480 read_unlock(&em_tree
->lock
);
1482 __unplug_io_fn(bdi
, page
);
1486 if (em
->block_start
>= EXTENT_MAP_LAST_BYTE
) {
1487 free_extent_map(em
);
1488 __unplug_io_fn(bdi
, page
);
1491 offset
= offset
- em
->start
;
1492 btrfs_unplug_page(&BTRFS_I(inode
)->root
->fs_info
->mapping_tree
,
1493 em
->block_start
+ offset
, page
);
1494 free_extent_map(em
);
1498 * If this fails, caller must call bdi_destroy() to get rid of the
1501 static int setup_bdi(struct btrfs_fs_info
*info
, struct backing_dev_info
*bdi
)
1505 bdi
->capabilities
= BDI_CAP_MAP_COPY
;
1506 err
= bdi_setup_and_register(bdi
, "btrfs", BDI_CAP_MAP_COPY
);
1510 bdi
->ra_pages
= default_backing_dev_info
.ra_pages
;
1511 bdi
->unplug_io_fn
= btrfs_unplug_io_fn
;
1512 bdi
->unplug_io_data
= info
;
1513 bdi
->congested_fn
= btrfs_congested_fn
;
1514 bdi
->congested_data
= info
;
1518 static int bio_ready_for_csum(struct bio
*bio
)
1524 struct extent_io_tree
*io_tree
= NULL
;
1525 struct bio_vec
*bvec
;
1529 bio_for_each_segment(bvec
, bio
, i
) {
1530 page
= bvec
->bv_page
;
1531 if (page
->private == EXTENT_PAGE_PRIVATE
) {
1532 length
+= bvec
->bv_len
;
1535 if (!page
->private) {
1536 length
+= bvec
->bv_len
;
1539 length
= bvec
->bv_len
;
1540 buf_len
= page
->private >> 2;
1541 start
= page_offset(page
) + bvec
->bv_offset
;
1542 io_tree
= &BTRFS_I(page
->mapping
->host
)->io_tree
;
1544 /* are we fully contained in this bio? */
1545 if (buf_len
<= length
)
1548 ret
= extent_range_uptodate(io_tree
, start
+ length
,
1549 start
+ buf_len
- 1);
1554 * called by the kthread helper functions to finally call the bio end_io
1555 * functions. This is where read checksum verification actually happens
1557 static void end_workqueue_fn(struct btrfs_work
*work
)
1560 struct end_io_wq
*end_io_wq
;
1561 struct btrfs_fs_info
*fs_info
;
1564 end_io_wq
= container_of(work
, struct end_io_wq
, work
);
1565 bio
= end_io_wq
->bio
;
1566 fs_info
= end_io_wq
->info
;
1568 /* metadata bio reads are special because the whole tree block must
1569 * be checksummed at once. This makes sure the entire block is in
1570 * ram and up to date before trying to verify things. For
1571 * blocksize <= pagesize, it is basically a noop
1573 if (!(bio
->bi_rw
& REQ_WRITE
) && end_io_wq
->metadata
&&
1574 !bio_ready_for_csum(bio
)) {
1575 btrfs_queue_worker(&fs_info
->endio_meta_workers
,
1579 error
= end_io_wq
->error
;
1580 bio
->bi_private
= end_io_wq
->private;
1581 bio
->bi_end_io
= end_io_wq
->end_io
;
1583 bio_endio(bio
, error
);
1586 static int cleaner_kthread(void *arg
)
1588 struct btrfs_root
*root
= arg
;
1591 vfs_check_frozen(root
->fs_info
->sb
, SB_FREEZE_WRITE
);
1593 if (!(root
->fs_info
->sb
->s_flags
& MS_RDONLY
) &&
1594 mutex_trylock(&root
->fs_info
->cleaner_mutex
)) {
1595 btrfs_run_delayed_iputs(root
);
1596 btrfs_clean_old_snapshots(root
);
1597 mutex_unlock(&root
->fs_info
->cleaner_mutex
);
1600 if (freezing(current
)) {
1603 set_current_state(TASK_INTERRUPTIBLE
);
1604 if (!kthread_should_stop())
1606 __set_current_state(TASK_RUNNING
);
1608 } while (!kthread_should_stop());
1612 static int transaction_kthread(void *arg
)
1614 struct btrfs_root
*root
= arg
;
1615 struct btrfs_trans_handle
*trans
;
1616 struct btrfs_transaction
*cur
;
1619 unsigned long delay
;
1624 vfs_check_frozen(root
->fs_info
->sb
, SB_FREEZE_WRITE
);
1625 mutex_lock(&root
->fs_info
->transaction_kthread_mutex
);
1627 spin_lock(&root
->fs_info
->new_trans_lock
);
1628 cur
= root
->fs_info
->running_transaction
;
1630 spin_unlock(&root
->fs_info
->new_trans_lock
);
1634 now
= get_seconds();
1635 if (!cur
->blocked
&&
1636 (now
< cur
->start_time
|| now
- cur
->start_time
< 30)) {
1637 spin_unlock(&root
->fs_info
->new_trans_lock
);
1641 transid
= cur
->transid
;
1642 spin_unlock(&root
->fs_info
->new_trans_lock
);
1644 trans
= btrfs_join_transaction(root
, 1);
1645 BUG_ON(IS_ERR(trans
));
1646 if (transid
== trans
->transid
) {
1647 ret
= btrfs_commit_transaction(trans
, root
);
1650 btrfs_end_transaction(trans
, root
);
1653 wake_up_process(root
->fs_info
->cleaner_kthread
);
1654 mutex_unlock(&root
->fs_info
->transaction_kthread_mutex
);
1656 if (freezing(current
)) {
1659 set_current_state(TASK_INTERRUPTIBLE
);
1660 if (!kthread_should_stop() &&
1661 !btrfs_transaction_blocked(root
->fs_info
))
1662 schedule_timeout(delay
);
1663 __set_current_state(TASK_RUNNING
);
1665 } while (!kthread_should_stop());
1669 struct btrfs_root
*open_ctree(struct super_block
*sb
,
1670 struct btrfs_fs_devices
*fs_devices
,
1680 struct btrfs_key location
;
1681 struct buffer_head
*bh
;
1682 struct btrfs_root
*extent_root
= kzalloc(sizeof(struct btrfs_root
),
1684 struct btrfs_root
*csum_root
= kzalloc(sizeof(struct btrfs_root
),
1686 struct btrfs_root
*tree_root
= btrfs_sb(sb
);
1687 struct btrfs_fs_info
*fs_info
= tree_root
->fs_info
;
1688 struct btrfs_root
*chunk_root
= kzalloc(sizeof(struct btrfs_root
),
1690 struct btrfs_root
*dev_root
= kzalloc(sizeof(struct btrfs_root
),
1692 struct btrfs_root
*log_tree_root
;
1697 struct btrfs_super_block
*disk_super
;
1699 if (!extent_root
|| !tree_root
|| !fs_info
||
1700 !chunk_root
|| !dev_root
|| !csum_root
) {
1705 ret
= init_srcu_struct(&fs_info
->subvol_srcu
);
1711 ret
= setup_bdi(fs_info
, &fs_info
->bdi
);
1717 fs_info
->btree_inode
= new_inode(sb
);
1718 if (!fs_info
->btree_inode
) {
1723 INIT_RADIX_TREE(&fs_info
->fs_roots_radix
, GFP_ATOMIC
);
1724 INIT_LIST_HEAD(&fs_info
->trans_list
);
1725 INIT_LIST_HEAD(&fs_info
->dead_roots
);
1726 INIT_LIST_HEAD(&fs_info
->delayed_iputs
);
1727 INIT_LIST_HEAD(&fs_info
->hashers
);
1728 INIT_LIST_HEAD(&fs_info
->delalloc_inodes
);
1729 INIT_LIST_HEAD(&fs_info
->ordered_operations
);
1730 INIT_LIST_HEAD(&fs_info
->caching_block_groups
);
1731 spin_lock_init(&fs_info
->delalloc_lock
);
1732 spin_lock_init(&fs_info
->new_trans_lock
);
1733 spin_lock_init(&fs_info
->ref_cache_lock
);
1734 spin_lock_init(&fs_info
->fs_roots_radix_lock
);
1735 spin_lock_init(&fs_info
->delayed_iput_lock
);
1737 init_completion(&fs_info
->kobj_unregister
);
1738 fs_info
->tree_root
= tree_root
;
1739 fs_info
->extent_root
= extent_root
;
1740 fs_info
->csum_root
= csum_root
;
1741 fs_info
->chunk_root
= chunk_root
;
1742 fs_info
->dev_root
= dev_root
;
1743 fs_info
->fs_devices
= fs_devices
;
1744 INIT_LIST_HEAD(&fs_info
->dirty_cowonly_roots
);
1745 INIT_LIST_HEAD(&fs_info
->space_info
);
1746 btrfs_mapping_init(&fs_info
->mapping_tree
);
1747 btrfs_init_block_rsv(&fs_info
->global_block_rsv
);
1748 btrfs_init_block_rsv(&fs_info
->delalloc_block_rsv
);
1749 btrfs_init_block_rsv(&fs_info
->trans_block_rsv
);
1750 btrfs_init_block_rsv(&fs_info
->chunk_block_rsv
);
1751 btrfs_init_block_rsv(&fs_info
->empty_block_rsv
);
1752 INIT_LIST_HEAD(&fs_info
->durable_block_rsv_list
);
1753 mutex_init(&fs_info
->durable_block_rsv_mutex
);
1754 atomic_set(&fs_info
->nr_async_submits
, 0);
1755 atomic_set(&fs_info
->async_delalloc_pages
, 0);
1756 atomic_set(&fs_info
->async_submit_draining
, 0);
1757 atomic_set(&fs_info
->nr_async_bios
, 0);
1759 fs_info
->max_inline
= 8192 * 1024;
1760 fs_info
->metadata_ratio
= 0;
1762 fs_info
->thread_pool_size
= min_t(unsigned long,
1763 num_online_cpus() + 2, 8);
1765 INIT_LIST_HEAD(&fs_info
->ordered_extents
);
1766 spin_lock_init(&fs_info
->ordered_extent_lock
);
1768 sb
->s_blocksize
= 4096;
1769 sb
->s_blocksize_bits
= blksize_bits(4096);
1770 sb
->s_bdi
= &fs_info
->bdi
;
1772 fs_info
->btree_inode
->i_ino
= BTRFS_BTREE_INODE_OBJECTID
;
1773 fs_info
->btree_inode
->i_nlink
= 1;
1775 * we set the i_size on the btree inode to the max possible int.
1776 * the real end of the address space is determined by all of
1777 * the devices in the system
1779 fs_info
->btree_inode
->i_size
= OFFSET_MAX
;
1780 fs_info
->btree_inode
->i_mapping
->a_ops
= &btree_aops
;
1781 fs_info
->btree_inode
->i_mapping
->backing_dev_info
= &fs_info
->bdi
;
1783 RB_CLEAR_NODE(&BTRFS_I(fs_info
->btree_inode
)->rb_node
);
1784 extent_io_tree_init(&BTRFS_I(fs_info
->btree_inode
)->io_tree
,
1785 fs_info
->btree_inode
->i_mapping
,
1787 extent_map_tree_init(&BTRFS_I(fs_info
->btree_inode
)->extent_tree
,
1790 BTRFS_I(fs_info
->btree_inode
)->io_tree
.ops
= &btree_extent_io_ops
;
1792 BTRFS_I(fs_info
->btree_inode
)->root
= tree_root
;
1793 memset(&BTRFS_I(fs_info
->btree_inode
)->location
, 0,
1794 sizeof(struct btrfs_key
));
1795 BTRFS_I(fs_info
->btree_inode
)->dummy_inode
= 1;
1796 insert_inode_hash(fs_info
->btree_inode
);
1798 spin_lock_init(&fs_info
->block_group_cache_lock
);
1799 fs_info
->block_group_cache_tree
= RB_ROOT
;
1801 extent_io_tree_init(&fs_info
->freed_extents
[0],
1802 fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
1803 extent_io_tree_init(&fs_info
->freed_extents
[1],
1804 fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
1805 fs_info
->pinned_extents
= &fs_info
->freed_extents
[0];
1806 fs_info
->do_barriers
= 1;
1809 mutex_init(&fs_info
->trans_mutex
);
1810 mutex_init(&fs_info
->ordered_operations_mutex
);
1811 mutex_init(&fs_info
->tree_log_mutex
);
1812 mutex_init(&fs_info
->chunk_mutex
);
1813 mutex_init(&fs_info
->transaction_kthread_mutex
);
1814 mutex_init(&fs_info
->cleaner_mutex
);
1815 mutex_init(&fs_info
->volume_mutex
);
1816 init_rwsem(&fs_info
->extent_commit_sem
);
1817 init_rwsem(&fs_info
->cleanup_work_sem
);
1818 init_rwsem(&fs_info
->subvol_sem
);
1820 btrfs_init_free_cluster(&fs_info
->meta_alloc_cluster
);
1821 btrfs_init_free_cluster(&fs_info
->data_alloc_cluster
);
1823 init_waitqueue_head(&fs_info
->transaction_throttle
);
1824 init_waitqueue_head(&fs_info
->transaction_wait
);
1825 init_waitqueue_head(&fs_info
->transaction_blocked_wait
);
1826 init_waitqueue_head(&fs_info
->async_submit_wait
);
1828 __setup_root(4096, 4096, 4096, 4096, tree_root
,
1829 fs_info
, BTRFS_ROOT_TREE_OBJECTID
);
1831 bh
= btrfs_read_dev_super(fs_devices
->latest_bdev
);
1837 memcpy(&fs_info
->super_copy
, bh
->b_data
, sizeof(fs_info
->super_copy
));
1838 memcpy(&fs_info
->super_for_commit
, &fs_info
->super_copy
,
1839 sizeof(fs_info
->super_for_commit
));
1842 memcpy(fs_info
->fsid
, fs_info
->super_copy
.fsid
, BTRFS_FSID_SIZE
);
1844 disk_super
= &fs_info
->super_copy
;
1845 if (!btrfs_super_root(disk_super
))
1848 /* check FS state, whether FS is broken. */
1849 fs_info
->fs_state
|= btrfs_super_flags(disk_super
);
1851 btrfs_check_super_valid(fs_info
, sb
->s_flags
& MS_RDONLY
);
1853 ret
= btrfs_parse_options(tree_root
, options
);
1859 features
= btrfs_super_incompat_flags(disk_super
) &
1860 ~BTRFS_FEATURE_INCOMPAT_SUPP
;
1862 printk(KERN_ERR
"BTRFS: couldn't mount because of "
1863 "unsupported optional features (%Lx).\n",
1864 (unsigned long long)features
);
1869 features
= btrfs_super_incompat_flags(disk_super
);
1870 features
|= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF
;
1871 if (tree_root
->fs_info
->compress_type
& BTRFS_COMPRESS_LZO
)
1872 features
|= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO
;
1873 btrfs_set_super_incompat_flags(disk_super
, features
);
1875 features
= btrfs_super_compat_ro_flags(disk_super
) &
1876 ~BTRFS_FEATURE_COMPAT_RO_SUPP
;
1877 if (!(sb
->s_flags
& MS_RDONLY
) && features
) {
1878 printk(KERN_ERR
"BTRFS: couldn't mount RDWR because of "
1879 "unsupported option features (%Lx).\n",
1880 (unsigned long long)features
);
1885 btrfs_init_workers(&fs_info
->generic_worker
,
1886 "genwork", 1, NULL
);
1888 btrfs_init_workers(&fs_info
->workers
, "worker",
1889 fs_info
->thread_pool_size
,
1890 &fs_info
->generic_worker
);
1892 btrfs_init_workers(&fs_info
->delalloc_workers
, "delalloc",
1893 fs_info
->thread_pool_size
,
1894 &fs_info
->generic_worker
);
1896 btrfs_init_workers(&fs_info
->submit_workers
, "submit",
1897 min_t(u64
, fs_devices
->num_devices
,
1898 fs_info
->thread_pool_size
),
1899 &fs_info
->generic_worker
);
1901 /* a higher idle thresh on the submit workers makes it much more
1902 * likely that bios will be send down in a sane order to the
1905 fs_info
->submit_workers
.idle_thresh
= 64;
1907 fs_info
->workers
.idle_thresh
= 16;
1908 fs_info
->workers
.ordered
= 1;
1910 fs_info
->delalloc_workers
.idle_thresh
= 2;
1911 fs_info
->delalloc_workers
.ordered
= 1;
1913 btrfs_init_workers(&fs_info
->fixup_workers
, "fixup", 1,
1914 &fs_info
->generic_worker
);
1915 btrfs_init_workers(&fs_info
->endio_workers
, "endio",
1916 fs_info
->thread_pool_size
,
1917 &fs_info
->generic_worker
);
1918 btrfs_init_workers(&fs_info
->endio_meta_workers
, "endio-meta",
1919 fs_info
->thread_pool_size
,
1920 &fs_info
->generic_worker
);
1921 btrfs_init_workers(&fs_info
->endio_meta_write_workers
,
1922 "endio-meta-write", fs_info
->thread_pool_size
,
1923 &fs_info
->generic_worker
);
1924 btrfs_init_workers(&fs_info
->endio_write_workers
, "endio-write",
1925 fs_info
->thread_pool_size
,
1926 &fs_info
->generic_worker
);
1927 btrfs_init_workers(&fs_info
->endio_freespace_worker
, "freespace-write",
1928 1, &fs_info
->generic_worker
);
1931 * endios are largely parallel and should have a very
1934 fs_info
->endio_workers
.idle_thresh
= 4;
1935 fs_info
->endio_meta_workers
.idle_thresh
= 4;
1937 fs_info
->endio_write_workers
.idle_thresh
= 2;
1938 fs_info
->endio_meta_write_workers
.idle_thresh
= 2;
1940 btrfs_start_workers(&fs_info
->workers
, 1);
1941 btrfs_start_workers(&fs_info
->generic_worker
, 1);
1942 btrfs_start_workers(&fs_info
->submit_workers
, 1);
1943 btrfs_start_workers(&fs_info
->delalloc_workers
, 1);
1944 btrfs_start_workers(&fs_info
->fixup_workers
, 1);
1945 btrfs_start_workers(&fs_info
->endio_workers
, 1);
1946 btrfs_start_workers(&fs_info
->endio_meta_workers
, 1);
1947 btrfs_start_workers(&fs_info
->endio_meta_write_workers
, 1);
1948 btrfs_start_workers(&fs_info
->endio_write_workers
, 1);
1949 btrfs_start_workers(&fs_info
->endio_freespace_worker
, 1);
1951 fs_info
->bdi
.ra_pages
*= btrfs_super_num_devices(disk_super
);
1952 fs_info
->bdi
.ra_pages
= max(fs_info
->bdi
.ra_pages
,
1953 4 * 1024 * 1024 / PAGE_CACHE_SIZE
);
1955 nodesize
= btrfs_super_nodesize(disk_super
);
1956 leafsize
= btrfs_super_leafsize(disk_super
);
1957 sectorsize
= btrfs_super_sectorsize(disk_super
);
1958 stripesize
= btrfs_super_stripesize(disk_super
);
1959 tree_root
->nodesize
= nodesize
;
1960 tree_root
->leafsize
= leafsize
;
1961 tree_root
->sectorsize
= sectorsize
;
1962 tree_root
->stripesize
= stripesize
;
1964 sb
->s_blocksize
= sectorsize
;
1965 sb
->s_blocksize_bits
= blksize_bits(sectorsize
);
1967 if (strncmp((char *)(&disk_super
->magic
), BTRFS_MAGIC
,
1968 sizeof(disk_super
->magic
))) {
1969 printk(KERN_INFO
"btrfs: valid FS not found on %s\n", sb
->s_id
);
1970 goto fail_sb_buffer
;
1973 mutex_lock(&fs_info
->chunk_mutex
);
1974 ret
= btrfs_read_sys_array(tree_root
);
1975 mutex_unlock(&fs_info
->chunk_mutex
);
1977 printk(KERN_WARNING
"btrfs: failed to read the system "
1978 "array on %s\n", sb
->s_id
);
1979 goto fail_sb_buffer
;
1982 blocksize
= btrfs_level_size(tree_root
,
1983 btrfs_super_chunk_root_level(disk_super
));
1984 generation
= btrfs_super_chunk_root_generation(disk_super
);
1986 __setup_root(nodesize
, leafsize
, sectorsize
, stripesize
,
1987 chunk_root
, fs_info
, BTRFS_CHUNK_TREE_OBJECTID
);
1989 chunk_root
->node
= read_tree_block(chunk_root
,
1990 btrfs_super_chunk_root(disk_super
),
1991 blocksize
, generation
);
1992 BUG_ON(!chunk_root
->node
);
1993 if (!test_bit(EXTENT_BUFFER_UPTODATE
, &chunk_root
->node
->bflags
)) {
1994 printk(KERN_WARNING
"btrfs: failed to read chunk root on %s\n",
1996 goto fail_chunk_root
;
1998 btrfs_set_root_node(&chunk_root
->root_item
, chunk_root
->node
);
1999 chunk_root
->commit_root
= btrfs_root_node(chunk_root
);
2001 read_extent_buffer(chunk_root
->node
, fs_info
->chunk_tree_uuid
,
2002 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root
->node
),
2005 mutex_lock(&fs_info
->chunk_mutex
);
2006 ret
= btrfs_read_chunk_tree(chunk_root
);
2007 mutex_unlock(&fs_info
->chunk_mutex
);
2009 printk(KERN_WARNING
"btrfs: failed to read chunk tree on %s\n",
2011 goto fail_chunk_root
;
2014 btrfs_close_extra_devices(fs_devices
);
2016 blocksize
= btrfs_level_size(tree_root
,
2017 btrfs_super_root_level(disk_super
));
2018 generation
= btrfs_super_generation(disk_super
);
2020 tree_root
->node
= read_tree_block(tree_root
,
2021 btrfs_super_root(disk_super
),
2022 blocksize
, generation
);
2023 if (!tree_root
->node
)
2024 goto fail_chunk_root
;
2025 if (!test_bit(EXTENT_BUFFER_UPTODATE
, &tree_root
->node
->bflags
)) {
2026 printk(KERN_WARNING
"btrfs: failed to read tree root on %s\n",
2028 goto fail_tree_root
;
2030 btrfs_set_root_node(&tree_root
->root_item
, tree_root
->node
);
2031 tree_root
->commit_root
= btrfs_root_node(tree_root
);
2033 ret
= find_and_setup_root(tree_root
, fs_info
,
2034 BTRFS_EXTENT_TREE_OBJECTID
, extent_root
);
2036 goto fail_tree_root
;
2037 extent_root
->track_dirty
= 1;
2039 ret
= find_and_setup_root(tree_root
, fs_info
,
2040 BTRFS_DEV_TREE_OBJECTID
, dev_root
);
2042 goto fail_extent_root
;
2043 dev_root
->track_dirty
= 1;
2045 ret
= find_and_setup_root(tree_root
, fs_info
,
2046 BTRFS_CSUM_TREE_OBJECTID
, csum_root
);
2050 csum_root
        csum_root->track_dirty = 1;

        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
        fs_info->data_alloc_profile = (u64)-1;
        fs_info->metadata_alloc_profile = (u64)-1;
        fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;

        ret = btrfs_read_block_groups(extent_root);
        if (ret) {
                printk(KERN_ERR "Failed to read block groups: %d\n", ret);
                goto fail_block_groups;
        }

        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (IS_ERR(fs_info->cleaner_kthread))
                goto fail_block_groups;

        fs_info->transaction_kthread = kthread_run(transaction_kthread,
                                                   tree_root,
                                                   "btrfs-transaction");
        if (IS_ERR(fs_info->transaction_kthread))
                goto fail_cleaner;

        if (!btrfs_test_opt(tree_root, SSD) &&
            !btrfs_test_opt(tree_root, NOSSD) &&
            !fs_info->fs_devices->rotating) {
                printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
                       "mode\n");
                btrfs_set_opt(fs_info->mount_opt, SSD);
        }

        /* do not make disk changes in broken FS */
        if (btrfs_super_log_root(disk_super) != 0 &&
            !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
                u64 bytenr = btrfs_super_log_root(disk_super);

                if (fs_devices->rw_devices == 0) {
                        printk(KERN_WARNING "Btrfs log replay required "
                               "on RO media\n");
                        err = -EIO;
                        goto fail_trans_kthread;
                }
                blocksize = btrfs_level_size(tree_root,
                                btrfs_super_log_root_level(disk_super));

                log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
                if (!log_tree_root) {
                        err = -ENOMEM;
                        goto fail_trans_kthread;
                }

                __setup_root(nodesize, leafsize, sectorsize, stripesize,
                             log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

                log_tree_root->node = read_tree_block(tree_root, bytenr,
                                                      blocksize,
                                                      generation + 1);
                ret = btrfs_recover_log_trees(log_tree_root);

                if (sb->s_flags & MS_RDONLY) {
                        ret = btrfs_commit_super(tree_root);
                }
        }

        ret = btrfs_find_orphan_roots(tree_root);

        if (!(sb->s_flags & MS_RDONLY)) {
                ret = btrfs_cleanup_fs_roots(fs_info);

                ret = btrfs_recover_relocation(tree_root);
                if (ret < 0) {
                        printk(KERN_WARNING
                               "btrfs: failed to recover relocation\n");
                        err = -EINVAL;
                        goto fail_trans_kthread;
                }
        }

        location.objectid = BTRFS_FS_TREE_OBJECTID;
        location.type = BTRFS_ROOT_ITEM_KEY;
        location.offset = (u64)-1;

        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
        if (!fs_info->fs_root)
                goto fail_trans_kthread;
        if (IS_ERR(fs_info->fs_root)) {
                err = PTR_ERR(fs_info->fs_root);
                goto fail_trans_kthread;
        }

        if (!(sb->s_flags & MS_RDONLY)) {
                down_read(&fs_info->cleanup_work_sem);
                err = btrfs_orphan_cleanup(fs_info->fs_root);
                if (!err)
                        err = btrfs_orphan_cleanup(fs_info->tree_root);
                up_read(&fs_info->cleanup_work_sem);
                if (err) {
                        close_ctree(tree_root);
                        return ERR_PTR(err);
                }
        }

        return tree_root;

fail_trans_kthread:
        kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);

        /*
         * make sure we're done with the btree inode before we stop our
         * kthreads
         */
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_block_groups:
        btrfs_free_block_groups(fs_info);
        free_extent_buffer(csum_root->node);
        free_extent_buffer(csum_root->commit_root);
        free_extent_buffer(dev_root->node);
        free_extent_buffer(dev_root->commit_root);
        free_extent_buffer(extent_root->node);
        free_extent_buffer(extent_root->commit_root);
        free_extent_buffer(tree_root->node);
        free_extent_buffer(tree_root->commit_root);
        free_extent_buffer(chunk_root->node);
        free_extent_buffer(chunk_root->commit_root);

        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);

        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);

        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);

        return ERR_PTR(err);
}
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (printk_ratelimit()) {
                        printk(KERN_WARNING "lost page write due to "
                               "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
        struct buffer_head *bh;
        struct buffer_head *latest = NULL;
        struct btrfs_super_block *super;
        int i;
        u64 transid = 0;
        u64 bytenr;

        /* we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        for (i = 0; i < 1; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
                        break;
                bh = __bread(bdev, bytenr / 4096, 4096);
                if (!bh)
                        continue;

                super = (struct btrfs_super_block *)bh->b_data;
                if (btrfs_super_bytenr(super) != bytenr ||
                    strncmp((char *)(&super->magic), BTRFS_MAGIC,
                            sizeof(super->magic))) {
                        brelse(bh);
                        continue;
                }

                if (!latest || btrfs_super_generation(super) > transid) {
                        brelse(latest);
                        latest = bh;
                        transid = btrfs_super_generation(super);
                } else {
                        brelse(bh);
                }
        }
        return latest;
}
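
/*
 * Illustrative sketch, not part of this file's logic: how the mirror
 * offsets probed by btrfs_read_dev_super() are laid out on disk.  This is
 * a stand-alone user-space approximation of btrfs_sb_offset(); the
 * SKETCH_* constants are assumptions mirroring the usual definitions in
 * ctree.h (primary copy at 64KiB, each further copy shifted by 12 bits).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SKETCH_SUPER_INFO_OFFSET        (64 * 1024ULL)  /* assumed */
#define SKETCH_SUPER_MIRROR_MAX         3               /* assumed */
#define SKETCH_SUPER_MIRROR_SHIFT       12              /* assumed */

/* rough user-space equivalent of btrfs_sb_offset(), for illustration */
static uint64_t sketch_sb_offset(int mirror)
{
        uint64_t start = 16 * 1024ULL;

        if (mirror)
                return start << (SKETCH_SUPER_MIRROR_SHIFT * mirror);
        return SKETCH_SUPER_INFO_OFFSET;
}

int main(void)
{
        int i;

        /* prints 64KiB, 64MiB and 256GiB with the assumed constants */
        for (i = 0; i < SKETCH_SUPER_MIRROR_MAX; i++)
                printf("super mirror %d at byte %llu\n", i,
                       (unsigned long long)sketch_sb_offset(i));
        return 0;
}
#endif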
/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1.  When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
                            struct btrfs_super_block *sb,
                            int do_barriers, int wait, int max_mirrors)
{
        struct buffer_head *bh;
        int i;
        int ret;
        int errors = 0;
        u32 crc;
        u64 bytenr;
        int last_barrier = 0;

        if (max_mirrors == 0)
                max_mirrors = BTRFS_SUPER_MIRROR_MAX;

        /* make sure only the last submit_bh does a barrier */
        if (do_barriers) {
                for (i = 0; i < max_mirrors; i++) {
                        bytenr = btrfs_sb_offset(i);
                        if (bytenr + BTRFS_SUPER_INFO_SIZE >=
                            device->total_bytes)
                                break;
                        last_barrier = i;
                }
        }

        for (i = 0; i < max_mirrors; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
                        break;

                if (wait) {
                        bh = __find_get_block(device->bdev, bytenr / 4096,
                                              BTRFS_SUPER_INFO_SIZE);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                errors++;

                        /* drop our reference */
                        brelse(bh);

                        /* drop the reference from the wait == 0 run */
                        brelse(bh);
                        continue;
                } else {
                        btrfs_set_super_bytenr(sb, bytenr);

                        crc = ~(u32)0;
                        crc = btrfs_csum_data(NULL, (char *)sb +
                                              BTRFS_CSUM_SIZE, crc,
                                              BTRFS_SUPER_INFO_SIZE -
                                              BTRFS_CSUM_SIZE);
                        btrfs_csum_final(crc, sb->csum);

                        /*
                         * one reference for us, and we leave it for the
                         * caller
                         */
                        bh = __getblk(device->bdev, bytenr / 4096,
                                      BTRFS_SUPER_INFO_SIZE);
                        memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

                        /* one reference for submit_bh */
                        get_bh(bh);

                        set_buffer_uptodate(bh);
                        lock_buffer(bh);
                        bh->b_end_io = btrfs_end_buffer_write_sync;
                }

                if (i == last_barrier && do_barriers)
                        ret = submit_bh(WRITE_FLUSH_FUA, bh);
                else
                        ret = submit_bh(WRITE_SYNC, bh);

                if (ret)
                        errors++;
        }
        return errors < i ? 0 : -1;
}
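
/*
 * Illustrative sketch, not part of the kernel path above: the checksum
 * written into the super block is CRC-32C computed over everything after
 * the csum field itself; the code above seeds the crc with ~0 and
 * btrfs_csum_final() stores the inverted result.  The stand-alone sketch
 * below reproduces that shape in user space with a bitwise CRC-32C; the
 * 32-byte csum area and 4096-byte super size are assumptions for the
 * sketch, the real constants live in ctree.h.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* bitwise CRC-32C (Castagnoli, reflected polynomial 0x82F63B78) */
static uint32_t sketch_crc32c(uint32_t crc, const uint8_t *p, size_t len)
{
        int k;

        while (len--) {
                crc ^= *p++;
                for (k = 0; k < 8; k++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78 : crc >> 1;
        }
        return crc;
}

#define SKETCH_CSUM_SIZE        32      /* assumed */
#define SKETCH_SUPER_SIZE       4096    /* assumed */

int main(void)
{
        uint8_t super[SKETCH_SUPER_SIZE];
        uint32_t crc = ~0U;

        memset(super, 0, sizeof(super));
        /* checksum everything after the csum field, as the code above does */
        crc = sketch_crc32c(crc, super + SKETCH_CSUM_SIZE,
                            SKETCH_SUPER_SIZE - SKETCH_CSUM_SIZE);
        crc = ~crc;     /* the final inversion done by btrfs_csum_final() */
        printf("super crc32c: 0x%08x\n", crc);
        return 0;
}
#endif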
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
        struct list_head *head;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
        u64 flags;

        max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);

        sb = &root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        head = &root->fs_info->fs_devices->devices;
        list_for_each_entry(dev, head, dev_list) {
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

                ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
                if (ret)
                        total_errors++;
        }
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);
        }

        list_for_each_entry(dev, head, dev_list) {
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
                if (ret)
                        total_errors++;
        }
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);
        }
        return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, int max_mirrors)
{
        int ret;

        ret = write_all_supers(root, max_mirrors);
        return ret;
}
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);

        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);

        free_fs_root(root);
        return 0;
}
static void free_fs_root(struct btrfs_root *root)
{
        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
        if (root->anon_super.s_dev) {
                down_write(&root->anon_super.s_umount);
                kill_anon_super(&root->anon_super);
        }
        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root);
}
static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        int i;
        struct btrfs_root *gang[8];

        while (!list_empty(&fs_info->dead_roots)) {
                gang[0] = list_entry(fs_info->dead_roots.next,
                                     struct btrfs_root, root_list);
                list_del(&gang[0]->root_list);

                if (gang[0]->in_radix) {
                        btrfs_free_fs_root(fs_info, gang[0]);
                } else {
                        free_extent_buffer(gang[0]->node);
                        free_extent_buffer(gang[0]->commit_root);
                }
        }

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        btrfs_free_fs_root(fs_info, gang[i]);
        }
        return 0;
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
        u64 root_objectid = 0;
        struct btrfs_root *gang[8];
        int i;
        int ret;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, root_objectid,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;

                root_objectid = gang[ret - 1]->root_key.objectid + 1;
                for (i = 0; i < ret; i++) {
                        int err;

                        root_objectid = gang[i]->root_key.objectid;
                        err = btrfs_orphan_cleanup(gang[i]);
                        if (err)
                                return err;
                }
                root_objectid++;
        }
        return 0;
}
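
/*
 * Illustrative sketch, not kernel code: btrfs_cleanup_fs_roots() walks the
 * fs_roots radix tree in batches.  Each gang lookup returns up to 8 roots
 * whose objectid is >= root_objectid, and the cursor is then advanced past
 * the last key returned so the next lookup continues where the previous
 * batch ended.  The helper below mimics that cursor-advancing pattern over
 * a plain sorted array; all sketch_* names are made up for the example.
 */
#if 0
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* stand-in for radix_tree_gang_lookup(): items >= start, at most max */
static size_t sketch_gang_lookup(const uint64_t *keys, size_t nr_keys,
                                 uint64_t start, uint64_t *gang, size_t max)
{
        size_t i, found = 0;

        for (i = 0; i < nr_keys && found < max; i++)
                if (keys[i] >= start)
                        gang[found++] = keys[i];
        return found;
}

int main(void)
{
        const uint64_t roots[] = { 5, 256, 257, 260, 261, 262, 263, 270,
                                   271, 272, 300 };
        uint64_t gang[8];
        uint64_t cursor = 0;
        size_t ret, i;

        while (1) {
                ret = sketch_gang_lookup(roots,
                                         sizeof(roots) / sizeof(roots[0]),
                                         cursor, gang, 8);
                if (!ret)
                        break;
                /* advance past the last key so the next batch is disjoint */
                cursor = gang[ret - 1] + 1;
                for (i = 0; i < ret; i++)
                        printf("visiting root %llu\n",
                               (unsigned long long)gang[i]);
        }
        return 0;
}
#endif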
int btrfs_commit_super(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        int ret;

        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(root);
        btrfs_clean_old_snapshots(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);

        /* wait until ongoing cleanup work done */
        down_write(&root->fs_info->cleanup_work_sem);
        up_write(&root->fs_info->cleanup_work_sem);

        trans = btrfs_join_transaction(root, 1);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        ret = btrfs_commit_transaction(trans, root);

        /* run commit again to drop the original snapshot */
        trans = btrfs_join_transaction(root, 1);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);

        ret = write_ctree_super(NULL, root, 0);
        return ret;
}
int close_ctree(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;

        fs_info->closing = 1;

        btrfs_put_block_group_cache(fs_info);

        /*
         * Two situations arise when a broken btrfs has been flipped
         * read-only:
         *
         * 1. btrfs flips read-only somewhere else before
         *    btrfs_commit_super; sb->s_flags carries MS_RDONLY, so btrfs
         *    skips writing the sb directly in order to keep the ERROR
         *    state on disk.
         *
         * 2. btrfs flips read-only inside btrfs_commit_super itself, so it
         *    cannot write the sb via btrfs_commit_super; since fs_state has
         *    the BTRFS_SUPER_FLAG_ERROR flag set, btrfs cleans up all FS
         *    resources first and writes the sb afterwards.
         */
        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
                ret = btrfs_commit_super(root);
                if (ret)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }

        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                ret = btrfs_error_commit_super(root);
                if (ret)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }

        kthread_stop(root->fs_info->transaction_kthread);
        kthread_stop(root->fs_info->cleaner_kthread);

        fs_info->closing = 2;

        if (fs_info->delalloc_bytes) {
                printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
                       (unsigned long long)fs_info->delalloc_bytes);
        }
        if (fs_info->total_ref_cache_size) {
                printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
                       (unsigned long long)fs_info->total_ref_cache_size);
        }

        free_extent_buffer(fs_info->extent_root->node);
        free_extent_buffer(fs_info->extent_root->commit_root);
        free_extent_buffer(fs_info->tree_root->node);
        free_extent_buffer(fs_info->tree_root->commit_root);
        free_extent_buffer(root->fs_info->chunk_root->node);
        free_extent_buffer(root->fs_info->chunk_root->commit_root);
        free_extent_buffer(root->fs_info->dev_root->node);
        free_extent_buffer(root->fs_info->dev_root->commit_root);
        free_extent_buffer(root->fs_info->csum_root->node);
        free_extent_buffer(root->fs_info->csum_root->commit_root);

        btrfs_free_block_groups(root->fs_info);

        del_fs_roots(fs_info);

        iput(fs_info->btree_inode);

        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);

        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);

        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        kfree(fs_info->chunk_root);
        kfree(fs_info->dev_root);
        kfree(fs_info->csum_root);

        return 0;
}
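
/*
 * Illustrative sketch, not kernel code: the "flipped read-only" comment in
 * close_ctree() boils down to a small decision about which path is allowed
 * to write the super block at unmount.  The helper below restates that
 * decision for two boolean inputs; the sketch_* names are made up for the
 * example and nothing here is built into the kernel.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

static void sketch_close_ctree_paths(bool sb_rdonly, bool fs_state_error)
{
        /* a filesystem that is still writable commits normally */
        if (!sb_rdonly)
                printf("  btrfs_commit_super()\n");
        /* an fs flagged with the ERROR bit cleans up first, then writes */
        if (fs_state_error)
                printf("  btrfs_error_commit_super()\n");
        /* read-only without ERROR: nothing is written, on-disk state kept */
        if (sb_rdonly && !fs_state_error)
                printf("  no super write\n");
}

int main(void)
{
        printf("writable, no error:\n");
        sketch_close_ctree_paths(false, false);
        printf("read-only, error:\n");
        sketch_close_ctree_paths(true, true);
        printf("read-only, no error:\n");
        sketch_close_ctree_paths(true, false);
        return 0;
}
#endif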
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
        int ret;
        struct inode *btree_inode = buf->first_page->mapping->host;

        ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
                                     NULL);
        if (!ret)
                return ret;

        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
                                    parent_transid);
        return !ret;
}
int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
        struct inode *btree_inode = buf->first_page->mapping->host;
        return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        u64 transid = btrfs_header_generation(buf);
        struct inode *btree_inode = root->fs_info->btree_inode;
        int was_dirty;

        btrfs_assert_tree_locked(buf);
        if (transid != root->fs_info->generation) {
                printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
                       "found %llu running %llu\n",
                       (unsigned long long)buf->start,
                       (unsigned long long)transid,
                       (unsigned long long)root->fs_info->generation);
        }
        was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                            buf);
        if (!was_dirty) {
                spin_lock(&root->fs_info->delalloc_lock);
                root->fs_info->dirty_metadata_bytes += buf->len;
                spin_unlock(&root->fs_info->delalloc_lock);
        }
}
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
        /*
         * looks as though older kernels can get into trouble with
         * this code, they end up stuck in balance_dirty_pages forever
         */
        u64 num_dirty;
        unsigned long thresh = 32 * 1024 * 1024;

        if (current->flags & PF_MEMALLOC)
                return;

        num_dirty = root->fs_info->dirty_metadata_bytes;

        if (num_dirty > thresh) {
                balance_dirty_pages_ratelimited_nr(
                                   root->fs_info->btree_inode->i_mapping, 1);
        }
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        int ret;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0)
                set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
        return ret;
}
int btree_lock_page_hook(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_buffer *eb;
        unsigned long len;
        u64 bytenr = page_offset(page);

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;

        len = page->private >> 2;
        eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
        if (!eb)
                goto out;

        btrfs_tree_lock(eb);
        btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);

        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
                spin_lock(&root->fs_info->delalloc_lock);
                if (root->fs_info->dirty_metadata_bytes >= eb->len)
                        root->fs_info->dirty_metadata_bytes -= eb->len;
                spin_unlock(&root->fs_info->delalloc_lock);
        }

        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);
out:
        lock_page(page);
        return 0;
}
static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                    int read_only)
{
        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                printk(KERN_WARNING "warning: mount fs with errors, "
                       "running btrfsck is recommended\n");
}
int btrfs_error_commit_super(struct btrfs_root *root)
{
        int ret;

        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);

        down_write(&root->fs_info->cleanup_work_sem);
        up_write(&root->fs_info->cleanup_work_sem);

        /* cleanup FS via transaction */
        btrfs_cleanup_transaction(root);

        ret = write_ctree_super(NULL, root, 0);

        return ret;
}
static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
{
        struct btrfs_inode *btrfs_inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);

        list_splice_init(&root->fs_info->ordered_operations, &splice);
        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         ordered_operations);

                list_del_init(&btrfs_inode->ordered_operations);

                btrfs_invalidate_inodes(btrfs_inode->root);
        }

        spin_unlock(&root->fs_info->ordered_extent_lock);
        mutex_unlock(&root->fs_info->ordered_operations_mutex);

        return 0;
}
static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
        struct list_head splice;
        struct btrfs_ordered_extent *ordered;
        struct inode *inode;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->ordered_extent_lock);

        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
                ordered = list_entry(splice.next, struct btrfs_ordered_extent,
                                     root_extent_list);

                list_del_init(&ordered->root_extent_list);
                atomic_inc(&ordered->refs);

                /* the inode may be getting freed (in sys_unlink path). */
                inode = igrab(ordered->inode);

                spin_unlock(&root->fs_info->ordered_extent_lock);
                if (inode)
                        iput(inode);

                atomic_set(&ordered->refs, 1);
                btrfs_put_ordered_extent(ordered);

                spin_lock(&root->fs_info->ordered_extent_lock);
        }

        spin_unlock(&root->fs_info->ordered_extent_lock);

        return 0;
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root)
{
        struct rb_node *node;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        int ret = 0;

        delayed_refs = &trans->delayed_refs;

        spin_lock(&delayed_refs->lock);
        if (delayed_refs->num_entries == 0) {
                spin_unlock(&delayed_refs->lock);
                printk(KERN_INFO "delayed_refs has NO entry\n");
                return ret;
        }

        node = rb_first(&delayed_refs->root);
        while (node) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                node = rb_next(node);

                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;

                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {
                        struct btrfs_delayed_ref_head *head;

                        head = btrfs_delayed_node_to_head(ref);
                        mutex_lock(&head->mutex);
                        kfree(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
                        mutex_unlock(&head->mutex);
                }

                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);

                spin_lock(&delayed_refs->lock);
        }

        spin_unlock(&delayed_refs->lock);

        return ret;
}
static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
{
        struct btrfs_pending_snapshot *snapshot;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        list_splice_init(&t->pending_snapshots, &splice);

        while (!list_empty(&splice)) {
                snapshot = list_entry(splice.next,
                                      struct btrfs_pending_snapshot,
                                      list);

                list_del_init(&snapshot->list);

                kfree(snapshot);
        }

        return 0;
}
static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
        struct btrfs_inode *btrfs_inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        list_splice_init(&root->fs_info->delalloc_inodes, &splice);

        spin_lock(&root->fs_info->delalloc_lock);

        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         delalloc_inodes);

                list_del_init(&btrfs_inode->delalloc_inodes);

                btrfs_invalidate_inodes(btrfs_inode->root);
        }

        spin_unlock(&root->fs_info->delalloc_lock);

        return 0;
}
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark)
{
        int ret;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        u64 start = 0;
        u64 end;
        u64 offset;
        unsigned long index;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            mark);
                if (ret)
                        break;

                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
                while (start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;

                        offset = page_offset(page);

                        spin_lock(&dirty_pages->buffer_lock);
                        eb = radix_tree_lookup(
                             &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
                                               offset >> PAGE_CACHE_SHIFT);
                        spin_unlock(&dirty_pages->buffer_lock);
                        if (eb) {
                                ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
                                                         &eb->bflags);
                                atomic_set(&eb->refs, 1);
                        }
                        if (PageWriteback(page))
                                end_page_writeback(page);

                        if (PageDirty(page)) {
                                clear_page_dirty_for_io(page);
                                spin_lock_irq(&page->mapping->tree_lock);
                                radix_tree_tag_clear(&page->mapping->page_tree,
                                                     page_index(page),
                                                     PAGECACHE_TAG_DIRTY);
                                spin_unlock_irq(&page->mapping->tree_lock);
                        }

                        page->mapping->a_ops->invalidatepage(page, 0);
                }
        }

        return ret;
}
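
/*
 * Illustrative sketch, not kernel code: the inner loop above converts a
 * byte range into page indexes with "index = start >> PAGE_CACHE_SHIFT"
 * and then bumps start to the first byte of the following page.  The
 * stand-alone loop below shows the same arithmetic, assuming 4KiB pages
 * (a shift of 12); the real shift comes from the kernel headers.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12    /* assumed 4KiB pages */

int main(void)
{
        uint64_t start = 5000;  /* arbitrary example range */
        uint64_t end = 20000;

        while (start <= end) {
                uint64_t index = start >> SKETCH_PAGE_SHIFT;

                /* advance to the first byte of the next page */
                start = (uint64_t)(index + 1) << SKETCH_PAGE_SHIFT;
                printf("touch page index %llu\n", (unsigned long long)index);
        }
        return 0;
}
#endif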
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents)
{
        struct extent_io_tree *unpin;
        u64 start;
        u64 end;
        int ret;

        unpin = pinned_extents;
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;

                ret = btrfs_error_discard_extent(root, start, end + 1 - start);

                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                btrfs_error_unpin_extent_range(root, start, end);
        }

        return 0;
}
static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *t;
        LIST_HEAD(list);

        mutex_lock(&root->fs_info->trans_mutex);
        mutex_lock(&root->fs_info->transaction_kthread_mutex);

        list_splice_init(&root->fs_info->trans_list, &list);
        while (!list_empty(&list)) {
                t = list_entry(list.next, struct btrfs_transaction, list);

                btrfs_destroy_ordered_operations(root);

                btrfs_destroy_ordered_extents(root);

                btrfs_destroy_delayed_refs(t, root);

                btrfs_block_rsv_release(root,
                                        &root->fs_info->trans_block_rsv,
                                        t->dirty_pages.dirty_bytes);

                /* FIXME: cleanup wait for commit */
                if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
                        wake_up(&root->fs_info->transaction_blocked_wait);

                if (waitqueue_active(&root->fs_info->transaction_wait))
                        wake_up(&root->fs_info->transaction_wait);
                mutex_unlock(&root->fs_info->trans_mutex);

                mutex_lock(&root->fs_info->trans_mutex);
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);
                mutex_unlock(&root->fs_info->trans_mutex);

                mutex_lock(&root->fs_info->trans_mutex);

                btrfs_destroy_pending_snapshots(t);

                btrfs_destroy_delalloc_inodes(root);

                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = NULL;
                spin_unlock(&root->fs_info->new_trans_lock);

                btrfs_destroy_marked_extents(root, &t->dirty_pages,
                                             EXTENT_DIRTY);

                btrfs_destroy_pinned_extent(root,
                                            root->fs_info->pinned_extents);

                list_del_init(&t->list);
                memset(t, 0, sizeof(*t));
                kmem_cache_free(btrfs_transaction_cachep, t);
        }

        mutex_unlock(&root->fs_info->transaction_kthread_mutex);
        mutex_unlock(&root->fs_info->trans_mutex);

        return 0;
}
static struct extent_io_ops btree_extent_io_ops = {
        .write_cache_pages_lock_hook = btree_lock_page_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
};