Btrfs: remove the useless assignment to *entry in function tree_insert of file extent...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / btrfs / check-integrity.c
CommitLineData
5db02760
SB
1/*
2 * Copyright (C) STRATO AG 2011. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19/*
20 * This module can be used to catch cases when the btrfs kernel
21 * code executes write requests to the disk that bring the file
22 * system in an inconsistent state. In such a state, a power-loss
23 * or kernel panic event would cause that the data on disk is
24 * lost or at least damaged.
25 *
26 * Code is added that examines all block write requests during
27 * runtime (including writes of the super block). Three rules
28 * are verified and an error is printed on violation of the
29 * rules:
30 * 1. It is not allowed to write a disk block which is
31 * currently referenced by the super block (either directly
32 * or indirectly).
33 * 2. When a super block is written, it is verified that all
34 * referenced (directly or indirectly) blocks fulfill the
35 * following requirements:
36 * 2a. All referenced blocks have either been present when
37 * the file system was mounted, (i.e., they have been
38 * referenced by the super block) or they have been
39 * written since then and the write completion callback
40 * was called and a FLUSH request to the device where
41 * these blocks are located was received and completed.
42 * 2b. All referenced blocks need to have a generation
43 * number which is equal to the parent's number.
44 *
45 * One issue that was found using this module was that the log
46 * tree on disk became temporarily corrupted because disk blocks
47 * that had been in use for the log tree had been freed and
48 * reused too early, while being referenced by the written super
49 * block.
50 *
51 * The search term in the kernel log that can be used to filter
52 * on the existence of detected integrity issues is
53 * "btrfs: attempt".
54 *
55 * The integrity check is enabled via mount options. These
56 * mount options are only supported if the integrity check
57 * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
58 *
59 * Example #1, apply integrity checks to all metadata:
60 * mount /dev/sdb1 /mnt -o check_int
61 *
62 * Example #2, apply integrity checks to all metadata and
63 * to data extents:
64 * mount /dev/sdb1 /mnt -o check_int_data
65 *
66 * Example #3, apply integrity checks to all metadata and dump
67 * the tree that the super block references to kernel messages
68 * each time after a super block was written:
69 * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
70 *
71 * If the integrity check tool is included and activated in
72 * the mount options, plenty of kernel memory is used, and
73 * plenty of additional CPU cycles are spent. Enabling this
74 * functionality is not intended for normal use. In most
75 * cases, unless you are a btrfs developer who needs to verify
76 * the integrity of (super)-block write requests, do not
77 * enable the config option BTRFS_FS_CHECK_INTEGRITY to
78 * include and compile the integrity check tool.
79 */
80
81#include <linux/sched.h>
82#include <linux/slab.h>
83#include <linux/buffer_head.h>
84#include <linux/mutex.h>
85#include <linux/crc32c.h>
86#include <linux/genhd.h>
87#include <linux/blkdev.h>
88#include "ctree.h"
89#include "disk-io.h"
90#include "transaction.h"
91#include "extent_io.h"
5db02760
SB
92#include "volumes.h"
93#include "print-tree.h"
94#include "locking.h"
95#include "check-integrity.h"
96
97#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
98#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
99#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
100#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
101#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
102#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
103#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
104#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters,
105 * excluding " [...]" */
106#define BTRFSIC_BLOCK_SIZE PAGE_SIZE
107
108#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
109
110/*
111 * The definition of the bitmask fields for the print_mask.
112 * They are specified with the mount option check_integrity_print_mask.
113 */
114#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001
115#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002
116#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004
117#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008
118#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010
119#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020
120#define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040
121#define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080
122#define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100
123#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200
124#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400
125#define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800
126#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000
127
128struct btrfsic_dev_state;
129struct btrfsic_state;
130
/*
 * One entry per disk block that the integrity checker has seen, keyed by
 * (dev_state->bdev, dev_bytenr). Tracks both I/O completion state and the
 * reference links to/from other blocks.
 */
struct btrfsic_block {
	u32 magic_num;		/* only used for debug purposes */
	unsigned int is_metadata:1;	/* if it is meta-data, not data-data */
	unsigned int is_superblock:1;	/* if it is one of the superblocks */
	unsigned int is_iodone:1;	/* if is done by lower subsystem */
	unsigned int iodone_w_error:1;	/* error was indicated to endio */
	unsigned int never_written:1;	/* block was added because it was
					 * referenced, not because it was
					 * written */
	unsigned int mirror_num:2;	/* large enough to hold
					 * BTRFS_SUPER_MIRROR_MAX */
	struct btrfsic_dev_state *dev_state;
	u64 dev_bytenr;		/* key, physical byte num on disk */
	u64 logical_bytenr;	/* logical byte num on disk */
	u64 generation;
	struct btrfs_disk_key disk_key;	/* extra info to print in case of
					 * issues, will not always be correct */
	struct list_head collision_resolving_node;	/* list node */
	struct list_head all_blocks_node;	/* list node */

	/* the following two lists contain block_link items */
	struct list_head ref_to_list;	/* list */
	struct list_head ref_from_list;	/* list */
	struct btrfsic_block *next_in_same_bio;
	/* original bi_private/b_private and end_io callback, saved so the
	 * checker can interpose its own completion handler and chain back */
	void *orig_bio_bh_private;
	union {
		bio_end_io_t *bio;
		bh_end_io_t *bh;
	} orig_bio_bh_end_io;
	int submit_bio_bh_rw;
	u64 flush_gen;		/* only valid if !never_written */
};
163
164/*
165 * Elements of this type are allocated dynamically and required because
166 * each block object can refer to and can be ref from multiple blocks.
167 * The key to lookup them in the hashtable is the dev_bytenr of
168 * the block ref to plus the one from the block refered from.
169 * The fact that they are searchable via a hashtable and that a
170 * ref_cnt is maintained is not required for the btrfs integrity
171 * check algorithm itself, it is only used to make the output more
172 * beautiful in case that an error is detected (an error is defined
173 * as a write operation to a block while that block is still referenced).
174 */
/* Directed reference edge between two btrfsic_block objects. */
struct btrfsic_block_link {
	u32 magic_num;		/* only used for debug purposes */
	u32 ref_cnt;		/* number of times this edge was found */
	struct list_head node_ref_to;	/* list node */
	struct list_head node_ref_from;	/* list node */
	struct list_head collision_resolving_node;	/* list node */
	struct btrfsic_block *block_ref_to;
	struct btrfsic_block *block_ref_from;
	u64 parent_generation;	/* generation stored in the referencing block */
};
185
/* Per-block-device state, keyed by bdev pointer in a hashtable. */
struct btrfsic_dev_state {
	u32 magic_num;		/* only used for debug purposes */
	struct block_device *bdev;
	struct btrfsic_state *state;
	struct list_head collision_resolving_node;	/* list node */
	/* placeholder block used to represent FLUSH requests on this device */
	struct btrfsic_block dummy_block_for_bio_bh_flush;
	u64 last_flush_gen;	/* generation of the last completed flush */
	char name[BDEVNAME_SIZE];
};
195
/* Fixed-size chained hashtables; collisions are resolved via the
 * collision_resolving_node list heads embedded in the hashed objects. */
struct btrfsic_block_hashtable {
	struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
};

struct btrfsic_block_link_hashtable {
	struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
};

struct btrfsic_dev_state_hashtable {
	struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
};
207
/* Context describing one mapped+read disk block: where it lives and,
 * after btrfsic_read_block(), a pointer to its data. */
struct btrfsic_block_data_ctx {
	u64 start;		/* virtual bytenr */
	u64 dev_bytenr;		/* physical bytenr on device */
	u32 len;
	struct btrfsic_dev_state *dev;
	char *data;
	struct buffer_head *bh;	/* do not use if set to NULL */
};
216
/* This structure is used to implement recursion without occupying
 * any stack space, refer to btrfsic_process_metablock() */
struct btrfsic_stack_frame {
	u32 magic;		/* BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER */
	u32 nr;			/* number of items/pointers in current node */
	int error;		/* sticky error code propagated on unwind */
	int i;			/* iteration index within current node */
	int limit_nesting;	/* remaining recursion depth allowed */
	int num_copies;
	int mirror_num;
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx *block_ctx;
	struct btrfsic_block *next_block;
	struct btrfsic_block_data_ctx next_block_ctx;
	struct btrfs_header *hdr;
	struct btrfsic_stack_frame *prev;	/* caller frame, NULL at root */
};
234
/* Some state per mounted filesystem */
struct btrfsic_state {
	u32 print_mask;		/* BTRFSIC_PRINT_MASK_* verbosity bits */
	int include_extent_data;	/* also check data extents, not just metadata */
	int csum_size;		/* checksum size of the selected superblock */
	struct list_head all_blocks_list;
	struct btrfsic_block_hashtable block_hashtable;
	struct btrfsic_block_link_hashtable block_link_hashtable;
	struct btrfs_root *root;
	u64 max_superblock_generation;	/* highest generation seen so far */
	struct btrfsic_block *latest_superblock;	/* block with that generation */
};
247
248static void btrfsic_block_init(struct btrfsic_block *b);
249static struct btrfsic_block *btrfsic_block_alloc(void);
250static void btrfsic_block_free(struct btrfsic_block *b);
251static void btrfsic_block_link_init(struct btrfsic_block_link *n);
252static struct btrfsic_block_link *btrfsic_block_link_alloc(void);
253static void btrfsic_block_link_free(struct btrfsic_block_link *n);
254static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds);
255static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void);
256static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds);
257static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h);
258static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
259 struct btrfsic_block_hashtable *h);
260static void btrfsic_block_hashtable_remove(struct btrfsic_block *b);
261static struct btrfsic_block *btrfsic_block_hashtable_lookup(
262 struct block_device *bdev,
263 u64 dev_bytenr,
264 struct btrfsic_block_hashtable *h);
265static void btrfsic_block_link_hashtable_init(
266 struct btrfsic_block_link_hashtable *h);
267static void btrfsic_block_link_hashtable_add(
268 struct btrfsic_block_link *l,
269 struct btrfsic_block_link_hashtable *h);
270static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l);
271static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
272 struct block_device *bdev_ref_to,
273 u64 dev_bytenr_ref_to,
274 struct block_device *bdev_ref_from,
275 u64 dev_bytenr_ref_from,
276 struct btrfsic_block_link_hashtable *h);
277static void btrfsic_dev_state_hashtable_init(
278 struct btrfsic_dev_state_hashtable *h);
279static void btrfsic_dev_state_hashtable_add(
280 struct btrfsic_dev_state *ds,
281 struct btrfsic_dev_state_hashtable *h);
282static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds);
283static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
284 struct block_device *bdev,
285 struct btrfsic_dev_state_hashtable *h);
286static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void);
287static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf);
288static int btrfsic_process_superblock(struct btrfsic_state *state,
289 struct btrfs_fs_devices *fs_devices);
290static int btrfsic_process_metablock(struct btrfsic_state *state,
291 struct btrfsic_block *block,
292 struct btrfsic_block_data_ctx *block_ctx,
293 struct btrfs_header *hdr,
294 int limit_nesting, int force_iodone_flag);
295static int btrfsic_create_link_to_next_block(
296 struct btrfsic_state *state,
297 struct btrfsic_block *block,
298 struct btrfsic_block_data_ctx
299 *block_ctx, u64 next_bytenr,
300 int limit_nesting,
301 struct btrfsic_block_data_ctx *next_block_ctx,
302 struct btrfsic_block **next_blockp,
303 int force_iodone_flag,
304 int *num_copiesp, int *mirror_nump,
305 struct btrfs_disk_key *disk_key,
306 u64 parent_generation);
307static int btrfsic_handle_extent_data(struct btrfsic_state *state,
308 struct btrfsic_block *block,
309 struct btrfsic_block_data_ctx *block_ctx,
310 u32 item_offset, int force_iodone_flag);
311static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
312 struct btrfsic_block_data_ctx *block_ctx_out,
313 int mirror_num);
314static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
315 u32 len, struct block_device *bdev,
316 struct btrfsic_block_data_ctx *block_ctx_out);
317static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
318static int btrfsic_read_block(struct btrfsic_state *state,
319 struct btrfsic_block_data_ctx *block_ctx);
320static void btrfsic_dump_database(struct btrfsic_state *state);
321static int btrfsic_test_for_metadata(struct btrfsic_state *state,
322 const u8 *data, unsigned int size);
323static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
324 u64 dev_bytenr, u8 *mapped_data,
325 unsigned int len, struct bio *bio,
326 int *bio_is_patched,
327 struct buffer_head *bh,
328 int submit_bio_bh_rw);
329static int btrfsic_process_written_superblock(
330 struct btrfsic_state *state,
331 struct btrfsic_block *const block,
332 struct btrfs_super_block *const super_hdr);
333static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
334static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
335static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
336 const struct btrfsic_block *block,
337 int recursion_level);
338static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
339 struct btrfsic_block *const block,
340 int recursion_level);
341static void btrfsic_print_add_link(const struct btrfsic_state *state,
342 const struct btrfsic_block_link *l);
343static void btrfsic_print_rem_link(const struct btrfsic_state *state,
344 const struct btrfsic_block_link *l);
345static char btrfsic_get_block_type(const struct btrfsic_state *state,
346 const struct btrfsic_block *block);
347static void btrfsic_dump_tree(const struct btrfsic_state *state);
348static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
349 const struct btrfsic_block *block,
350 int indent_level);
351static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
352 struct btrfsic_state *state,
353 struct btrfsic_block_data_ctx *next_block_ctx,
354 struct btrfsic_block *next_block,
355 struct btrfsic_block *from_block,
356 u64 parent_generation);
357static struct btrfsic_block *btrfsic_block_lookup_or_add(
358 struct btrfsic_state *state,
359 struct btrfsic_block_data_ctx *block_ctx,
360 const char *additional_string,
361 int is_metadata,
362 int is_iodone,
363 int never_written,
364 int mirror_num,
365 int *was_created);
366static int btrfsic_process_superblock_dev_mirror(
367 struct btrfsic_state *state,
368 struct btrfsic_dev_state *dev_state,
369 struct btrfs_device *device,
370 int superblock_mirror_num,
371 struct btrfsic_dev_state **selected_dev_state,
372 struct btrfs_super_block *selected_super);
373static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
374 struct block_device *bdev);
375static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
376 u64 bytenr,
377 struct btrfsic_dev_state *dev_state,
378 u64 dev_bytenr, char *data);
379
380static struct mutex btrfsic_mutex;
381static int btrfsic_is_initialized;
382static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;
383
384
385static void btrfsic_block_init(struct btrfsic_block *b)
386{
387 b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
388 b->dev_state = NULL;
389 b->dev_bytenr = 0;
390 b->logical_bytenr = 0;
391 b->generation = BTRFSIC_GENERATION_UNKNOWN;
392 b->disk_key.objectid = 0;
393 b->disk_key.type = 0;
394 b->disk_key.offset = 0;
395 b->is_metadata = 0;
396 b->is_superblock = 0;
397 b->is_iodone = 0;
398 b->iodone_w_error = 0;
399 b->never_written = 0;
400 b->mirror_num = 0;
401 b->next_in_same_bio = NULL;
402 b->orig_bio_bh_private = NULL;
403 b->orig_bio_bh_end_io.bio = NULL;
404 INIT_LIST_HEAD(&b->collision_resolving_node);
405 INIT_LIST_HEAD(&b->all_blocks_node);
406 INIT_LIST_HEAD(&b->ref_to_list);
407 INIT_LIST_HEAD(&b->ref_from_list);
408 b->submit_bio_bh_rw = 0;
409 b->flush_gen = 0;
410}
411
412static struct btrfsic_block *btrfsic_block_alloc(void)
413{
414 struct btrfsic_block *b;
415
416 b = kzalloc(sizeof(*b), GFP_NOFS);
417 if (NULL != b)
418 btrfsic_block_init(b);
419
420 return b;
421}
422
423static void btrfsic_block_free(struct btrfsic_block *b)
424{
425 BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
426 kfree(b);
427}
428
429static void btrfsic_block_link_init(struct btrfsic_block_link *l)
430{
431 l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
432 l->ref_cnt = 1;
433 INIT_LIST_HEAD(&l->node_ref_to);
434 INIT_LIST_HEAD(&l->node_ref_from);
435 INIT_LIST_HEAD(&l->collision_resolving_node);
436 l->block_ref_to = NULL;
437 l->block_ref_from = NULL;
438}
439
440static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
441{
442 struct btrfsic_block_link *l;
443
444 l = kzalloc(sizeof(*l), GFP_NOFS);
445 if (NULL != l)
446 btrfsic_block_link_init(l);
447
448 return l;
449}
450
451static void btrfsic_block_link_free(struct btrfsic_block_link *l)
452{
453 BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
454 kfree(l);
455}
456
457static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
458{
459 ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
460 ds->bdev = NULL;
461 ds->state = NULL;
462 ds->name[0] = '\0';
463 INIT_LIST_HEAD(&ds->collision_resolving_node);
464 ds->last_flush_gen = 0;
465 btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
466 ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
467 ds->dummy_block_for_bio_bh_flush.dev_state = ds;
468}
469
470static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
471{
472 struct btrfsic_dev_state *ds;
473
474 ds = kzalloc(sizeof(*ds), GFP_NOFS);
475 if (NULL != ds)
476 btrfsic_dev_state_init(ds);
477
478 return ds;
479}
480
481static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
482{
483 BUG_ON(!(NULL == ds ||
484 BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
485 kfree(ds);
486}
487
488static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
489{
490 int i;
491
492 for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
493 INIT_LIST_HEAD(h->table + i);
494}
495
496static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
497 struct btrfsic_block_hashtable *h)
498{
499 const unsigned int hashval =
500 (((unsigned int)(b->dev_bytenr >> 16)) ^
501 ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
502 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
503
504 list_add(&b->collision_resolving_node, h->table + hashval);
505}
506
507static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
508{
509 list_del(&b->collision_resolving_node);
510}
511
512static struct btrfsic_block *btrfsic_block_hashtable_lookup(
513 struct block_device *bdev,
514 u64 dev_bytenr,
515 struct btrfsic_block_hashtable *h)
516{
517 const unsigned int hashval =
518 (((unsigned int)(dev_bytenr >> 16)) ^
519 ((unsigned int)((uintptr_t)bdev))) &
520 (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
521 struct list_head *elem;
522
523 list_for_each(elem, h->table + hashval) {
524 struct btrfsic_block *const b =
525 list_entry(elem, struct btrfsic_block,
526 collision_resolving_node);
527
528 if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
529 return b;
530 }
531
532 return NULL;
533}
534
535static void btrfsic_block_link_hashtable_init(
536 struct btrfsic_block_link_hashtable *h)
537{
538 int i;
539
540 for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
541 INIT_LIST_HEAD(h->table + i);
542}
543
544static void btrfsic_block_link_hashtable_add(
545 struct btrfsic_block_link *l,
546 struct btrfsic_block_link_hashtable *h)
547{
548 const unsigned int hashval =
549 (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
550 ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
551 ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
552 ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
553 & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
554
555 BUG_ON(NULL == l->block_ref_to);
556 BUG_ON(NULL == l->block_ref_from);
557 list_add(&l->collision_resolving_node, h->table + hashval);
558}
559
560static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
561{
562 list_del(&l->collision_resolving_node);
563}
564
565static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
566 struct block_device *bdev_ref_to,
567 u64 dev_bytenr_ref_to,
568 struct block_device *bdev_ref_from,
569 u64 dev_bytenr_ref_from,
570 struct btrfsic_block_link_hashtable *h)
571{
572 const unsigned int hashval =
573 (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
574 ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
575 ((unsigned int)((uintptr_t)bdev_ref_to)) ^
576 ((unsigned int)((uintptr_t)bdev_ref_from))) &
577 (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
578 struct list_head *elem;
579
580 list_for_each(elem, h->table + hashval) {
581 struct btrfsic_block_link *const l =
582 list_entry(elem, struct btrfsic_block_link,
583 collision_resolving_node);
584
585 BUG_ON(NULL == l->block_ref_to);
586 BUG_ON(NULL == l->block_ref_from);
587 if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
588 l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
589 l->block_ref_from->dev_state->bdev == bdev_ref_from &&
590 l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
591 return l;
592 }
593
594 return NULL;
595}
596
597static void btrfsic_dev_state_hashtable_init(
598 struct btrfsic_dev_state_hashtable *h)
599{
600 int i;
601
602 for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
603 INIT_LIST_HEAD(h->table + i);
604}
605
606static void btrfsic_dev_state_hashtable_add(
607 struct btrfsic_dev_state *ds,
608 struct btrfsic_dev_state_hashtable *h)
609{
610 const unsigned int hashval =
611 (((unsigned int)((uintptr_t)ds->bdev)) &
612 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
613
614 list_add(&ds->collision_resolving_node, h->table + hashval);
615}
616
617static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
618{
619 list_del(&ds->collision_resolving_node);
620}
621
622static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
623 struct block_device *bdev,
624 struct btrfsic_dev_state_hashtable *h)
625{
626 const unsigned int hashval =
627 (((unsigned int)((uintptr_t)bdev)) &
628 (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
629 struct list_head *elem;
630
631 list_for_each(elem, h->table + hashval) {
632 struct btrfsic_dev_state *const ds =
633 list_entry(elem, struct btrfsic_dev_state,
634 collision_resolving_node);
635
636 if (ds->bdev == bdev)
637 return ds;
638 }
639
640 return NULL;
641}
642
/*
 * Build the initial in-memory database at mount time: read all superblock
 * mirrors on all devices, pick the one with the highest generation, then
 * walk the root, chunk and log trees it references.
 *
 * Returns 0 on success, a negative value on failure. The caller retains
 * no resources on failure; selected_super is freed on every exit path.
 */
static int btrfsic_process_superblock(struct btrfsic_state *state,
				      struct btrfs_fs_devices *fs_devices)
{
	int ret = 0;
	struct btrfs_super_block *selected_super;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfsic_dev_state *selected_dev_state = NULL;
	int pass;

	BUG_ON(NULL == state);
	selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS);
	if (NULL == selected_super) {
		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
		return -1;
	}

	/* scan every superblock mirror of every device */
	list_for_each_entry(device, dev_head, dev_list) {
		int i;
		struct btrfsic_dev_state *dev_state;

		if (!device->bdev || !device->name)
			continue;

		dev_state = btrfsic_dev_state_lookup(device->bdev);
		BUG_ON(NULL == dev_state);
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			ret = btrfsic_process_superblock_dev_mirror(
					state, dev_state, device, i,
					&selected_dev_state, selected_super);
			/* only a failure of mirror 0 is fatal; later
			 * mirrors may legitimately be absent */
			if (0 != ret && 0 == i) {
				kfree(selected_super);
				return ret;
			}
		}
	}

	if (NULL == state->latest_superblock) {
		printk(KERN_INFO "btrfsic: no superblock found!\n");
		kfree(selected_super);
		return -1;
	}

	state->csum_size = btrfs_super_csum_size(selected_super);

	/* pass 0: root tree, pass 1: chunk tree, pass 2: log tree (if any) */
	for (pass = 0; pass < 3; pass++) {
		int num_copies;
		int mirror_num;
		u64 next_bytenr;

		switch (pass) {
		case 0:
			next_bytenr = btrfs_super_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "root@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 1:
			next_bytenr = btrfs_super_chunk_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "chunk@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 2:
			next_bytenr = btrfs_super_log_root(selected_super);
			/* a log tree only exists after an unclean shutdown */
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "log@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		}

		num_copies =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);

		/* process every on-disk copy (mirror) of the tree root */
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;
			struct btrfs_header *hdr;

			ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO "btrfsic:"
				       " btrfsic_map_block(root @%llu,"
				       " mirror %d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				kfree(selected_super);
				return -1;
			}

			/* the mirror pass above must already have created
			 * block and link entries for all tree roots */
			next_block = btrfsic_block_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					&state->block_hashtable);
			BUG_ON(NULL == next_block);

			l = btrfsic_block_link_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					state->latest_superblock->dev_state->
					bdev,
					state->latest_superblock->dev_bytenr,
					&state->block_link_hashtable);
			BUG_ON(NULL == l);

			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
			if (ret < (int)BTRFSIC_BLOCK_SIZE) {
				printk(KERN_INFO
				       "btrfsic: read @logical %llu failed!\n",
				       (unsigned long long)
				       tmp_next_block_ctx.start);
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				kfree(selected_super);
				return -1;
			}

			/* recurse (iteratively) into the whole tree */
			hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
			ret = btrfsic_process_metablock(state,
							next_block,
							&tmp_next_block_ctx,
							hdr,
							BTRFS_MAX_LEVEL + 3, 1);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
		}
	}

	kfree(selected_super);
	return ret;
}
784
/*
 * Read and register one superblock mirror of one device.
 *
 * Adds (or finds) the block object for the mirror, updates the globally
 * selected superblock if this one has a higher generation, and creates
 * block+link entries for the root, chunk and log tree roots it references.
 *
 * Returns 0 on success or if the mirror is simply not a valid superblock
 * for this device; -1 on read/allocation/mapping failure. The buffer head
 * is released on every exit path.
 */
static int btrfsic_process_superblock_dev_mirror(
		struct btrfsic_state *state,
		struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super)
{
	struct btrfs_super_block *super_tmp;
	u64 dev_bytenr;
	struct buffer_head *bh;
	struct btrfsic_block *superblock_tmp;
	int pass;
	struct block_device *const superblock_bdev = device->bdev;

	/* super block bytenr is always the unmapped device bytenr */
	dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
	bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096);
	if (NULL == bh)
		return -1;
	super_tmp = (struct btrfs_super_block *)
	    (bh->b_data + (dev_bytenr & 4095));

	/* not a superblock of this device at this offset: not an error,
	 * the mirror may not exist (e.g. device too small) */
	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
	    strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
		    sizeof(super_tmp->magic)) ||
	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) {
		brelse(bh);
		return 0;
	}

	superblock_tmp =
	    btrfsic_block_hashtable_lookup(superblock_bdev,
					   dev_bytenr,
					   &state->block_hashtable);
	if (NULL == superblock_tmp) {
		superblock_tmp = btrfsic_block_alloc();
		if (NULL == superblock_tmp) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			brelse(bh);
			return -1;
		}
		/* for superblock, only the dev_bytenr makes sense */
		superblock_tmp->dev_bytenr = dev_bytenr;
		superblock_tmp->dev_state = dev_state;
		superblock_tmp->logical_bytenr = dev_bytenr;
		superblock_tmp->generation = btrfs_super_generation(super_tmp);
		superblock_tmp->is_metadata = 1;
		superblock_tmp->is_superblock = 1;
		superblock_tmp->is_iodone = 1;
		superblock_tmp->never_written = 0;
		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO "New initial S-block (bdev %p, %s)"
			       " @%llu (%s/%llu/%d)\n",
			       superblock_bdev, device->name,
			       (unsigned long long)dev_bytenr,
			       dev_state->name,
			       (unsigned long long)dev_bytenr,
			       superblock_mirror_num);
		list_add(&superblock_tmp->all_blocks_node,
			 &state->all_blocks_list);
		btrfsic_block_hashtable_add(superblock_tmp,
					    &state->block_hashtable);
	}

	/* select the one with the highest generation field */
	if (btrfs_super_generation(super_tmp) >
	    state->max_superblock_generation ||
	    0 == state->max_superblock_generation) {
		memcpy(selected_super, super_tmp, sizeof(*selected_super));
		*selected_dev_state = dev_state;
		state->max_superblock_generation =
		    btrfs_super_generation(super_tmp);
		state->latest_superblock = superblock_tmp;
	}

	/* register links from this superblock to the three tree roots it
	 * references: pass 0 = root tree, 1 = chunk tree, 2 = log tree */
	for (pass = 0; pass < 3; pass++) {
		u64 next_bytenr;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key;

		tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_disk_key.offset = 0;
		switch (pass) {
		case 0:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "initial root ";
			next_bytenr = btrfs_super_root(super_tmp);
			break;
		case 1:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "initial chunk ";
			next_bytenr = btrfs_super_chunk_root(super_tmp);
			break;
		case 2:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
			additional_string = "initial log ";
			next_bytenr = btrfs_super_log_root(super_tmp);
			/* no log tree present; skip this pass */
			if (0 == next_bytenr)
				continue;
			break;
		}

		num_copies =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
					      &tmp_next_block_ctx,
					      mirror_num)) {
				printk(KERN_INFO "btrfsic: btrfsic_map_block("
				       "bytenr @%llu, mirror %d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				brelse(bh);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state, &tmp_next_block_ctx,
					additional_string, 1, 1, 0,
					mirror_num, NULL);
			if (NULL == next_block) {
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				brelse(bh);
				return -1;
			}

			/* generation is only known once the block is read */
			next_block->disk_key = tmp_disk_key;
			next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state, &tmp_next_block_ctx,
					next_block, superblock_tmp,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l) {
				brelse(bh);
				return -1;
			}
		}
	}
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
		btrfsic_dump_tree_sub(state, superblock_tmp, 0);

	brelse(bh);
	return 0;
}
945
946static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
947{
948 struct btrfsic_stack_frame *sf;
949
950 sf = kzalloc(sizeof(*sf), GFP_NOFS);
951 if (NULL == sf)
952 printk(KERN_INFO "btrfsic: alloc memory failed!\n");
953 else
954 sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
955 return sf;
956}
957
958static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
959{
960 BUG_ON(!(NULL == sf ||
961 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
962 kfree(sf);
963}
964
/*
 * Walk the metadata tree rooted at first_block/first_hdr and record a
 * btrfsic link for every tree block (and, when state->include_extent_data
 * is set, every extent data block) that it references.
 *
 * The walk is written iteratively with an explicit stack of
 * btrfsic_stack_frame objects (heap-allocated, except the initial frame
 * on the kernel stack) instead of recursion, keeping kernel stack usage
 * bounded regardless of tree depth. Each frame tracks the current item
 * index (sf->i) and the mirror iteration state (sf->num_copies,
 * sf->mirror_num) that btrfsic_create_link_to_next_block() updates
 * across calls.
 *
 * Returns 0 on success, nonzero on error; an error in a child frame is
 * propagated upward through sf->error while unwinding.
 */
static int btrfsic_process_metablock(
		struct btrfsic_state *state,
		struct btrfsic_block *const first_block,
		struct btrfsic_block_data_ctx *const first_block_ctx,
		struct btrfs_header *const first_hdr,
		int first_limit_nesting, int force_iodone_flag)
{
	struct btrfsic_stack_frame initial_stack_frame = { 0 };
	struct btrfsic_stack_frame *sf;
	struct btrfsic_stack_frame *next_stack;

	/* the first frame lives on the kernel stack; deeper ones are
	 * allocated by btrfsic_stack_frame_alloc() */
	sf = &initial_stack_frame;
	sf->error = 0;
	sf->i = -1;	/* -1 means "item loop not started yet" */
	sf->limit_nesting = first_limit_nesting;
	sf->block = first_block;
	sf->block_ctx = first_block_ctx;
	sf->next_block = NULL;
	sf->hdr = first_hdr;
	sf->prev = NULL;

continue_with_new_stack_frame:
	/* entered whenever sf points at a (new or resumed) frame */
	sf->block->generation = le64_to_cpu(sf->hdr->generation);
	if (0 == sf->hdr->level) {
		/* level 0: leaf block; scan its items */
		struct btrfs_leaf *const leafhdr =
		    (struct btrfs_leaf *)sf->hdr;

		if (-1 == sf->i) {
			/* first visit of this frame: latch the item count */
			sf->nr = le32_to_cpu(leafhdr->header.nritems);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "leaf %llu items %d generation %llu"
				       " owner %llu\n",
				       (unsigned long long)
				       sf->block_ctx->start,
				       sf->nr,
				       (unsigned long long)
				       le64_to_cpu(leafhdr->header.generation),
				       (unsigned long long)
				       le64_to_cpu(leafhdr->header.owner));
		}

continue_with_current_leaf_stack_frame:
		/* advance to the next item once all mirrors of the current
		 * item's target were handled */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_item *disk_item = leafhdr->items + sf->i;
			struct btrfs_disk_key *disk_key = &disk_item->key;
			u8 type;
			const u32 item_offset = le32_to_cpu(disk_item->offset);

			type = disk_key->type;

			if (BTRFS_ROOT_ITEM_KEY == type) {
				/* a root item references another tree root:
				 * link it and possibly descend */
				const struct btrfs_root_item *const root_item =
				    (struct btrfs_root_item *)
				    (sf->block_ctx->data +
				     offsetof(struct btrfs_leaf, items) +
				     item_offset);
				const u64 next_bytenr =
				    le64_to_cpu(root_item->bytenr);

				sf->error =
				    btrfsic_create_link_to_next_block(
						state,
						sf->block,
						sf->block_ctx,
						next_bytenr,
						sf->limit_nesting,
						&sf->next_block_ctx,
						&sf->next_block,
						force_iodone_flag,
						&sf->num_copies,
						&sf->mirror_num,
						disk_key,
						le64_to_cpu(root_item->
						generation));
				if (sf->error)
					goto one_stack_frame_backwards;

				if (NULL != sf->next_block) {
					/* descend: push a new frame for the
					 * referenced tree block */
					struct btrfs_header *const next_hdr =
					    (struct btrfs_header *)
					    sf->next_block_ctx.data;

					next_stack =
					    btrfsic_stack_frame_alloc();
					if (NULL == next_stack) {
						btrfsic_release_block_ctx(
								&sf->
								next_block_ctx);
						goto one_stack_frame_backwards;
					}

					next_stack->i = -1;
					next_stack->block = sf->next_block;
					next_stack->block_ctx =
					    &sf->next_block_ctx;
					next_stack->next_block = NULL;
					next_stack->hdr = next_hdr;
					next_stack->limit_nesting =
					    sf->limit_nesting - 1;
					next_stack->prev = sf;
					sf = next_stack;
					goto continue_with_new_stack_frame;
				}
			} else if (BTRFS_EXTENT_DATA_KEY == type &&
				   state->include_extent_data) {
				/* data extents are only tracked on request */
				sf->error = btrfsic_handle_extent_data(
						state,
						sf->block,
						sf->block_ctx,
						item_offset,
						force_iodone_flag);
				if (sf->error)
					goto one_stack_frame_backwards;
			}

			goto continue_with_current_leaf_stack_frame;
		}
	} else {
		/* interior node: scan its key pointers */
		struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;

		if (-1 == sf->i) {
			/* first visit of this frame: latch the item count */
			sf->nr = le32_to_cpu(nodehdr->header.nritems);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "node %llu level %d items %d"
				       " generation %llu owner %llu\n",
				       (unsigned long long)
				       sf->block_ctx->start,
				       nodehdr->header.level, sf->nr,
				       (unsigned long long)
				       le64_to_cpu(nodehdr->header.generation),
				       (unsigned long long)
				       le64_to_cpu(nodehdr->header.owner));
		}

continue_with_current_node_stack_frame:
		/* advance to the next key pointer once all mirrors of the
		 * current child were handled */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_key_ptr *disk_key_ptr =
			    nodehdr->ptrs + sf->i;
			const u64 next_bytenr =
			    le64_to_cpu(disk_key_ptr->blockptr);

			sf->error = btrfsic_create_link_to_next_block(
					state,
					sf->block,
					sf->block_ctx,
					next_bytenr,
					sf->limit_nesting,
					&sf->next_block_ctx,
					&sf->next_block,
					force_iodone_flag,
					&sf->num_copies,
					&sf->mirror_num,
					&disk_key_ptr->key,
					le64_to_cpu(disk_key_ptr->generation));
			if (sf->error)
				goto one_stack_frame_backwards;

			if (NULL != sf->next_block) {
				/* descend: push a new frame for the child */
				struct btrfs_header *const next_hdr =
				    (struct btrfs_header *)
				    sf->next_block_ctx.data;

				next_stack = btrfsic_stack_frame_alloc();
				if (NULL == next_stack)
					goto one_stack_frame_backwards;

				next_stack->i = -1;
				next_stack->block = sf->next_block;
				next_stack->block_ctx = &sf->next_block_ctx;
				next_stack->next_block = NULL;
				next_stack->hdr = next_hdr;
				next_stack->limit_nesting =
				    sf->limit_nesting - 1;
				next_stack->prev = sf;
				sf = next_stack;
				goto continue_with_new_stack_frame;
			}

			goto continue_with_current_node_stack_frame;
		}
	}

one_stack_frame_backwards:
	/* current frame is done (or failed): pop it and resume the parent */
	if (NULL != sf->prev) {
		struct btrfsic_stack_frame *const prev = sf->prev;

		/* the one for the initial block is freed in the caller */
		btrfsic_release_block_ctx(sf->block_ctx);

		if (sf->error) {
			/* propagate the error and keep unwinding */
			prev->error = sf->error;
			btrfsic_stack_frame_free(sf);
			sf = prev;
			goto one_stack_frame_backwards;
		}

		btrfsic_stack_frame_free(sf);
		sf = prev;
		goto continue_with_new_stack_frame;
	} else {
		/* only the on-stack initial frame may remain here */
		BUG_ON(&initial_stack_frame != sf);
	}

	return sf->error;
}
1183
/*
 * Create (or re-reference) a btrfsic link from @block to the tree block at
 * logical address @next_bytenr, handling one mirror per call.
 *
 * Mirror iteration state is kept in the caller's *num_copiesp/*mirror_nump:
 * when *num_copiesp is 0 the copy count is (re)computed and *mirror_nump is
 * reset to 1; each successful call advances *mirror_nump by one, and the
 * caller loops until *mirror_nump exceeds *num_copiesp.
 *
 * *next_blockp is set to the referenced block only when a brand-new link
 * was allocated AND @limit_nesting is still positive — i.e. only then does
 * the caller descend into the child (whose contents were read into
 * *next_block_ctx here). Otherwise *next_blockp is NULL.
 *
 * Returns 0 on success (including the "nothing left to do" case),
 * -1 on mapping/allocation/read failure.
 */
static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation)
{
	struct btrfsic_block *next_block = NULL;
	int ret;
	struct btrfsic_block_link *l;
	int did_alloc_block_link;
	int block_was_created;

	*next_blockp = NULL;
	if (0 == *num_copiesp) {
		/* first call for this target: initialize mirror iteration */
		*num_copiesp =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, *num_copiesp);
		*mirror_nump = 1;
	}

	/* all mirrors already processed */
	if (*mirror_nump > *num_copiesp)
		return 0;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
		printk(KERN_INFO
		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
		       *mirror_nump);
	ret = btrfsic_map_block(state, next_bytenr,
				BTRFSIC_BLOCK_SIZE,
				next_block_ctx, *mirror_nump);
	if (ret) {
		printk(KERN_INFO
		       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
		       (unsigned long long)next_bytenr, *mirror_nump);
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}

	next_block = btrfsic_block_lookup_or_add(state,
						 next_block_ctx, "referenced ",
						 1, force_iodone_flag,
						 !force_iodone_flag,
						 *mirror_nump,
						 &block_was_created);
	if (NULL == next_block) {
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}
	if (block_was_created) {
		/* fresh block: no existing link can exist yet */
		l = NULL;
		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
	} else {
		/* known block: warn on logical-bytenr mismatch (a data
		 * block with bytenr 0 just had an unknown address) */
		if (next_block->logical_bytenr != next_bytenr &&
		    !(!next_block->is_metadata &&
		      0 == next_block->logical_bytenr)) {
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d)"
			       " found in hash table, %c,"
			       " bytenr mismatch (!= stored %llu).\n",
			       (unsigned long long)next_bytenr,
			       next_block_ctx->dev->name,
			       (unsigned long long)next_block_ctx->dev_bytenr,
			       *mirror_nump,
			       btrfsic_get_block_type(state, next_block),
			       (unsigned long long)next_block->logical_bytenr);
		} else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d)"
			       " found in hash table, %c.\n",
			       (unsigned long long)next_bytenr,
			       next_block_ctx->dev->name,
			       (unsigned long long)next_block_ctx->dev_bytenr,
			       *mirror_nump,
			       btrfsic_get_block_type(state, next_block));
		next_block->logical_bytenr = next_bytenr;

		next_block->mirror_num = *mirror_nump;
		l = btrfsic_block_link_hashtable_lookup(
				next_block_ctx->dev->bdev,
				next_block_ctx->dev_bytenr,
				block_ctx->dev->bdev,
				block_ctx->dev_bytenr,
				&state->block_link_hashtable);
	}

	next_block->disk_key = *disk_key;
	if (NULL == l) {
		/* no link between these two blocks yet: create one */
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		did_alloc_block_link = 1;
		l->block_ref_to = next_block;
		l->block_ref_from = block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		list_add(&l->node_ref_to, &block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		/* link already exists; only bump its refcount when we are
		 * at the nesting limit (otherwise the descent re-counts) */
		did_alloc_block_link = 0;
		if (0 == limit_nesting) {
			l->ref_cnt++;
			l->parent_generation = parent_generation;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_add_link(state, l);
		}
	}

	if (limit_nesting > 0 && did_alloc_block_link) {
		/* new link and depth budget remaining: read the child so
		 * the caller can descend into it */
		ret = btrfsic_read_block(state, next_block_ctx);
		if (ret < (int)BTRFSIC_BLOCK_SIZE) {
			printk(KERN_INFO
			       "btrfsic: read block @logical %llu failed!\n",
			       (unsigned long long)next_bytenr);
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		*next_blockp = next_block;
	} else {
		*next_blockp = NULL;
	}
	(*mirror_nump)++;

	return 0;
}
1334
/*
 * Record links from the leaf @block to the data blocks of one regular
 * (BTRFS_FILE_EXTENT_REG, non-hole) file extent item found at
 * @item_offset inside the leaf.
 *
 * The extent range — starting at disk_bytenr + offset for num_bytes
 * bytes — is walked in chunks of at most BTRFSIC_BLOCK_SIZE, and for
 * every chunk a link is created for each mirror copy.
 *
 * Returns 0 on success (inline/prealloc/hole extents are skipped as
 * success), -1 on mapping or allocation failure.
 */
static int btrfsic_handle_extent_data(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u32 item_offset, int force_iodone_flag)
{
	int ret;
	struct btrfs_file_extent_item *file_extent_item =
	    (struct btrfs_file_extent_item *)(block_ctx->data +
					      offsetof(struct btrfs_leaf,
						       items) + item_offset);
	/* start at the referenced position inside the on-disk extent */
	u64 next_bytenr =
	    le64_to_cpu(file_extent_item->disk_bytenr) +
	    le64_to_cpu(file_extent_item->offset);
	u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
	u64 generation = le64_to_cpu(file_extent_item->generation);
	struct btrfsic_block_link *l;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
		       " offset = %llu, num_bytes = %llu\n",
		       file_extent_item->type,
		       (unsigned long long)
		       le64_to_cpu(file_extent_item->disk_bytenr),
		       (unsigned long long)
		       le64_to_cpu(file_extent_item->offset),
		       (unsigned long long)
		       le64_to_cpu(file_extent_item->num_bytes));
	/* only regular extents with real disk backing are tracked;
	 * disk_bytenr == 0 denotes a hole */
	if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
	    ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
		return 0;
	while (num_bytes > 0) {
		u32 chunk_len;
		int num_copies;
		int mirror_num;

		/* process at most one btrfsic block per iteration */
		if (num_bytes > BTRFSIC_BLOCK_SIZE)
			chunk_len = BTRFSIC_BLOCK_SIZE;
		else
			chunk_len = num_bytes;

		num_copies =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block_data_ctx next_block_ctx;
			struct btrfsic_block *next_block;
			int block_was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "btrfsic_handle_extent_data("
				       "mirror_num=%d)\n", mirror_num);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
				printk(KERN_INFO
				       "\tdisk_bytenr = %llu, num_bytes %u\n",
				       (unsigned long long)next_bytenr,
				       chunk_len);
			ret = btrfsic_map_block(state, next_bytenr,
						chunk_len, &next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(@%llu,"
				       " mirror=%d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				return -1;
			}

			/* data block: is_metadata = 0 */
			next_block = btrfsic_block_lookup_or_add(
					state,
					&next_block_ctx,
					"referenced ",
					0,
					force_iodone_flag,
					!force_iodone_flag,
					mirror_num,
					&block_was_created);
			if (NULL == next_block) {
				printk(KERN_INFO
				       "btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&next_block_ctx);
				return -1;
			}
			if (!block_was_created) {
				/* known block: warn on bytenr mismatch,
				 * then refresh its identity */
				if (next_block->logical_bytenr != next_bytenr &&
				    !(!next_block->is_metadata &&
				      0 == next_block->logical_bytenr)) {
					printk(KERN_INFO
					       "Referenced block"
					       " @%llu (%s/%llu/%d)"
					       " found in hash table, D,"
					       " bytenr mismatch"
					       " (!= stored %llu).\n",
					       (unsigned long long)next_bytenr,
					       next_block_ctx.dev->name,
					       (unsigned long long)
					       next_block_ctx.dev_bytenr,
					       mirror_num,
					       (unsigned long long)
					       next_block->logical_bytenr);
				}
				next_block->logical_bytenr = next_bytenr;
				next_block->mirror_num = mirror_num;
			}

			l = btrfsic_block_link_lookup_or_add(state,
							     &next_block_ctx,
							     next_block, block,
							     generation);
			btrfsic_release_block_ctx(&next_block_ctx);
			if (NULL == l)
				return -1;
		}

		next_bytenr += chunk_len;
		num_bytes -= chunk_len;
	}

	return 0;
}
1459
1460static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
1461 struct btrfsic_block_data_ctx *block_ctx_out,
1462 int mirror_num)
1463{
1464 int ret;
1465 u64 length;
1466 struct btrfs_bio *multi = NULL;
1467 struct btrfs_device *device;
1468
1469 length = len;
1470 ret = btrfs_map_block(&state->root->fs_info->mapping_tree, READ,
1471 bytenr, &length, &multi, mirror_num);
1472
1473 device = multi->stripes[0].dev;
1474 block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
1475 block_ctx_out->dev_bytenr = multi->stripes[0].physical;
1476 block_ctx_out->start = bytenr;
1477 block_ctx_out->len = len;
1478 block_ctx_out->data = NULL;
1479 block_ctx_out->bh = NULL;
1480
1481 if (0 == ret)
1482 kfree(multi);
1483 if (NULL == block_ctx_out->dev) {
1484 ret = -ENXIO;
1485 printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
1486 }
1487
1488 return ret;
1489}
1490
1491static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
1492 u32 len, struct block_device *bdev,
1493 struct btrfsic_block_data_ctx *block_ctx_out)
1494{
1495 block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
1496 block_ctx_out->dev_bytenr = bytenr;
1497 block_ctx_out->start = bytenr;
1498 block_ctx_out->len = len;
1499 block_ctx_out->data = NULL;
1500 block_ctx_out->bh = NULL;
1501 if (NULL != block_ctx_out->dev) {
1502 return 0;
1503 } else {
1504 printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
1505 return -ENXIO;
1506 }
1507}
1508
1509static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
1510{
1511 if (NULL != block_ctx->bh) {
1512 brelse(block_ctx->bh);
1513 block_ctx->bh = NULL;
1514 }
1515}
1516
/*
 * Read the disk block described by block_ctx through the buffer cache
 * and make block_ctx->data point at its contents.
 *
 * Only a single 4096-byte buffer head is used, so the device offset must
 * be 4096-byte aligned and the length must not exceed 4096 bytes;
 * anything else is rejected.
 *
 * Returns block_ctx->len (the number of usable bytes) on success, -1 on
 * error. The bh reference taken here is dropped later by
 * btrfsic_release_block_ctx().
 */
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx)
{
	block_ctx->bh = NULL;
	/* 4095 == 4096 - 1: alignment check for the 4K buffer head */
	if (block_ctx->dev_bytenr & 4095) {
		printk(KERN_INFO
		       "btrfsic: read_block() with unaligned bytenr %llu\n",
		       (unsigned long long)block_ctx->dev_bytenr);
		return -1;
	}
	if (block_ctx->len > 4096) {
		printk(KERN_INFO
		       "btrfsic: read_block() with too huge size %d\n",
		       block_ctx->len);
		return -1;
	}

	/* >> 12 converts the byte offset to a 4096-byte block number */
	block_ctx->bh = __bread(block_ctx->dev->bdev,
				block_ctx->dev_bytenr >> 12, 4096);
	if (NULL == block_ctx->bh)
		return -1;
	block_ctx->data = block_ctx->bh->b_data;

	return block_ctx->len;
}
1542
/*
 * Debug helper: print the checker's entire in-memory database — every
 * tracked block plus, for each block, both its outgoing ("refers to")
 * and incoming ("is referenced from") links with their reference counts.
 * Output goes to the kernel log at KERN_INFO level; nothing is modified.
 */
static void btrfsic_dump_database(struct btrfsic_state *state)
{
	struct list_head *elem_all;

	BUG_ON(NULL == state);

	printk(KERN_INFO "all_blocks_list:\n");
	list_for_each(elem_all, &state->all_blocks_list) {
		const struct btrfsic_block *const b_all =
		    list_entry(elem_all, struct btrfsic_block,
			       all_blocks_node);
		struct list_head *elem_ref_to;
		struct list_head *elem_ref_from;

		printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
		       btrfsic_get_block_type(state, b_all),
		       (unsigned long long)b_all->logical_bytenr,
		       b_all->dev_state->name,
		       (unsigned long long)b_all->dev_bytenr,
		       b_all->mirror_num);

		/* outgoing links: blocks this one references */
		list_for_each(elem_ref_to, &b_all->ref_to_list) {
			const struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
			       " refers %u* to"
			       " %c @%llu (%s/%llu/%d)\n",
			       btrfsic_get_block_type(state, b_all),
			       (unsigned long long)b_all->logical_bytenr,
			       b_all->dev_state->name,
			       (unsigned long long)b_all->dev_bytenr,
			       b_all->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_to),
			       (unsigned long long)
			       l->block_ref_to->logical_bytenr,
			       l->block_ref_to->dev_state->name,
			       (unsigned long long)l->block_ref_to->dev_bytenr,
			       l->block_ref_to->mirror_num);
		}

		/* incoming links: blocks that reference this one */
		list_for_each(elem_ref_from, &b_all->ref_from_list) {
			const struct btrfsic_block_link *const l =
			    list_entry(elem_ref_from,
				       struct btrfsic_block_link,
				       node_ref_from);

			printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
			       " is ref %u* from"
			       " %c @%llu (%s/%llu/%d)\n",
			       btrfsic_get_block_type(state, b_all),
			       (unsigned long long)b_all->logical_bytenr,
			       b_all->dev_state->name,
			       (unsigned long long)b_all->dev_bytenr,
			       b_all->mirror_num,
			       l->ref_cnt,
			       btrfsic_get_block_type(state, l->block_ref_from),
			       (unsigned long long)
			       l->block_ref_from->logical_bytenr,
			       l->block_ref_from->dev_state->name,
			       (unsigned long long)
			       l->block_ref_from->dev_bytenr,
			       l->block_ref_from->mirror_num);
		}

		printk(KERN_INFO "\n");
	}
}
1614
1615/*
1616 * Test whether the disk block contains a tree block (leaf or node)
1617 * (note that this test fails for the super block)
1618 */
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
				     const u8 *data, unsigned int size)
{
	struct btrfs_header *h;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;

	h = (struct btrfs_header *)data;

	/* a real tree block must carry this filesystem's fsid */
	if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
		fail++;

	/* the stored csum must match one computed over the block payload.
	 * NOTE(review): the checksum always covers PAGE_SIZE bytes and the
	 * 'size' parameter is never used — presumably metadata blocks are
	 * exactly one page here; confirm before passing smaller buffers. */
	crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, h->csum, state->csum_size))
		crc_fail++;

	/* nonzero (treated as "not metadata" by the caller) if either
	 * the fsid or the checksum check failed */
	return fail || crc_fail;
}
1640
/*
 * Main write-interception hook: called for every block write (bio or
 * buffer head based) to a watched device. Updates the checker's database,
 * complains about rule violations (overwriting a block still referenced
 * by the most recent superblock, or one whose previous write has not yet
 * completed), re-scans metadata content, and patches the bio/bh
 * completion callback so btrfsic learns when the I/O finishes.
 *
 * @bio_is_patched: in/out; tracks whether this bio's bi_end_io was
 *	already redirected, so multiple blocks of one bio are chained via
 *	next_in_same_bio instead of being re-patched.
 * Exactly one of @bio / @bh is normally non-NULL; with both NULL the
 * block is marked iodone immediately.
 */
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
					  u64 dev_bytenr,
					  u8 *mapped_data, unsigned int len,
					  struct bio *bio,
					  int *bio_is_patched,
					  struct buffer_head *bh,
					  int submit_bio_bh_rw)
{
	int is_metadata;
	struct btrfsic_block *block;
	struct btrfsic_block_data_ctx block_ctx;
	int ret;
	struct btrfsic_state *state = dev_state->state;
	struct block_device *bdev = dev_state->bdev;

	WARN_ON(len > PAGE_SIZE);
	/* classify the written buffer by fsid + checksum */
	is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len));
	if (NULL != bio_is_patched)
		*bio_is_patched = 0;

	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
					       &state->block_hashtable);
	if (NULL != block) {
		/* the block is already tracked: this is an overwrite */
		u64 bytenr = 0;
		struct list_head *elem_ref_to;
		struct list_head *tmp_ref_to;

		if (block->is_superblock) {
			/* superblocks self-identify; ignore csum result */
			bytenr = le64_to_cpu(((struct btrfs_super_block *)
					      mapped_data)->bytenr);
			is_metadata = 1;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
				printk(KERN_INFO
				       "[before new superblock is written]:\n");
				btrfsic_dump_tree_sub(state, block, 0);
			}
		}
		if (is_metadata) {
			if (!block->is_superblock) {
				/* take the logical address from the written
				 * header and cross-check it against the
				 * device offset */
				bytenr = le64_to_cpu(((struct btrfs_header *)
						      mapped_data)->bytenr);
				btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
							       dev_state,
							       dev_bytenr,
							       mapped_data);
			}
			if (block->logical_bytenr != bytenr) {
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c,"
				       " bytenr mismatch"
				       " (!= stored %llu).\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block),
				       (unsigned long long)
				       block->logical_bytenr);
				block->logical_bytenr = bytenr;
			} else if (state->print_mask &
				   BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c.\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block));
		} else {
			/* data block: keep the previously known address */
			bytenr = block->logical_bytenr;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/%d)"
				       " found in hash table, %c.\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr,
				       block->mirror_num,
				       btrfsic_get_block_type(state, block));
		}

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "ref_to_list: %cE, ref_from_list: %cE\n",
			       list_empty(&block->ref_to_list) ? ' ' : '!',
			       list_empty(&block->ref_from_list) ? ' ' : '!');
		/* rule 1 violation: overwriting a block that the latest
		 * superblock still (directly or indirectly) references */
		if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
			       " @%llu (%s/%llu/%d), old(gen=%llu,"
			       " objectid=%llu, type=%d, offset=%llu),"
			       " new(gen=%llu),"
			       " which is referenced by most recent superblock"
			       " (superblockgen=%llu)!\n",
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)bytenr,
			       dev_state->name,
			       (unsigned long long)dev_bytenr,
			       block->mirror_num,
			       (unsigned long long)block->generation,
			       (unsigned long long)
			       le64_to_cpu(block->disk_key.objectid),
			       block->disk_key.type,
			       (unsigned long long)
			       le64_to_cpu(block->disk_key.offset),
			       (unsigned long long)
			       le64_to_cpu(((struct btrfs_header *)
					    mapped_data)->generation),
			       (unsigned long long)
			       state->max_superblock_generation);
			btrfsic_dump_tree(state);
		}

		/* overwriting while the previous write of this block is
		 * still in flight is unrecoverable for the checker */
		if (!block->is_iodone && !block->never_written) {
			printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
			       " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
			       " which is not yet iodone!\n",
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)bytenr,
			       dev_state->name,
			       (unsigned long long)dev_bytenr,
			       block->mirror_num,
			       (unsigned long long)block->generation,
			       (unsigned long long)
			       le64_to_cpu(((struct btrfs_header *)
					    mapped_data)->generation));
			/* it would not be safe to go on */
			btrfsic_dump_tree(state);
			return;
		}

		/*
		 * Clear all references of this block. Do not free
		 * the block itself even if is not referenced anymore
		 * because it still carries valueable information
		 * like whether it was ever written and IO completed.
		 */
		list_for_each_safe(elem_ref_to, tmp_ref_to,
				   &block->ref_to_list) {
			struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);
			l->ref_cnt--;
			if (0 == l->ref_cnt) {
				list_del(&l->node_ref_to);
				list_del(&l->node_ref_from);
				btrfsic_block_link_hashtable_remove(l);
				btrfsic_block_link_free(l);
			}
		}

		/* build a context describing the newly written content */
		if (block->is_superblock)
			ret = btrfsic_map_superblock(state, bytenr, len,
						     bdev, &block_ctx);
		else
			ret = btrfsic_map_block(state, bytenr, len,
						&block_ctx, 0);
		if (ret) {
			printk(KERN_INFO
			       "btrfsic: btrfsic_map_block(root @%llu)"
			       " failed!\n", (unsigned long long)bytenr);
			return;
		}
		block_ctx.data = mapped_data;
		/* the following is required in case of writes to mirrors,
		 * use the same that was used for the lookup */
		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;

		if (is_metadata || state->include_extent_data) {
			block->never_written = 0;
			block->iodone_w_error = 0;
			if (NULL != bio) {
				/* hook into bio completion; chain further
				 * blocks of an already-patched bio via
				 * next_in_same_bio */
				block->is_iodone = 0;
				BUG_ON(NULL == bio_is_patched);
				if (!*bio_is_patched) {
					block->orig_bio_bh_private =
					    bio->bi_private;
					block->orig_bio_bh_end_io.bio =
					    bio->bi_end_io;
					block->next_in_same_bio = NULL;
					bio->bi_private = block;
					bio->bi_end_io = btrfsic_bio_end_io;
					*bio_is_patched = 1;
				} else {
					struct btrfsic_block *chained_block =
					    (struct btrfsic_block *)
					    bio->bi_private;

					BUG_ON(NULL == chained_block);
					block->orig_bio_bh_private =
					    chained_block->orig_bio_bh_private;
					block->orig_bio_bh_end_io.bio =
					    chained_block->orig_bio_bh_end_io.
					    bio;
					block->next_in_same_bio = chained_block;
					bio->bi_private = block;
				}
			} else if (NULL != bh) {
				/* hook into buffer head completion */
				block->is_iodone = 0;
				block->orig_bio_bh_private = bh->b_private;
				block->orig_bio_bh_end_io.bh = bh->b_end_io;
				block->next_in_same_bio = NULL;
				bh->b_private = block;
				bh->b_end_io = btrfsic_bh_end_io;
			} else {
				/* no completion to wait for */
				block->is_iodone = 1;
				block->orig_bio_bh_private = NULL;
				block->orig_bio_bh_end_io.bio = NULL;
				block->next_in_same_bio = NULL;
			}
		}

		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (is_metadata) {
			/* re-scan the new content to rebuild the links */
			block->logical_bytenr = bytenr;
			block->is_metadata = 1;
			if (block->is_superblock) {
				ret = btrfsic_process_written_superblock(
						state,
						block,
						(struct btrfs_super_block *)
						mapped_data);
				if (state->print_mask &
				    BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
					printk(KERN_INFO
					"[after new superblock is written]:\n");
					btrfsic_dump_tree_sub(state, block, 0);
				}
			} else {
				block->mirror_num = 0;	/* unknown */
				ret = btrfsic_process_metablock(
						state,
						block,
						&block_ctx,
						(struct btrfs_header *)
						block_ctx.data,
						0, 0);
			}
			if (ret)
				printk(KERN_INFO
				       "btrfsic: btrfsic_process_metablock"
				       "(root @%llu) failed!\n",
				       (unsigned long long)dev_bytenr);
		} else {
			block->is_metadata = 0;
			block->mirror_num = 0;	/* unknown */
			block->generation = BTRFSIC_GENERATION_UNKNOWN;
			if (!state->include_extent_data
			    && list_empty(&block->ref_from_list)) {
				/*
				 * disk block is overwritten with extent
				 * data (not meta data) and we are configured
				 * to not include extent data: take the
				 * chance and free the block's memory
				 */
				btrfsic_block_hashtable_remove(block);
				list_del(&block->all_blocks_node);
				btrfsic_block_free(block);
			}
		}
		btrfsic_release_block_ctx(&block_ctx);
	} else {
		/* block has not been found in hash table */
		u64 bytenr;

		if (!is_metadata) {
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "Written block (%s/%llu/?)"
				       " !found in hash table, D.\n",
				       dev_state->name,
				       (unsigned long long)dev_bytenr);
			if (!state->include_extent_data)
				return;	/* ignore that written D block */

			/* this is getting ugly for the
			 * include_extent_data case... */
			bytenr = 0;	/* unknown */
			block_ctx.start = bytenr;
			block_ctx.len = len;
			block_ctx.bh = NULL;
		} else {
			/* new metadata block: take its logical address from
			 * the header and cross-check the device offset */
			bytenr = le64_to_cpu(((struct btrfs_header *)
					      mapped_data)->bytenr);
			btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
						       dev_bytenr,
						       mapped_data);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "Written block @%llu (%s/%llu/?)"
				       " !found in hash table, M.\n",
				       (unsigned long long)bytenr,
				       dev_state->name,
				       (unsigned long long)dev_bytenr);

			ret = btrfsic_map_block(state, bytenr, len, &block_ctx,
						0);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(root @%llu)"
				       " failed!\n",
				       (unsigned long long)dev_bytenr);
				return;
			}
		}
		block_ctx.data = mapped_data;
		/* the following is required in case of writes to mirrors,
		 * use the same that was used for the lookup */
		block_ctx.dev = dev_state;
		block_ctx.dev_bytenr = dev_bytenr;

		/* create and register a new tracking entry */
		block = btrfsic_block_alloc();
		if (NULL == block) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(&block_ctx);
			return;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = dev_bytenr;
		block->logical_bytenr = bytenr;
		block->is_metadata = is_metadata;
		block->never_written = 0;
		block->iodone_w_error = 0;
		block->mirror_num = 0;	/* unknown */
		block->flush_gen = dev_state->last_flush_gen + 1;
		block->submit_bio_bh_rw = submit_bio_bh_rw;
		if (NULL != bio) {
			/* same bio/bh completion patching as above */
			block->is_iodone = 0;
			BUG_ON(NULL == bio_is_patched);
			if (!*bio_is_patched) {
				block->orig_bio_bh_private = bio->bi_private;
				block->orig_bio_bh_end_io.bio = bio->bi_end_io;
				block->next_in_same_bio = NULL;
				bio->bi_private = block;
				bio->bi_end_io = btrfsic_bio_end_io;
				*bio_is_patched = 1;
			} else {
				struct btrfsic_block *chained_block =
				    (struct btrfsic_block *)
				    bio->bi_private;

				BUG_ON(NULL == chained_block);
				block->orig_bio_bh_private =
				    chained_block->orig_bio_bh_private;
				block->orig_bio_bh_end_io.bio =
				    chained_block->orig_bio_bh_end_io.bio;
				block->next_in_same_bio = chained_block;
				bio->bi_private = block;
			}
		} else if (NULL != bh) {
			block->is_iodone = 0;
			block->orig_bio_bh_private = bh->b_private;
			block->orig_bio_bh_end_io.bh = bh->b_end_io;
			block->next_in_same_bio = NULL;
			bh->b_private = block;
			bh->b_end_io = btrfsic_bh_end_io;
		} else {
			block->is_iodone = 1;
			block->orig_bio_bh_private = NULL;
			block->orig_bio_bh_end_io.bio = NULL;
			block->next_in_same_bio = NULL;
		}
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "New written %c-block @%llu (%s/%llu/%d)\n",
			       is_metadata ? 'M' : 'D',
			       (unsigned long long)block->logical_bytenr,
			       block->dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       block->mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);

		if (is_metadata) {
			/* scan the new metadata content for references */
			ret = btrfsic_process_metablock(state, block,
							&block_ctx,
							(struct btrfs_header *)
							block_ctx.data, 0, 0);
			if (ret)
				printk(KERN_INFO
				       "btrfsic: process_metablock(root @%llu)"
				       " failed!\n",
				       (unsigned long long)dev_bytenr);
		}
		btrfsic_release_block_ctx(&block_ctx);
	}
}
2035
/*
 * bio completion callback that btrfsic patched into submitted write bios.
 * Restores the original bi_private/bi_end_io, updates the bookkeeping for
 * every btrfsic_block chained to this bio, then invokes the original
 * completion handler.
 */
static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
	int iodone_w_error;

	/* mutex is not held! This is not save if IO is not yet completed
	 * on umount */
	iodone_w_error = 0;
	if (bio_error_status)
		iodone_w_error = 1;

	BUG_ON(NULL == block);
	/* unpatch the bio before walking the chained block list */
	bp->bi_private = block->orig_bio_bh_private;
	bp->bi_end_io = block->orig_bio_bh_end_io.bio;

	do {
		struct btrfsic_block *next_block;
		struct btrfsic_dev_state *const dev_state = block->dev_state;

		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			printk(KERN_INFO
			       "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
			       bio_error_status,
			       btrfsic_get_block_type(dev_state->state, block),
			       (unsigned long long)block->logical_bytenr,
			       dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       block->mirror_num);
		/* save the chain link before marking this block done; once
		 * is_iodone is set the block may be reused concurrently */
		next_block = block->next_in_same_bio;
		block->iodone_w_error = iodone_w_error;
		if (block->submit_bio_bh_rw & REQ_FLUSH) {
			/* a completed FLUSH advances the device's flush
			 * generation: blocks written before it are now
			 * known to be on stable storage */
			dev_state->last_flush_gen++;
			if ((dev_state->state->print_mask &
			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
				printk(KERN_INFO
				       "bio_end_io() new %s flush_gen=%llu\n",
				       dev_state->name,
				       (unsigned long long)
				       dev_state->last_flush_gen);
		}
		if (block->submit_bio_bh_rw & REQ_FUA)
			block->flush_gen = 0; /* FUA completed means block is
					       * on disk */
		block->is_iodone = 1; /* for FLUSH, this releases the block */
		block = next_block;
	} while (NULL != block);

	/* finally hand completion to the original end_io callback */
	bp->bi_end_io(bp, bio_error_status);
}
2086
/*
 * buffer_head completion callback that btrfsic patched into submitted
 * write bhs (btrfs uses bhs only for superblock writes). Single-block
 * counterpart of btrfsic_bio_end_io().
 */
static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private;
	int iodone_w_error = !uptodate;
	struct btrfsic_dev_state *dev_state;

	BUG_ON(NULL == block);
	dev_state = block->dev_state;
	if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
		printk(KERN_INFO
		       "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
		       iodone_w_error,
		       btrfsic_get_block_type(dev_state->state, block),
		       (unsigned long long)block->logical_bytenr,
		       block->dev_state->name,
		       (unsigned long long)block->dev_bytenr,
		       block->mirror_num);

	block->iodone_w_error = iodone_w_error;
	if (block->submit_bio_bh_rw & REQ_FLUSH) {
		/* completed FLUSH: everything written to this device before
		 * the flush is now known to be on stable storage */
		dev_state->last_flush_gen++;
		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			printk(KERN_INFO
			       "bh_end_io() new %s flush_gen=%llu\n",
			       dev_state->name,
			       (unsigned long long)dev_state->last_flush_gen);
	}
	if (block->submit_bio_bh_rw & REQ_FUA)
		block->flush_gen = 0; /* FUA completed means block is on disk */

	/* unpatch the bh, then mark the block done; after is_iodone is set
	 * the (dummy) block may be reused, so do not touch it afterwards */
	bh->b_private = block->orig_bio_bh_private;
	bh->b_end_io = block->orig_bio_bh_end_io.bh;
	block->is_iodone = 1; /* for FLUSH, this releases the block */
	bh->b_end_io(bh, uptodate);
}
2123
/*
 * Called when a superblock has been written. Records the superblock
 * generation (tracking the most recent one in state->latest_superblock),
 * links the superblock to the root/chunk/log tree roots it points to
 * (on all mirrors), and finally verifies that every block reachable from
 * it is safely on disk (see btrfsic_check_all_ref_blocks()).
 *
 * Returns 0 on success, -1 when mapping or allocation fails.
 */
static int btrfsic_process_written_superblock(
		struct btrfsic_state *state,
		struct btrfsic_block *const superblock,
		struct btrfs_super_block *const super_hdr)
{
	int pass;

	superblock->generation = btrfs_super_generation(super_hdr);
	if (!(superblock->generation > state->max_superblock_generation ||
	      0 == state->max_superblock_generation)) {
		/* an older generation was written (e.g. to another mirror);
		 * the latest-superblock bookkeeping stays unchanged */
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO
			       "btrfsic: superblock @%llu (%s/%llu/%d)"
			       " with old gen %llu <= %llu\n",
			       (unsigned long long)superblock->logical_bytenr,
			       superblock->dev_state->name,
			       (unsigned long long)superblock->dev_bytenr,
			       superblock->mirror_num,
			       (unsigned long long)
			       btrfs_super_generation(super_hdr),
			       (unsigned long long)
			       state->max_superblock_generation);
	} else {
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO
			       "btrfsic: got new superblock @%llu (%s/%llu/%d)"
			       " with new gen %llu > %llu\n",
			       (unsigned long long)superblock->logical_bytenr,
			       superblock->dev_state->name,
			       (unsigned long long)superblock->dev_bytenr,
			       superblock->mirror_num,
			       (unsigned long long)
			       btrfs_super_generation(super_hdr),
			       (unsigned long long)
			       state->max_superblock_generation);

		state->max_superblock_generation =
		    btrfs_super_generation(super_hdr);
		state->latest_superblock = superblock;
	}

	/* pass 0: root tree, pass 1: chunk tree, pass 2: log tree */
	for (pass = 0; pass < 3; pass++) {
		int ret;
		u64 next_bytenr;
		struct btrfsic_block *next_block;
		struct btrfsic_block_data_ctx tmp_next_block_ctx;
		struct btrfsic_block_link *l;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key;

		tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_disk_key.offset = 0;

		switch (pass) {
		case 0:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "root ";
			next_bytenr = btrfs_super_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "root@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 1:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "chunk ";
			next_bytenr = btrfs_super_chunk_root(super_hdr);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "chunk@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 2:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
			additional_string = "log ";
			next_bytenr = btrfs_super_log_root(super_hdr);
			if (0 == next_bytenr)
				continue; /* no log tree in this superblock */
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "log@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		}

		num_copies =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		/* link the superblock to the tree root on every mirror */
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			int was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "btrfsic_process_written_superblock("
				       "mirror_num=%d)\n", mirror_num);
			ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(@%llu,"
				       " mirror=%d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					additional_string,
					1, 0, 1,
					mirror_num,
					&was_created);
			if (NULL == next_block) {
				printk(KERN_INFO
				       "btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				return -1;
			}

			next_block->disk_key = tmp_disk_key;
			if (was_created)
				next_block->generation =
				    BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state,
					&tmp_next_block_ctx,
					next_block,
					superblock,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l)
				return -1;
		}
	}

	/* verify that everything reachable from this superblock is safely
	 * on disk; on violation warn and dump the reference tree */
	if (-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)) {
		WARN_ON(1);
		btrfsic_dump_tree(state);
	}

	return 0;
}
2276
2277static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
2278 struct btrfsic_block *const block,
2279 int recursion_level)
2280{
2281 struct list_head *elem_ref_to;
2282 int ret = 0;
2283
2284 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
2285 /*
2286 * Note that this situation can happen and does not
2287 * indicate an error in regular cases. It happens
2288 * when disk blocks are freed and later reused.
2289 * The check-integrity module is not aware of any
2290 * block free operations, it just recognizes block
2291 * write operations. Therefore it keeps the linkage
2292 * information for a block until a block is
2293 * rewritten. This can temporarily cause incorrect
2294 * and even circular linkage informations. This
2295 * causes no harm unless such blocks are referenced
2296 * by the most recent super block.
2297 */
2298 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2299 printk(KERN_INFO
2300 "btrfsic: abort cyclic linkage (case 1).\n");
2301
2302 return ret;
2303 }
2304
2305 /*
2306 * This algorithm is recursive because the amount of used stack
2307 * space is very small and the max recursion depth is limited.
2308 */
2309 list_for_each(elem_ref_to, &block->ref_to_list) {
2310 const struct btrfsic_block_link *const l =
2311 list_entry(elem_ref_to, struct btrfsic_block_link,
2312 node_ref_to);
2313
2314 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2315 printk(KERN_INFO
2316 "rl=%d, %c @%llu (%s/%llu/%d)"
2317 " %u* refers to %c @%llu (%s/%llu/%d)\n",
2318 recursion_level,
2319 btrfsic_get_block_type(state, block),
2320 (unsigned long long)block->logical_bytenr,
2321 block->dev_state->name,
2322 (unsigned long long)block->dev_bytenr,
2323 block->mirror_num,
2324 l->ref_cnt,
2325 btrfsic_get_block_type(state, l->block_ref_to),
2326 (unsigned long long)
2327 l->block_ref_to->logical_bytenr,
2328 l->block_ref_to->dev_state->name,
2329 (unsigned long long)l->block_ref_to->dev_bytenr,
2330 l->block_ref_to->mirror_num);
2331 if (l->block_ref_to->never_written) {
2332 printk(KERN_INFO "btrfs: attempt to write superblock"
2333 " which references block %c @%llu (%s/%llu/%d)"
2334 " which is never written!\n",
2335 btrfsic_get_block_type(state, l->block_ref_to),
2336 (unsigned long long)
2337 l->block_ref_to->logical_bytenr,
2338 l->block_ref_to->dev_state->name,
2339 (unsigned long long)l->block_ref_to->dev_bytenr,
2340 l->block_ref_to->mirror_num);
2341 ret = -1;
2342 } else if (!l->block_ref_to->is_iodone) {
2343 printk(KERN_INFO "btrfs: attempt to write superblock"
2344 " which references block %c @%llu (%s/%llu/%d)"
2345 " which is not yet iodone!\n",
2346 btrfsic_get_block_type(state, l->block_ref_to),
2347 (unsigned long long)
2348 l->block_ref_to->logical_bytenr,
2349 l->block_ref_to->dev_state->name,
2350 (unsigned long long)l->block_ref_to->dev_bytenr,
2351 l->block_ref_to->mirror_num);
2352 ret = -1;
2353 } else if (l->parent_generation !=
2354 l->block_ref_to->generation &&
2355 BTRFSIC_GENERATION_UNKNOWN !=
2356 l->parent_generation &&
2357 BTRFSIC_GENERATION_UNKNOWN !=
2358 l->block_ref_to->generation) {
2359 printk(KERN_INFO "btrfs: attempt to write superblock"
2360 " which references block %c @%llu (%s/%llu/%d)"
2361 " with generation %llu !="
2362 " parent generation %llu!\n",
2363 btrfsic_get_block_type(state, l->block_ref_to),
2364 (unsigned long long)
2365 l->block_ref_to->logical_bytenr,
2366 l->block_ref_to->dev_state->name,
2367 (unsigned long long)l->block_ref_to->dev_bytenr,
2368 l->block_ref_to->mirror_num,
2369 (unsigned long long)l->block_ref_to->generation,
2370 (unsigned long long)l->parent_generation);
2371 ret = -1;
2372 } else if (l->block_ref_to->flush_gen >
2373 l->block_ref_to->dev_state->last_flush_gen) {
2374 printk(KERN_INFO "btrfs: attempt to write superblock"
2375 " which references block %c @%llu (%s/%llu/%d)"
2376 " which is not flushed out of disk's write cache"
2377 " (block flush_gen=%llu,"
2378 " dev->flush_gen=%llu)!\n",
2379 btrfsic_get_block_type(state, l->block_ref_to),
2380 (unsigned long long)
2381 l->block_ref_to->logical_bytenr,
2382 l->block_ref_to->dev_state->name,
2383 (unsigned long long)l->block_ref_to->dev_bytenr,
2384 l->block_ref_to->mirror_num,
2385 (unsigned long long)block->flush_gen,
2386 (unsigned long long)
2387 l->block_ref_to->dev_state->last_flush_gen);
2388 ret = -1;
2389 } else if (-1 == btrfsic_check_all_ref_blocks(state,
2390 l->block_ref_to,
2391 recursion_level +
2392 1)) {
2393 ret = -1;
2394 }
2395 }
2396
2397 return ret;
2398}
2399
2400static int btrfsic_is_block_ref_by_superblock(
2401 const struct btrfsic_state *state,
2402 const struct btrfsic_block *block,
2403 int recursion_level)
2404{
2405 struct list_head *elem_ref_from;
2406
2407 if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
2408 /* refer to comment at "abort cyclic linkage (case 1)" */
2409 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2410 printk(KERN_INFO
2411 "btrfsic: abort cyclic linkage (case 2).\n");
2412
2413 return 0;
2414 }
2415
2416 /*
2417 * This algorithm is recursive because the amount of used stack space
2418 * is very small and the max recursion depth is limited.
2419 */
2420 list_for_each(elem_ref_from, &block->ref_from_list) {
2421 const struct btrfsic_block_link *const l =
2422 list_entry(elem_ref_from, struct btrfsic_block_link,
2423 node_ref_from);
2424
2425 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2426 printk(KERN_INFO
2427 "rl=%d, %c @%llu (%s/%llu/%d)"
2428 " is ref %u* from %c @%llu (%s/%llu/%d)\n",
2429 recursion_level,
2430 btrfsic_get_block_type(state, block),
2431 (unsigned long long)block->logical_bytenr,
2432 block->dev_state->name,
2433 (unsigned long long)block->dev_bytenr,
2434 block->mirror_num,
2435 l->ref_cnt,
2436 btrfsic_get_block_type(state, l->block_ref_from),
2437 (unsigned long long)
2438 l->block_ref_from->logical_bytenr,
2439 l->block_ref_from->dev_state->name,
2440 (unsigned long long)
2441 l->block_ref_from->dev_bytenr,
2442 l->block_ref_from->mirror_num);
2443 if (l->block_ref_from->is_superblock &&
2444 state->latest_superblock->dev_bytenr ==
2445 l->block_ref_from->dev_bytenr &&
2446 state->latest_superblock->dev_state->bdev ==
2447 l->block_ref_from->dev_state->bdev)
2448 return 1;
2449 else if (btrfsic_is_block_ref_by_superblock(state,
2450 l->block_ref_from,
2451 recursion_level +
2452 1))
2453 return 1;
2454 }
2455
2456 return 0;
2457}
2458
2459static void btrfsic_print_add_link(const struct btrfsic_state *state,
2460 const struct btrfsic_block_link *l)
2461{
2462 printk(KERN_INFO
2463 "Add %u* link from %c @%llu (%s/%llu/%d)"
2464 " to %c @%llu (%s/%llu/%d).\n",
2465 l->ref_cnt,
2466 btrfsic_get_block_type(state, l->block_ref_from),
2467 (unsigned long long)l->block_ref_from->logical_bytenr,
2468 l->block_ref_from->dev_state->name,
2469 (unsigned long long)l->block_ref_from->dev_bytenr,
2470 l->block_ref_from->mirror_num,
2471 btrfsic_get_block_type(state, l->block_ref_to),
2472 (unsigned long long)l->block_ref_to->logical_bytenr,
2473 l->block_ref_to->dev_state->name,
2474 (unsigned long long)l->block_ref_to->dev_bytenr,
2475 l->block_ref_to->mirror_num);
2476}
2477
2478static void btrfsic_print_rem_link(const struct btrfsic_state *state,
2479 const struct btrfsic_block_link *l)
2480{
2481 printk(KERN_INFO
2482 "Rem %u* link from %c @%llu (%s/%llu/%d)"
2483 " to %c @%llu (%s/%llu/%d).\n",
2484 l->ref_cnt,
2485 btrfsic_get_block_type(state, l->block_ref_from),
2486 (unsigned long long)l->block_ref_from->logical_bytenr,
2487 l->block_ref_from->dev_state->name,
2488 (unsigned long long)l->block_ref_from->dev_bytenr,
2489 l->block_ref_from->mirror_num,
2490 btrfsic_get_block_type(state, l->block_ref_to),
2491 (unsigned long long)l->block_ref_to->logical_bytenr,
2492 l->block_ref_to->dev_state->name,
2493 (unsigned long long)l->block_ref_to->dev_bytenr,
2494 l->block_ref_to->mirror_num);
2495}
2496
2497static char btrfsic_get_block_type(const struct btrfsic_state *state,
2498 const struct btrfsic_block *block)
2499{
2500 if (block->is_superblock &&
2501 state->latest_superblock->dev_bytenr == block->dev_bytenr &&
2502 state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
2503 return 'S';
2504 else if (block->is_superblock)
2505 return 's';
2506 else if (block->is_metadata)
2507 return 'M';
2508 else
2509 return 'D';
2510}
2511
/* Dump the block reference tree rooted at the most recent superblock. */
static void btrfsic_dump_tree(const struct btrfsic_state *state)
{
	btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
}
2516
2517static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
2518 const struct btrfsic_block *block,
2519 int indent_level)
2520{
2521 struct list_head *elem_ref_to;
2522 int indent_add;
2523 static char buf[80];
2524 int cursor_position;
2525
2526 /*
2527 * Should better fill an on-stack buffer with a complete line and
2528 * dump it at once when it is time to print a newline character.
2529 */
2530
2531 /*
2532 * This algorithm is recursive because the amount of used stack space
2533 * is very small and the max recursion depth is limited.
2534 */
2535 indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
2536 btrfsic_get_block_type(state, block),
2537 (unsigned long long)block->logical_bytenr,
2538 block->dev_state->name,
2539 (unsigned long long)block->dev_bytenr,
2540 block->mirror_num);
2541 if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
2542 printk("[...]\n");
2543 return;
2544 }
2545 printk(buf);
2546 indent_level += indent_add;
2547 if (list_empty(&block->ref_to_list)) {
2548 printk("\n");
2549 return;
2550 }
2551 if (block->mirror_num > 1 &&
2552 !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
2553 printk(" [...]\n");
2554 return;
2555 }
2556
2557 cursor_position = indent_level;
2558 list_for_each(elem_ref_to, &block->ref_to_list) {
2559 const struct btrfsic_block_link *const l =
2560 list_entry(elem_ref_to, struct btrfsic_block_link,
2561 node_ref_to);
2562
2563 while (cursor_position < indent_level) {
2564 printk(" ");
2565 cursor_position++;
2566 }
2567 if (l->ref_cnt > 1)
2568 indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
2569 else
2570 indent_add = sprintf(buf, " --> ");
2571 if (indent_level + indent_add >
2572 BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
2573 printk("[...]\n");
2574 cursor_position = 0;
2575 continue;
2576 }
2577
2578 printk(buf);
2579
2580 btrfsic_dump_tree_sub(state, l->block_ref_to,
2581 indent_level + indent_add);
2582 cursor_position = 0;
2583 }
2584}
2585
2586static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
2587 struct btrfsic_state *state,
2588 struct btrfsic_block_data_ctx *next_block_ctx,
2589 struct btrfsic_block *next_block,
2590 struct btrfsic_block *from_block,
2591 u64 parent_generation)
2592{
2593 struct btrfsic_block_link *l;
2594
2595 l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
2596 next_block_ctx->dev_bytenr,
2597 from_block->dev_state->bdev,
2598 from_block->dev_bytenr,
2599 &state->block_link_hashtable);
2600 if (NULL == l) {
2601 l = btrfsic_block_link_alloc();
2602 if (NULL == l) {
2603 printk(KERN_INFO
2604 "btrfsic: error, kmalloc" " failed!\n");
2605 return NULL;
2606 }
2607
2608 l->block_ref_to = next_block;
2609 l->block_ref_from = from_block;
2610 l->ref_cnt = 1;
2611 l->parent_generation = parent_generation;
2612
2613 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2614 btrfsic_print_add_link(state, l);
2615
2616 list_add(&l->node_ref_to, &from_block->ref_to_list);
2617 list_add(&l->node_ref_from, &next_block->ref_from_list);
2618
2619 btrfsic_block_link_hashtable_add(l,
2620 &state->block_link_hashtable);
2621 } else {
2622 l->ref_cnt++;
2623 l->parent_generation = parent_generation;
2624 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2625 btrfsic_print_add_link(state, l);
2626 }
2627
2628 return l;
2629}
2630
2631static struct btrfsic_block *btrfsic_block_lookup_or_add(
2632 struct btrfsic_state *state,
2633 struct btrfsic_block_data_ctx *block_ctx,
2634 const char *additional_string,
2635 int is_metadata,
2636 int is_iodone,
2637 int never_written,
2638 int mirror_num,
2639 int *was_created)
2640{
2641 struct btrfsic_block *block;
2642
2643 block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
2644 block_ctx->dev_bytenr,
2645 &state->block_hashtable);
2646 if (NULL == block) {
2647 struct btrfsic_dev_state *dev_state;
2648
2649 block = btrfsic_block_alloc();
2650 if (NULL == block) {
2651 printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
2652 return NULL;
2653 }
2654 dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
2655 if (NULL == dev_state) {
2656 printk(KERN_INFO
2657 "btrfsic: error, lookup dev_state failed!\n");
2658 btrfsic_block_free(block);
2659 return NULL;
2660 }
2661 block->dev_state = dev_state;
2662 block->dev_bytenr = block_ctx->dev_bytenr;
2663 block->logical_bytenr = block_ctx->start;
2664 block->is_metadata = is_metadata;
2665 block->is_iodone = is_iodone;
2666 block->never_written = never_written;
2667 block->mirror_num = mirror_num;
2668 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
2669 printk(KERN_INFO
2670 "New %s%c-block @%llu (%s/%llu/%d)\n",
2671 additional_string,
2672 btrfsic_get_block_type(state, block),
2673 (unsigned long long)block->logical_bytenr,
2674 dev_state->name,
2675 (unsigned long long)block->dev_bytenr,
2676 mirror_num);
2677 list_add(&block->all_blocks_node, &state->all_blocks_list);
2678 btrfsic_block_hashtable_add(block, &state->block_hashtable);
2679 if (NULL != was_created)
2680 *was_created = 1;
2681 } else {
2682 if (NULL != was_created)
2683 *was_created = 0;
2684 }
2685
2686 return block;
2687}
2688
/*
 * Verify that the logical bytenr stored inside a written metadata block
 * maps, on at least one mirror, to the device+offset the block was
 * actually submitted to. On mismatch, dump all mirror mappings and warn.
 * NOTE(review): the 'data' parameter is unused in this function body.
 */
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr, char *data)
{
	int num_copies;
	int mirror_num;
	int ret;
	struct btrfsic_block_data_ctx block_ctx;
	int match = 0;

	num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
				      bytenr, PAGE_SIZE);

	/* check every mirror of the logical address for a match with the
	 * submitted device+offset */
	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
		ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
					&block_ctx, mirror_num);
		if (ret) {
			printk(KERN_INFO "btrfsic:"
			       " btrfsic_map_block(logical @%llu,"
			       " mirror %d) failed!\n",
			       (unsigned long long)bytenr, mirror_num);
			continue;
		}

		if (dev_state->bdev == block_ctx.dev->bdev &&
		    dev_bytenr == block_ctx.dev_bytenr) {
			match++;
			btrfsic_release_block_ctx(&block_ctx);
			break;
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

	if (!match) {
		/* no mirror matched: print the offending write, then dump
		 * every mirror mapping to aid debugging, then warn */
		printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
		       " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
		       " phys_bytenr=%llu)!\n",
		       (unsigned long long)bytenr, dev_state->name,
		       (unsigned long long)dev_bytenr);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
						&block_ctx, mirror_num);
			if (ret)
				continue;

			printk(KERN_INFO "Read logical bytenr @%llu maps to"
			       " (%s/%llu/%d)\n",
			       (unsigned long long)bytenr,
			       block_ctx.dev->name,
			       (unsigned long long)block_ctx.dev_bytenr,
			       mirror_num);
		}
		WARN_ON(1);
	}
}
2745
2746static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
2747 struct block_device *bdev)
2748{
2749 struct btrfsic_dev_state *ds;
2750
2751 ds = btrfsic_dev_state_hashtable_lookup(bdev,
2752 &btrfsic_dev_state_hashtable);
2753 return ds;
2754}
2755
/*
 * Exported replacement for submit_bh(): intercepts buffer_head based
 * writes (btrfs uses these only for superblock writes, incl. FLUSH/FUA),
 * feeds them to the integrity checker, then submits the bh normally.
 */
int btrfsic_submit_bh(int rw, struct buffer_head *bh)
{
	struct btrfsic_dev_state *dev_state;

	if (!btrfsic_is_initialized)
		return submit_bh(rw, bh);

	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bh() might also be called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bh->b_bdev);

	/* Only called to write the superblock (incl. FLUSH/FUA) */
	if (NULL != dev_state &&
	    (rw & WRITE) && bh->b_size > 0) {
		u64 dev_bytenr;

		/* superblock bhs use a fixed 4096 byte block size */
		dev_bytenr = 4096 * bh->b_blocknr;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
			       " size=%lu, data=%p, bdev=%p)\n",
			       rw, (unsigned long)bh->b_blocknr,
			       (unsigned long long)dev_bytenr,
			       (unsigned long)bh->b_size, bh->b_data,
			       bh->b_bdev);
		btrfsic_process_written_block(dev_state, dev_bytenr,
					      bh->b_data, bh->b_size, NULL,
					      NULL, bh, rw);
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n",
			       rw, bh->b_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			/* a FLUSH is already in flight for this device */
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bh(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			/* track the FLUSH via the per-device dummy block so
			 * that its completion can advance last_flush_gen */
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;

			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			block->orig_bio_bh_private = bh->b_private;
			block->orig_bio_bh_end_io.bh = bh->b_end_io;
			block->next_in_same_bio = NULL;
			bh->b_private = block;
			bh->b_end_io = btrfsic_bh_end_io;
		}
	}
	mutex_unlock(&btrfsic_mutex);
	return submit_bh(rw, bh);
}
2820
/*
 * Exported replacement for submit_bio(): intercepts write bios, hands
 * every payload page to the integrity checker (patching the bio's
 * completion callback along the way), then submits the bio normally.
 * FLUSH requests are tracked via the per-device dummy block.
 */
void btrfsic_submit_bio(int rw, struct bio *bio)
{
	struct btrfsic_dev_state *dev_state;

	if (!btrfsic_is_initialized) {
		submit_bio(rw, bio);
		return;
	}

	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bio() is also called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
	if (NULL != dev_state &&
	    (rw & WRITE) && NULL != bio->bi_io_vec) {
		unsigned int i;
		u64 dev_bytenr;
		int bio_is_patched;

		dev_bytenr = 512 * bio->bi_sector;
		bio_is_patched = 0;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x, bi_vcnt=%u,"
			       " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
			       rw, bio->bi_vcnt, (unsigned long)bio->bi_sector,
			       (unsigned long long)dev_bytenr,
			       bio->bi_bdev);

		/* process each bio_vec page separately; the first call
		 * patches bi_end_io/bi_private, later calls chain their
		 * blocks onto the same bio */
		for (i = 0; i < bio->bi_vcnt; i++) {
			u8 *mapped_data;

			mapped_data = kmap(bio->bi_io_vec[i].bv_page);
			if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			     BTRFSIC_PRINT_MASK_VERBOSE) ==
			    (dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "#%u: page=%p, mapped=%p, len=%u,"
				       " offset=%u\n",
				       i, bio->bi_io_vec[i].bv_page,
				       mapped_data,
				       bio->bi_io_vec[i].bv_len,
				       bio->bi_io_vec[i].bv_offset);
			btrfsic_process_written_block(dev_state, dev_bytenr,
						      mapped_data,
						      bio->bi_io_vec[i].bv_len,
						      bio, &bio_is_patched,
						      NULL, rw);
			kunmap(bio->bi_io_vec[i].bv_page);
			dev_bytenr += bio->bi_io_vec[i].bv_len;
		}
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n",
			       rw, bio->bi_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			/* a FLUSH is already in flight for this device */
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bio(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			/* track the FLUSH via the per-device dummy block so
			 * that its completion can advance last_flush_gen */
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;

			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			block->orig_bio_bh_private = bio->bi_private;
			block->orig_bio_bh_end_io.bio = bio->bi_end_io;
			block->next_in_same_bio = NULL;
			bio->bi_private = block;
			bio->bi_end_io = btrfsic_bio_end_io;
		}
	}
	mutex_unlock(&btrfsic_mutex);

	submit_bio(rw, bio);
}
2910
/*
 * Enable integrity checking for a mounted filesystem: allocate the
 * per-filesystem btrfsic state, register a dev_state for every device
 * that has a bdev, and build the initial block database from the
 * on-disk superblocks.
 *
 * Returns 0 on success, -1 or the error from superblock processing.
 */
int btrfsic_mount(struct btrfs_root *root,
		  struct btrfs_fs_devices *fs_devices,
		  int including_extent_data, u32 print_mask)
{
	int ret;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (NULL == state) {
		printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
		return -1;
	}

	/* one-time global setup shared by all mounted filesystems */
	if (!btrfsic_is_initialized) {
		mutex_init(&btrfsic_mutex);
		btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
		btrfsic_is_initialized = 1;
	}
	mutex_lock(&btrfsic_mutex);
	state->root = root;
	state->print_mask = print_mask;
	state->include_extent_data = including_extent_data;
	state->csum_size = 0;
	INIT_LIST_HEAD(&state->all_blocks_list);
	btrfsic_block_hashtable_init(&state->block_hashtable);
	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
	state->max_superblock_generation = 0;
	state->latest_superblock = NULL;

	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;
		char *p;

		if (!device->bdev || !device->name)
			continue;

		ds = btrfsic_dev_state_alloc();
		if (NULL == ds) {
			printk(KERN_INFO
			       "btrfs check-integrity: kmalloc() failed!\n");
			/* NOTE(review): 'state' and dev_states already added
			 * to the hashtable are leaked on this error path --
			 * presumably tolerated for a debug feature; confirm */
			mutex_unlock(&btrfsic_mutex);
			return -1;
		}
		ds->bdev = device->bdev;
		ds->state = state;
		bdevname(ds->bdev, ds->name);
		ds->name[BDEVNAME_SIZE - 1] = '\0';
		/* strip any path prefix, keeping only the basename */
		for (p = ds->name; *p != '\0'; p++);
		while (p > ds->name && *p != '/')
			p--;
		if (*p == '/')
			p++;
		strlcpy(ds->name, p, sizeof(ds->name));
		btrfsic_dev_state_hashtable_add(ds,
						&btrfsic_dev_state_hashtable);
	}

	/* build the initial database of blocks referenced by the supers */
	ret = btrfsic_process_superblock(state, fs_devices);
	if (0 != ret) {
		mutex_unlock(&btrfsic_mutex);
		btrfsic_unmount(root, fs_devices);
		return ret;
	}

	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
		btrfsic_dump_database(state);
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
		btrfsic_dump_tree(state);

	mutex_unlock(&btrfsic_mutex);
	return 0;
}
2985
2986void btrfsic_unmount(struct btrfs_root *root,
2987 struct btrfs_fs_devices *fs_devices)
2988{
2989 struct list_head *elem_all;
2990 struct list_head *tmp_all;
2991 struct btrfsic_state *state;
2992 struct list_head *dev_head = &fs_devices->devices;
2993 struct btrfs_device *device;
2994
2995 if (!btrfsic_is_initialized)
2996 return;
2997
2998 mutex_lock(&btrfsic_mutex);
2999
3000 state = NULL;
3001 list_for_each_entry(device, dev_head, dev_list) {
3002 struct btrfsic_dev_state *ds;
3003
3004 if (!device->bdev || !device->name)
3005 continue;
3006
3007 ds = btrfsic_dev_state_hashtable_lookup(
3008 device->bdev,
3009 &btrfsic_dev_state_hashtable);
3010 if (NULL != ds) {
3011 state = ds->state;
3012 btrfsic_dev_state_hashtable_remove(ds);
3013 btrfsic_dev_state_free(ds);
3014 }
3015 }
3016
3017 if (NULL == state) {
3018 printk(KERN_INFO
3019 "btrfsic: error, cannot find state information"
3020 " on umount!\n");
3021 mutex_unlock(&btrfsic_mutex);
3022 return;
3023 }
3024
3025 /*
3026 * Don't care about keeping the lists' state up to date,
3027 * just free all memory that was allocated dynamically.
3028 * Free the blocks and the block_links.
3029 */
3030 list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
3031 struct btrfsic_block *const b_all =
3032 list_entry(elem_all, struct btrfsic_block,
3033 all_blocks_node);
3034 struct list_head *elem_ref_to;
3035 struct list_head *tmp_ref_to;
3036
3037 list_for_each_safe(elem_ref_to, tmp_ref_to,
3038 &b_all->ref_to_list) {
3039 struct btrfsic_block_link *const l =
3040 list_entry(elem_ref_to,
3041 struct btrfsic_block_link,
3042 node_ref_to);
3043
3044 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
3045 btrfsic_print_rem_link(state, l);
3046
3047 l->ref_cnt--;
3048 if (0 == l->ref_cnt)
3049 btrfsic_block_link_free(l);
3050 }
3051
3052 if (b_all->is_iodone)
3053 btrfsic_block_free(b_all);
3054 else
3055 printk(KERN_INFO "btrfs: attempt to free %c-block"
3056 " @%llu (%s/%llu/%d) on umount which is"
3057 " not yet iodone!\n",
3058 btrfsic_get_block_type(state, b_all),
3059 (unsigned long long)b_all->logical_bytenr,
3060 b_all->dev_state->name,
3061 (unsigned long long)b_all->dev_bytenr,
3062 b_all->mirror_num);
3063 }
3064
3065 mutex_unlock(&btrfsic_mutex);
3066
3067 kfree(state);
3068}