* every tree block (leaf or node) starts with this header.
*/
struct btrfs_header {
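+ /* sha256 of everything after this field, see csum_tree_block() */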
+ __le32 csum[8];
u8 fsid[16]; /* FS specific uuid */
__le64 blocknr; /* which block this node is supposed to live in */
__le64 generation;
__le64 parentid; /* objectid of the tree root */
- __le32 csum;
__le32 ham;
__le16 nritems;
__le16 flags;
* it currently lacks any block count etc etc
*/
struct btrfs_super_block {
+ __le32 csum[8];
+ /* the first 3 fields must match struct btrfs_header */
u8 fsid[16]; /* FS specific uuid */
__le64 blocknr; /* this block number */
- __le32 csum;
__le64 magic;
__le32 blocksize;
__le64 generation;
struct btrfs_disk_key key;
} __attribute__ ((__packed__));
+struct crypto_hash;
struct btrfs_fs_info {
struct btrfs_root *fs_root;
struct btrfs_root *extent_root;
struct inode *btree_inode;
struct mutex trans_mutex;
struct mutex fs_mutex;
+ struct crypto_hash *hash_tfm;
+ spinlock_t hash_lock;
};
/*
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
return 0;
}
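+/*
+ * compute a sha256 over a tree block, skipping the 32 byte csum field at
+ * the start of the header.  With verify == 0 the result is stored in the
+ * header, with verify != 0 it is compared against the csum already there.
+ */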
+static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
+ int verify)
+{
+ struct btrfs_node *node = btrfs_buffer_node(bh);
+ struct scatterlist sg;
+ struct crypto_hash *tfm = root->fs_info->hash_tfm;
+ struct hash_desc desc;
+ int ret;
+ char result[32];
+
+ desc.tfm = tfm;
+ desc.flags = 0;
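+ /* skip the 32 byte csum at the front of the block when hashing */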
+ sg_init_one(&sg, bh->b_data + 32, bh->b_size - 32);
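+ /* a single hash_tfm is shared by the whole FS, hash_lock serializes it */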
+ spin_lock(&root->fs_info->hash_lock);
+ ret = crypto_hash_digest(&desc, &sg, bh->b_size - 32, result);
+ spin_unlock(&root->fs_info->hash_lock);
+ if (ret) {
+ printk("sha256 digest failed\n");
+ return ret;
+ }
+ if (verify) {
+ if (memcmp(node->header.csum, result, sizeof(result))) {
+ printk("csum verify failed on %Lu\n", (unsigned long long)bh->b_blocknr);
+ return -EINVAL;
+ }
+ } else
+ memcpy(node->header.csum, result, sizeof(node->header.csum));
+ return 0;
+}
+
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
+ struct buffer_head *bh;
+ struct btrfs_root *root = btrfs_sb(page->mapping->host->i_sb);
+ struct buffer_head *head;
+
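+ /* make sure the page has buffer heads before walking them */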
+ if (!page_has_buffers(page)) {
+ create_empty_buffers(page, root->fs_info->sb->s_blocksize,
+ (1 << BH_Dirty)|(1 << BH_Uptodate));
+ }
+ head = page_buffers(page);
+ bh = head;
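+ /* fill in a csum for every dirty buffer before it is written */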
+ do {
+ if (buffer_dirty(bh))
+ csum_tree_block(root, bh, 0);
+ bh = bh->b_this_page;
+ } while (bh != head);
return block_write_full_page(page, btree_get_block, wbc);
}
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
goto fail;
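+ /* the block was just read from disk, check its csum */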
+ csum_tree_block(root, bh, 1);
} else {
unlock_buffer(bh);
}
GFP_NOFS);
int ret;
- if (!btrfs_super_root(disk_super))
+ if (!btrfs_super_root(disk_super)) {
return NULL;
+ }
init_bit_radix(&fs_info->pinned_radix);
init_bit_radix(&fs_info->pending_del_radix);
sb_set_blocksize(sb, sb_buffer->b_size);
fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
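+ /* allocate one sha256 transform shared for all tree block csums */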
+ fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fs_info->hash_tfm)) {
+ printk("failed to allocate sha256 hash\n");
+ return NULL;
+ }
+ spin_lock_init(&fs_info->hash_lock);
mutex_init(&fs_info->trans_mutex);
mutex_init(&fs_info->fs_mutex);
fs_info->sb_buffer = read_tree_block(tree_root, sb_buffer->b_blocknr);
- if (!fs_info->sb_buffer)
+ if (!fs_info->sb_buffer) {
+printk("failed2\n");
return NULL;
-
+ }
brelse(sb_buffer);
sb_buffer = NULL;
disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
root->fs_info->tree_root->node->b_blocknr);
lock_buffer(bh);
clear_buffer_dirty(bh);
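+ /* give the buffer a fresh csum before it is submitted for write */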
+ csum_tree_block(root, bh, 0);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
submit_bh(WRITE, bh);
root->fs_info->tree_root->node);
btrfs_block_release(root, root->commit_root);
btrfs_block_release(root, root->fs_info->sb_buffer);
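+ /* release the shared sha256 transform */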
+ crypto_free_hash(root->fs_info->hash_tfm);
iput(root->fs_info->btree_inode);
kfree(root->fs_info->extent_root);
kfree(root->fs_info->inode_root);