__le32 stripesize;
__le32 sys_chunk_array_size;
__le64 chunk_root_generation;
+ __le64 compat_flags;
+ __le64 compat_ro_flags;
+ __le64 incompat_flags;
u8 root_level;
u8 chunk_root_level;
u8 log_root_level;
u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
} __attribute__ ((__packed__));
+/*
+ * Feature flags that we support. If any incompat flags other than the
+ * ones specified below are set, the mount will fail.
+ */
+#define BTRFS_FEATURE_COMPAT_SUPP 0x0
+#define BTRFS_FEATURE_COMPAT_RO_SUPP 0x0
+#define BTRFS_FEATURE_INCOMPAT_SUPP 0x0
+
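All three SUPP masks start out as 0x0 because no optional features are defined yet. As a rough sketch of how this is meant to be extended (the EXAMPLE bit below is hypothetical and not part of this patch), a future incompat feature would get its own bit and be OR'd into the corresponding supported mask:

#define BTRFS_FEATURE_INCOMPAT_EXAMPLE	(1ULL << 0)	/* hypothetical future feature bit */

/* ...and the supported mask would then become: */
#define BTRFS_FEATURE_INCOMPAT_SUPP	BTRFS_FEATURE_INCOMPAT_EXAMPLE

An on-disk superblock carrying an incompat bit outside this mask is then rejected by the check added to the mount path below.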
/*
* A leaf is full of items. offset and size tell us where to find
* the item in the leaf (relative to the start of the data area)
__le32 gid;
__le32 mode;
__le64 rdev;
- __le16 flags;
- __le16 compat_flags;
+ __le64 flags;
struct btrfs_timespec atime;
struct btrfs_timespec ctime;
__le64 byte_limit;
__le64 bytes_used;
__le64 last_snapshot;
- __le32 flags;
+ __le64 flags;
__le32 refs;
struct btrfs_disk_key drop_progress;
u8 drop_level;
BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
-BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 16);
-BTRFS_SETGET_FUNCS(inode_compat_flags, struct btrfs_inode_item,
- compat_flags, 16);
+BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
static inline struct btrfs_timespec *
btrfs_inode_atime(struct btrfs_inode_item *inode_item)
BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8);
BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64);
BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32);
-BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 32);
+BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64);
BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64);
BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64);
BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
root_dir_objectid, 64);
BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
num_devices, 64);
+BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
+ compat_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
+ compat_ro_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
+ incompat_flags, 64);
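For reference, BTRFS_SETGET_STACK_FUNCS generates an endianness-converting getter/setter pair that operates directly on the C structure (as opposed to BTRFS_SETGET_FUNCS, which reads through an extent_buffer). The incompat accessors added above expand to roughly this (simplified sketch):

static inline u64 btrfs_super_incompat_flags(struct btrfs_super_block *s)
{
	/* convert the little-endian on-disk value to CPU byte order */
	return le64_to_cpu(s->incompat_flags);
}

static inline void btrfs_set_super_incompat_flags(struct btrfs_super_block *s,
						  u64 val)
{
	s->incompat_flags = cpu_to_le64(val);
}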
static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
{
u32 blocksize;
u32 stripesize;
u64 generation;
+ u64 features;
struct btrfs_key location;
struct buffer_head *bh;
struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
goto fail_sb_buffer;
}
+ features = btrfs_super_incompat_flags(disk_super) &
+ ~BTRFS_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+ printk(KERN_ERR "BTRFS: couldn't mount because of "
+ "unsupported optional features (%Lx).\n",
+ features);
+ err = -EINVAL;
+ goto fail_sb_buffer;
+ }
+
+ features = btrfs_super_compat_ro_flags(disk_super) &
+ ~BTRFS_FEATURE_COMPAT_RO_SUPP;
+ if (!(sb->s_flags & MS_RDONLY) && features) {
+ printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
+ "unsupported option features (%Lx).\n",
+ features);
+ err = -EINVAL;
+ goto fail_sb_buffer;
+ }
+
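Taken together, the two checks implement the feature-mask policy: unknown incompat bits always refuse the mount, unknown compat_ro bits refuse only a read-write mount (a read-only mount is still allowed), and compat bits are never tested here, so an unknown compat feature does not block mounting at all.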
/*
* we need to start all the end_io workers up front because the
* queue work function gets called at interrupt time, and so it