Btrfs: change reserved_extents to an atomic_t
author     Josef Bacik <josef@redhat.com>
           Tue, 25 Jan 2011 21:30:38 +0000 (16:30 -0500)
committer  Josef Bacik <josef@redhat.com>
           Thu, 17 Mar 2011 18:21:18 +0000 (14:21 -0400)
We track delayed allocation per inode via two counters: outstanding_extents
and reserved_extents.  outstanding_extents is already an atomic_t, but
reserved_extents is not and is protected by a spinlock.  So convert
reserved_extents to an atomic_t as well and, instead of taking the spinlock,
use atomic_cmpxchg when releasing delalloc bytes.  This makes our inode 72
bytes smaller and reduces locking overhead (albeit it was minimal to begin
with).  Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
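
As a rough illustration of the pattern this commit adopts (not kernel code and
not part of the patch), the release path can be modelled in userspace with C11
atomics: snapshot the counter, work out the value it should drop to, and retry
the compare-and-swap if another thread changed it in the meantime.  The
variable and function names below are hypothetical stand-ins for the per-inode
counters.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-inode counters. */
static atomic_int reserved_extents = 5;
static atomic_int outstanding_extents = 2;

/*
 * Lockless release: shrink reserved_extents down to the current
 * outstanding count and report how many extents were freed up.
 */
static int release_excess_reservations(void)
{
        int reserved = atomic_load(&reserved_extents);
        int outstanding;

        for (;;) {
                outstanding = atomic_load(&outstanding_extents);
                if (outstanding >= reserved)
                        return 0;
                /* On success reserved_extents becomes 'outstanding'. */
                if (atomic_compare_exchange_weak(&reserved_extents,
                                                 &reserved, outstanding))
                        return reserved - outstanding;
                /* CAS failed: 'reserved' was refreshed, go around again. */
        }
}

int main(void)
{
        printf("released %d extents\n", release_excess_reservations());
        printf("reserved_extents is now %d\n",
               atomic_load(&reserved_extents));
        return 0;
}

The kernel's atomic_cmpxchg() returns the old value rather than updating a
pointer through an argument, but the retry structure in the patch below is
the same.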
fs/btrfs/btrfs_inode.h
fs/btrfs/extent-tree.c
fs/btrfs/inode.c

diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ccc991c542df39ca152f53950aa98ec75aa1d7a9..57c3bb2884ceabd2ea4be939557a6c273c9e8626 100644
@@ -136,9 +136,8 @@ struct btrfs_inode {
         * items we think we'll end up using, and reserved_extents is the number
         * of extent items we've reserved metadata for.
         */
-       spinlock_t accounting_lock;
        atomic_t outstanding_extents;
-       int reserved_extents;
+       atomic_t reserved_extents;
 
        /*
         * ordered_data_close is set by truncate when a file that used
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b3089b5c2df816522e2356d3064f0b2bdb40c4f..27376c97d85f9407378896c3237f0e6fbbfddcc8 100644
@@ -3996,6 +3996,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
        u64 to_reserve;
        int nr_extents;
+       int reserved_extents;
        int ret;
 
        if (btrfs_transaction_in_commit(root->fs_info))
@@ -4003,25 +4004,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
 
-       spin_lock(&BTRFS_I(inode)->accounting_lock);
        nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
-       if (nr_extents > BTRFS_I(inode)->reserved_extents) {
-               nr_extents -= BTRFS_I(inode)->reserved_extents;
+       reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+
+       if (nr_extents > reserved_extents) {
+               nr_extents -= reserved_extents;
                to_reserve = calc_trans_metadata_size(root, nr_extents);
        } else {
                nr_extents = 0;
                to_reserve = 0;
        }
-       spin_unlock(&BTRFS_I(inode)->accounting_lock);
+
        to_reserve += calc_csum_metadata_size(inode, num_bytes);
        ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
        if (ret)
                return ret;
 
-       spin_lock(&BTRFS_I(inode)->accounting_lock);
-       BTRFS_I(inode)->reserved_extents += nr_extents;
+       atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
        atomic_inc(&BTRFS_I(inode)->outstanding_extents);
-       spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
@@ -4036,20 +4036,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 to_free;
        int nr_extents;
+       int reserved_extents;
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
        atomic_dec(&BTRFS_I(inode)->outstanding_extents);
        WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
-       spin_lock(&BTRFS_I(inode)->accounting_lock);
-       nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
-       if (nr_extents < BTRFS_I(inode)->reserved_extents) {
-               nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
-               BTRFS_I(inode)->reserved_extents -= nr_extents;
-       } else {
-               nr_extents = 0;
-       }
-       spin_unlock(&BTRFS_I(inode)->accounting_lock);
+       reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+       do {
+               int old, new;
+
+               nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
+               if (nr_extents >= reserved_extents) {
+                       nr_extents = 0;
+                       break;
+               }
+               old = reserved_extents;
+               nr_extents = reserved_extents - nr_extents;
+               new = reserved_extents - nr_extents;
+               old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
+                                    reserved_extents, new);
+               if (likely(old == reserved_extents))
+                       break;
+               reserved_extents = old;
+       } while (1);
 
        to_free = calc_csum_metadata_size(inode, num_bytes);
        if (nr_extents > 0)
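
The arithmetic in the new loop above is a little indirect: after
"nr_extents = reserved_extents - nr_extents", nr_extents holds the number of
extents being released, and "new" works back out to the current outstanding
count.  One way to read it, sketched as a hypothetical helper built on the
same atomic_read()/atomic_cmpxchg() calls (not part of the patch):

/*
 * Hypothetical helper with the same logic as the loop above: drop
 * 'reserved' down to the current 'outstanding' count and return how
 * many extents were released (0 if nothing was over-reserved).
 */
static int drop_reserved_to_outstanding(atomic_t *reserved,
                                        atomic_t *outstanding)
{
        int cur = atomic_read(reserved);
        int want, old;

        do {
                want = atomic_read(outstanding);
                if (want >= cur)
                        return 0;               /* nothing to give back */
                old = atomic_cmpxchg(reserved, cur, want);
                if (old == cur)
                        return cur - want;      /* our update went in */
                cur = old;                      /* lost a race, retry */
        } while (1);
}

In btrfs_delalloc_release_metadata() the count produced by this logic
(nr_extents) then factors into how much metadata space gets handed back.
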
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9007bbd01dbf69fd53dbd8c9269afe35ee3f79f6..d97b69afbbfb412889335f35dbbe334a7d995d78 100644
@@ -6632,9 +6632,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->index_cnt = (u64)-1;
        ei->last_unlink_trans = 0;
 
-       spin_lock_init(&ei->accounting_lock);
        atomic_set(&ei->outstanding_extents, 0);
-       ei->reserved_extents = 0;
+       atomic_set(&ei->reserved_extents, 0);
 
        ei->ordered_data_close = 0;
        ei->orphan_meta_reserved = 0;
@@ -6670,7 +6669,7 @@ void btrfs_destroy_inode(struct inode *inode)
        WARN_ON(!list_empty(&inode->i_dentry));
        WARN_ON(inode->i_data.nrpages);
        WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
-       WARN_ON(BTRFS_I(inode)->reserved_extents);
+       WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));
 
        /*
         * This can happen where we create an inode, but somebody else also