Btrfs: Use a mutex in the extent buffer for tree block locking
author Chris Mason <chris.mason@oracle.com>
Tue, 22 Jul 2008 15:18:08 +0000 (11:18 -0400)
committer Chris Mason <chris.mason@oracle.com>
Thu, 25 Sep 2008 15:04:05 +0000 (11:04 -0400)
This replaces the use of the page cache lock bit for locking, which wasn't
suitable for block size < page size and couldn't be used recursively.

The mutexes alone don't fix either problem, but they are the first step.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/locking.c

index ccd49322f793364f74269091e83e44c3d1a0a141..c51cd11de2062243995c991cda0ea625ae1d72fa 100644 (file)
@@ -1451,7 +1451,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
                struct extent_buffer *buf;
                buf = btrfs_find_tree_block(root, bytenr, num_bytes);
                if (buf) {
-                       if (!btrfs_try_tree_lock(buf) &&
+                       if (btrfs_try_tree_lock(buf) &&
                            btrfs_buffer_uptodate(buf, 0)) {
                                u64 transid =
                                    root->fs_info->running_transaction->transid;
@@ -3345,11 +3345,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                set_state_private(block_group_cache, found_key.objectid,
                                  (unsigned long)cache);
 
-               /* hack for now */
-               if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
-                       cache_block_group(root->fs_info->extent_root,
-                                         cache);
-               }
                if (key.objectid >=
                    btrfs_super_total_bytes(&info->super_copy))
                        break;
index 32bb4ed3723deed4c3eeae52654293fddc3ffe92..7380449cb5b3c535ddb81dee1e44c0ddb4124ebd 100644 (file)
@@ -2690,6 +2690,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        eb = kmem_cache_zalloc(extent_buffer_cache, mask);
        eb->start = start;
        eb->len = len;
+       mutex_init(&eb->mutex);
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&eb->leak_list, &buffers);
        spin_unlock_irqrestore(&leak_lock, flags);
@@ -2837,6 +2838,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
+               lock_page(page);
                if (i == 0)
                        set_page_extent_head(page, eb->len);
                else
@@ -2854,6 +2856,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
                        end  = start + PAGE_CACHE_SIZE - 1;
                        if (test_range_bit(tree, start, end,
                                           EXTENT_DIRTY, 0)) {
+                               unlock_page(page);
                                continue;
                        }
                }
@@ -2865,6 +2868,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
                                                PAGECACHE_TAG_DIRTY);
                }
                read_unlock_irq(&page->mapping->tree_lock);
+               unlock_page(page);
        }
        return 0;
 }
@@ -2893,12 +2897,17 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
                 * on us if the page isn't already dirty.
                 */
                if (i == 0) {
+                       lock_page(page);
                        set_page_extent_head(page, eb->len);
                } else if (PagePrivate(page) &&
                           page->private != EXTENT_PAGE_PRIVATE) {
+                       lock_page(page);
                        set_page_extent_mapped(page);
+                       unlock_page(page);
                }
                __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+               if (i == 0)
+                       unlock_page(page);
        }
        return set_extent_dirty(tree, eb->start,
                                eb->start + eb->len - 1, GFP_NOFS);
index dd367617d780d6ce9ccb60bad99f98a504dd2c1e..6c03e6a1993836e94d001fdaacefe07ec639645d 100644 (file)
@@ -90,6 +90,7 @@ struct extent_buffer {
        int flags;
        struct list_head leak_list;
        struct rb_node rb_node;
+       struct mutex mutex;
 };
 
 struct extent_map_tree;
index 058a506a0dd887649441dc1432f895d99fc67d16..01a9ac2be3f7d2aaf828f493e1e89ea413328e30 100644 (file)
@@ -29,32 +29,31 @@ int btrfs_tree_lock(struct extent_buffer *eb)
 {
        int i;
 
-       if (!TestSetPageLocked(eb->first_page))
+       if (mutex_trylock(&eb->mutex))
                return 0;
        for (i = 0; i < 512; i++) {
                cpu_relax();
-               if (!TestSetPageLocked(eb->first_page))
+               if (mutex_trylock(&eb->mutex))
                        return 0;
        }
        cpu_relax();
-       lock_page(eb->first_page);
+       mutex_lock(&eb->mutex);
        return 0;
 }
 
 int btrfs_try_tree_lock(struct extent_buffer *eb)
 {
-       return TestSetPageLocked(eb->first_page);
+       return mutex_trylock(&eb->mutex);
 }
 
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
-       WARN_ON(!PageLocked(eb->first_page));
-       unlock_page(eb->first_page);
+       mutex_unlock(&eb->mutex);
        return 0;
 }
 
 int btrfs_tree_locked(struct extent_buffer *eb)
 {
-       return PageLocked(eb->first_page);
+       return mutex_is_locked(&eb->mutex);
 }