btrfs: convert btrfs_caching_control.count from atomic_t to refcount_t
author    Elena Reshetova <elena.reshetova@intel.com>
          Fri, 3 Mar 2017 08:55:14 +0000 (10:55 +0200)
committer David Sterba <dsterba@suse.com>
          Tue, 18 Apr 2017 12:07:23 +0000 (14:07 +0200)
The refcount_t type and its corresponding API should be used
instead of atomic_t when the variable serves as a reference
counter. This avoids accidental reference counter overflows
that might lead to use-after-free situations.
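
For reference, a minimal sketch of the lifetime pattern this commit
converts to (the struct and helper names my_ctl, my_ctl_get and
my_ctl_put are illustrative, not btrfs identifiers):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	/* Illustrative object whose lifetime is reference counted. */
	struct my_ctl {
		refcount_t count;
		/* ... payload ... */
	};

	static struct my_ctl *my_ctl_alloc(void)
	{
		struct my_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

		if (!ctl)
			return NULL;
		/* The object starts life holding exactly one reference. */
		refcount_set(&ctl->count, 1);
		return ctl;
	}

	static void my_ctl_get(struct my_ctl *ctl)
	{
		/* Saturates and WARNs on overflow instead of wrapping. */
		refcount_inc(&ctl->count);
	}

	static void my_ctl_put(struct my_ctl *ctl)
	{
		/* True only when the last reference is dropped. */
		if (refcount_dec_and_test(&ctl->count))
			kfree(ctl);
	}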

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c

index c4115901d9064f68217c3be825f233d7d6872cf6..cdfc2a46448b411271dd2c880e1d7ebb0316062b 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/security.h>
 #include <linux/sizes.h>
 #include <linux/dynamic_debug.h>
+#include <linux/refcount.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
@@ -518,7 +519,7 @@ struct btrfs_caching_control {
        struct btrfs_work work;
        struct btrfs_block_group_cache *block_group;
        u64 progress;
-       atomic_t count;
+       refcount_t count;
 };
 
 /* Once caching_thread() finds this much free space, it will wake up waiters. */
index 85e51f311b57528579a32a74f0bf92d4b8b760ec..5c84eea60703fe1239613545c1d197cf618281a3 100644 (file)
@@ -316,14 +316,14 @@ get_caching_control(struct btrfs_block_group_cache *cache)
        }
 
        ctl = cache->caching_ctl;
-       atomic_inc(&ctl->count);
+       refcount_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
 }
 
 static void put_caching_control(struct btrfs_caching_control *ctl)
 {
-       if (atomic_dec_and_test(&ctl->count))
+       if (refcount_dec_and_test(&ctl->count))
                kfree(ctl);
 }
 
@@ -599,7 +599,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
-       atomic_set(&caching_ctl->count, 1);
+       refcount_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);
 
@@ -620,7 +620,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                struct btrfs_caching_control *ctl;
 
                ctl = cache->caching_ctl;
-               atomic_inc(&ctl->count);
+               refcount_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);
 
@@ -707,7 +707,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        }
 
        down_write(&fs_info->commit_root_sem);
-       atomic_inc(&caching_ctl->count);
+       refcount_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);
 
@@ -10416,7 +10416,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                                    &fs_info->caching_block_groups, list)
                                if (ctl->block_group == block_group) {
                                        caching_ctl = ctl;
-                                       atomic_inc(&caching_ctl->count);
+                                       refcount_inc(&caching_ctl->count);
                                        break;
                                }
                }
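
Note that the conversion is behavior-preserving on the normal paths;
the difference appears only when a counting bug occurs (this is the
generic refcount_t semantics, not anything btrfs-specific): where an
over-incremented atomic_t can wrap past INT_MAX and eventually reach
zero again, triggering a premature kfree(), refcount_inc() saturates
the counter and emits a warning, so the object is leaked rather than
freed while still in use.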