btrfs: Separate scrub_blocked_if_needed() to scrub_pause_on/off()
author Zhaolei <zhaolei@cn.fujitsu.com>
Wed, 5 Aug 2015 08:43:28 +0000 (16:43 +0800)
committer Chris Mason <clm@fb.com>
Sun, 9 Aug 2015 14:07:12 +0000 (07:07 -0700)
This reduces existing duplicated code that is similar to
scrub_blocked_if_needed() but cannot call it because of small
differences.
It will also be used by my next patch, which deals with a similar case.
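
For illustration, a sketch of how a later caller could use the split
helpers; the work done between the two calls (wait_for_something() here)
is only a hypothetical placeholder for whatever must run while the scrub
is accounted as paused:

    scrub_pause_on(fs_info);        /* count this scrub as paused, wake waiters */
    wait_for_something(fs_info);    /* hypothetical work done while paused */
    scrub_pause_off(fs_info);       /* block if a pause is requested, then resume */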

Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
fs/btrfs/scrub.c

index fadf5fcd930676fd27d708b081d6c87d7773436b..08872026a254baf7408a5f856383fbfe2c856562 100644
@@ -332,11 +332,14 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
        }
 }
 
-static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 {
        atomic_inc(&fs_info->scrubs_paused);
        wake_up(&fs_info->scrub_pause_wait);
+}
 
+static void scrub_pause_off(struct btrfs_fs_info *fs_info)
+{
        mutex_lock(&fs_info->scrub_lock);
        __scrub_blocked_if_needed(fs_info);
        atomic_dec(&fs_info->scrubs_paused);
@@ -345,6 +348,12 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
        wake_up(&fs_info->scrub_pause_wait);
 }
 
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+       scrub_pause_on(fs_info);
+       scrub_pause_off(fs_info);
+}
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)