From f1167009711032b0d747ec89a632a626c901a1ad Mon Sep 17 00:00:00 2001
From: Niu Yawei
Date: Fri, 1 Feb 2013 21:31:27 -0500
Subject: [PATCH] ext4: fix race in ext4_mb_add_n_trim()

In ext4_mb_add_n_trim(), lg_prealloc_lock should be taken when changing
the lg_prealloc_list.

Signed-off-by: Niu Yawei
Signed-off-by: "Theodore Ts'o"
Cc: stable@vger.kernel.org
---
 fs/ext4/mballoc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 1bf6fe785c4f..061727acd990 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4136,7 +4136,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 	/* The max size of hash table is PREALLOC_TB_SIZE */
 	order = PREALLOC_TB_SIZE - 1;
 	/* Add the prealloc space to lg */
-	rcu_read_lock();
+	spin_lock(&lg->lg_prealloc_lock);
 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
 						pa_inode_list) {
 		spin_lock(&tmp_pa->pa_lock);
@@ -4160,12 +4160,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 	if (!added)
 		list_add_tail_rcu(&pa->pa_inode_list,
 					&lg->lg_prealloc_list[order]);
-	rcu_read_unlock();
+	spin_unlock(&lg->lg_prealloc_lock);

 	/* Now trim the list to be not more than 8 elements */
 	if (lg_prealloc_count > 8) {
 		ext4_mb_discard_lg_preallocations(sb, lg,
-				order, lg_prealloc_count);
+						  order, lg_prealloc_count);
 		return;
 	}
 	return ;
-- 
2.20.1
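
Editor's note: the sketch below is not part of the patch. It is a minimal user-space C analogue (hypothetical names: prealloc_list, prealloc_lock, list_add_sorted; pthread spinlocks standing in for the kernel's spin_lock) of the rule the fix enforces: a read-side primitive such as rcu_read_lock() only makes traversal safe, so any path that modifies the list must hold the list's own lock.

/*
 * Sketch only: writers serialize on the list's spinlock.
 * Build with: cc -O2 -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

static struct node *prealloc_list;		/* shared list head */
static pthread_spinlock_t prealloc_lock;	/* protects list modification */

/* Insert in ascending order; mirrors the add-to-list step in the patch. */
static void list_add_sorted(int val)
{
	struct node *n = malloc(sizeof(*n));
	struct node **pp;

	n->val = val;
	pthread_spin_lock(&prealloc_lock);	/* writers must hold the lock */
	for (pp = &prealloc_list; *pp && (*pp)->val < val; pp = &(*pp)->next)
		;
	n->next = *pp;
	*pp = n;
	pthread_spin_unlock(&prealloc_lock);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 1000; i++)
		list_add_sorted(rand() % 100);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int count = 0;

	pthread_spin_init(&prealloc_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	for (struct node *n = prealloc_list; n; n = n->next)
		count++;
	/* 4000 if no insertions were lost; without the lock, nodes can vanish */
	printf("nodes inserted: %d\n", count);
	return 0;
}

Dropping the pthread_spin_lock()/unlock() pair in list_add_sorted() reproduces the class of race the patch addresses: two concurrent inserts can link into the same predecessor and silently lose a node, which is why ext4_mb_add_n_trim() must take lg_prealloc_lock rather than rely on rcu_read_lock() alone.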