From c8fd0d37f81dd38e3f319f4938b45a5aaf0dfc58 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng"
Date: Mon, 28 Aug 2017 15:41:28 +0800
Subject: [PATCH] ceph: handle race between vmtruncate and queuing cap snap

It's possible that we create a cap snap while there is a pending
vmtruncate (the truncate hasn't been processed by the worker thread
yet). In that case we should truncate dirty pages beyond
capsnap->size.

Signed-off-by: "Yan, Zheng"
Signed-off-by: Ilya Dryomov
---
 fs/ceph/inode.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index a19fafdf87f8..373dab5173ca 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1833,9 +1833,20 @@ retry:
 	 * possibly truncate them.. so write AND block!
 	 */
 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
+		struct ceph_cap_snap *capsnap;
+		to = ci->i_truncate_size;
+		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
+			// MDS should have revoked Frw caps
+			WARN_ON_ONCE(capsnap->writing);
+			if (capsnap->dirty_pages && capsnap->size > to)
+				to = capsnap->size;
+		}
+		spin_unlock(&ci->i_ceph_lock);
 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
 		     inode);
-		spin_unlock(&ci->i_ceph_lock);
+
+		truncate_pagecache(inode, to);
+
 		filemap_write_and_wait_range(&inode->i_data, 0,
 					     inode->i_sb->s_maxbytes);
 		goto retry;
-- 
2.20.1
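
A side note for readers without the ceph tree at hand: the core of this
fix is raising the truncation target from ci->i_truncate_size to the
largest capsnap->size that still has dirty pages, so a queued cap snap
never loses pages it has yet to write back. Below is a minimal
standalone C model of just that computation. It is a sketch, not kernel
code: the cap_snap struct and the hand-rolled list are simplified
stand-ins for struct ceph_cap_snap and the i_cap_snaps list.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct ceph_cap_snap. */
struct cap_snap {
	unsigned long long size;	/* i_size when the snap was queued */
	int dirty_pages;		/* pages still awaiting writeback */
	struct cap_snap *next;
};

/*
 * Mirrors the hunk above: start from the pending truncate size and
 * raise it to cover any snap that still has dirty pages.
 */
static unsigned long long truncate_target(unsigned long long i_truncate_size,
					  const struct cap_snap *snaps)
{
	unsigned long long to = i_truncate_size;
	const struct cap_snap *capsnap;

	for (capsnap = snaps; capsnap; capsnap = capsnap->next) {
		if (capsnap->dirty_pages && capsnap->size > to)
			to = capsnap->size;
	}
	return to;
}

int main(void)
{
	/*
	 * A truncate to 4096 is pending, but a cap snap was queued when
	 * i_size was 16384 and it still has dirty pages: the pagecache
	 * may only be truncated down to 16384 for now.
	 */
	struct cap_snap clean = { .size = 8192, .dirty_pages = 0, .next = NULL };
	struct cap_snap dirty = { .size = 16384, .dirty_pages = 4, .next = &clean };

	assert(truncate_target(4096, &dirty) == 16384);

	/* Once the snap data is flushed, nothing holds the truncate back. */
	dirty.dirty_pages = 0;
	assert(truncate_target(4096, &dirty) == 4096);

	printf("truncate target model OK\n");
	return 0;
}

Once the dirty snap pages are written out, the worker retries (goto
retry) and the truncate can proceed to its real target.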