dax: protect PTE modification on WP fault by radix tree entry lock
authorJan Kara <jack@suse.cz>
Wed, 14 Dec 2016 23:07:50 +0000 (15:07 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 00:04:09 +0000 (16:04 -0800)
Currently the PTE gets updated in wp_pfn_shared() after dax_pfn_mkwrite()
has released the corresponding radix tree entry lock.  When we want to
write-protect the PTE on cache flush, we need the PTE modification to
happen under the radix tree entry lock, so that updates of the PTE and
the radix tree stay consistent (standard faults use the page lock to
ensure this consistency).  So move the PTE update into dax_pfn_mkwrite().
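A minimal sketch of the ordering change (simplified pseudocode in C
comments; the helper names are the ones used in the diff below):

/*
 * Before this patch:
 *
 *   dax_pfn_mkwrite()
 *     spin_lock_irq(&mapping->tree_lock)
 *     radix_tree_tag_set(..., PAGECACHE_TAG_DIRTY)
 *     put_unlocked_mapping_entry()   <- entry was never locked here
 *     spin_unlock_irq(&mapping->tree_lock)
 *   wp_pfn_shared()
 *     finish_mkwrite_fault()         <- PTE updated with no entry lock held
 *
 * After this patch:
 *
 *   dax_pfn_mkwrite()
 *     spin_lock_irq(&mapping->tree_lock)
 *     radix_tree_tag_set(..., PAGECACHE_TAG_DIRTY)
 *     lock_slot()                    <- entry lock taken
 *     spin_unlock_irq(&mapping->tree_lock)
 *     finish_mkwrite_fault()         <- PTE updated under the entry lock
 *     put_locked_mapping_entry()     <- entry lock released
 */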

Link: http://lkml.kernel.org/r/1479460644-25076-20-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/dax.c
mm/memory.c

index df5c0daba69813fe197228ae03eeb27f4446b713..cf7a20a5858b6f585713a6b914c6ede3346320e9 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -783,17 +783,27 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
-       void *entry;
+       void *entry, **slot;
        pgoff_t index = vmf->pgoff;
 
        spin_lock_irq(&mapping->tree_lock);
-       entry = get_unlocked_mapping_entry(mapping, index, NULL);
-       if (!entry || !radix_tree_exceptional_entry(entry))
-               goto out;
+       entry = get_unlocked_mapping_entry(mapping, index, &slot);
+       if (!entry || !radix_tree_exceptional_entry(entry)) {
+               if (entry)
+                       put_unlocked_mapping_entry(mapping, index, entry);
+               spin_unlock_irq(&mapping->tree_lock);
+               return VM_FAULT_NOPAGE;
+       }
        radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
-       put_unlocked_mapping_entry(mapping, index, entry);
-out:
+       entry = lock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
+       /*
+        * If we race with somebody updating the PTE and finish_mkwrite_fault()
+        * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
+        * the fault in either case.
+        */
+       finish_mkwrite_fault(vmf);
+       put_locked_mapping_entry(mapping, index, entry);
        return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
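
The rule this hunk establishes is that any PTE update for a DAX mapping
happens while the radix tree entry lock for the corresponding index is
held, so PTE state and radix tree state cannot be observed out of sync.
A sketch of how the fault side above pairs with the cache-flush side the
changelog alludes to (the flusher shown here is hypothetical, not part
of this patch):

/*
 * Illustrative only -- the flush-side function is hypothetical:
 *
 *   fault side (this patch)           flush side (hypothetical)
 *   -----------------------           -------------------------
 *   lock radix tree entry             lock radix tree entry
 *   finish_mkwrite_fault()            flush caches for the entry
 *     -> PTE made writeable           write-protect and clean PTE
 *   unlock radix tree entry           unlock radix tree entry
 *
 * Since both sides serialize on the same per-entry lock, the flusher
 * cannot write-protect the PTE while a fault is concurrently making
 * it writeable, and vice versa.
 */
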
index edd899d0decb4594ab535a6b64249595ad7eb2a2..57d0bd1bd2c4c4418af5b835f208601a2ed7c8d3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2315,7 +2315,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
                vmf->flags |= FAULT_FLAG_MKWRITE;
                ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
-               if (ret & VM_FAULT_ERROR)
+               if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
                        return ret;
                return finish_mkwrite_fault(vmf);
        }
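
Note that a ->pfn_mkwrite handler which does not update the PTE itself
keeps the old behaviour: returning 0 (neither VM_FAULT_ERROR nor
VM_FAULT_NOPAGE) still makes wp_pfn_shared() call finish_mkwrite_fault().
A hypothetical minimal handler of that kind, matching the
->pfn_mkwrite(vma, vmf) signature used above:

/* Hypothetical example, not part of this patch: a handler that leaves
 * the PTE update to the caller.  Returning 0 tells wp_pfn_shared() to
 * proceed to finish_mkwrite_fault(), exactly as before this change.
 */
static int example_pfn_mkwrite(struct vm_area_struct *vma,
			       struct vm_fault *vmf)
{
	/* driver-private bookkeeping before the PTE is made writeable */
	return 0;
}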