import PULS_20180308
diff --git a/mm/memory.c b/mm/memory.c
index 12df1500c4c153ea84f9f424484ca416018a3710..4cafab7d55d033f2da89313474fd3bcc6850cf15 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1467,6 +1467,16 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+/*
+ * FOLL_FORCE can write even to unwritable PTEs, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+       return pte_write(pte) ||
+               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 /**
  * follow_page_mask - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1574,7 +1584,7 @@ split_fallthrough:
        }
        if ((flags & FOLL_NUMA) && pte_numa(pte))
                goto no_page;
-       if ((flags & FOLL_WRITE) && !pte_write(pte))
+       if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
                goto unlock;
 
        page = vm_normal_page(vma, address, pte);
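
The new predicate, wired into follow_page_mask() above, is the heart of the fix: a FOLL_WRITE lookup of an unwritable PTE is only honoured when FOLL_FORCE is set, FOLL_COW records that a COW cycle has already happened, and the PTE is dirty, i.e. the page is already the caller's private copy. A minimal userspace sketch of that truth table follows; the flag values, the fake_pte type, and its helpers are stand-ins for illustration, not the kernel's definitions.

/* Userspace model of can_follow_write_pte(); flag values are made up. */
#include <stdbool.h>
#include <stdio.h>

#define FOLL_WRITE 0x01
#define FOLL_FORCE 0x10
#define FOLL_COW   0x4000

struct fake_pte { bool write; bool dirty; };

static bool pte_write(struct fake_pte pte) { return pte.write; }
static bool pte_dirty(struct fake_pte pte) { return pte.dirty; }

static bool can_follow_write_pte(struct fake_pte pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

int main(void)
{
	struct fake_pte ro_clean = { .write = false, .dirty = false };
	struct fake_pte ro_dirty = { .write = false, .dirty = true };

	/* Forced write, but no COW cycle yet: refused, caller must fault. */
	printf("%d\n", can_follow_write_pte(ro_clean, FOLL_WRITE | FOLL_FORCE));
	/* Forced write after COW broke the page and dirtied it: allowed. */
	printf("%d\n", can_follow_write_pte(ro_dirty,
					    FOLL_WRITE | FOLL_FORCE | FOLL_COW));
	return 0;
}
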
@@ -1895,7 +1905,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
-                                       foll_flags &= ~FOLL_WRITE;
+                                       foll_flags |= FOLL_COW;
 
                                cond_resched();
                        }
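
Before this hunk, a successful COW-breaking fault on a read-only VMA made __get_user_pages() drop FOLL_WRITE for the retry, so a racing madvise(MADV_DONTNEED) could discard the private copy and let the now read-only lookup land on the original page: the Dirty COW race (CVE-2016-5195). Setting FOLL_COW instead preserves the write intent and lets can_follow_write_pte() re-verify the PTE on retry. Below is a hedged userspace illustration of the legitimate path this code serves, a debugger-style write through /proc/self/mem, which the kernel services with get_user_pages() and FOLL_FORCE; it assumes a kernel that permits self-writes to /proc/self/mem, and error handling is trimmed.

/* Write through a read-only private mapping via /proc/self/mem. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/exe", O_RDONLY);
	/* PROT_READ + MAP_PRIVATE: a direct store here would SIGSEGV. */
	unsigned char *map = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (fd < 0 || map == MAP_FAILED)
		return 1;

	int mem = open("/proc/self/mem", O_RDWR);
	if (mem < 0)
		return 1;

	/* The kernel breaks COW for us and retries with FOLL_COW set;
	 * the bytes land in our private copy, never in the file. */
	unsigned char patch[4] = { 0x90, 0x90, 0x90, 0x90 };
	if (pwrite(mem, patch, sizeof(patch),
		   (off_t)(uintptr_t)map) != sizeof(patch))
		return 1;

	printf("mapping now starts with 0x%02x\n", map[0]);
	return 0;
}
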
@@ -3254,6 +3264,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        pte_unmap(page_table);
 
+       /* File mapping without ->vm_ops ? */
+       if (vma->vm_flags & VM_SHARED)
+               return VM_FAULT_SIGBUS;
+
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
                return VM_FAULT_SIGBUS;
@@ -3518,6 +3532,10 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pgoff_t pgoff = (((address & PAGE_MASK)
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
+       /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+       if (!vma->vm_ops->fault)
+               return VM_FAULT_SIGBUS;
+
        pte_unmap(page_table);
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
@@ -3731,9 +3749,8 @@ int handle_pte_fault(struct mm_struct *mm,
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
-                               if (likely(vma->vm_ops->fault))
-                                       return do_linear_fault(mm, vma, address,
-                                               pte, pmd, flags, entry);
+                               return do_linear_fault(mm, vma, address,
+                                       pte, pmd, flags, entry);
                        }
                        return do_anonymous_page(mm, vma, address,
                                                 pte, pmd, flags);
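
Taken together, the last three hunks move the sanity checks into the fault handlers themselves: do_linear_fault() now returns SIGBUS when a driver registered vm_ops without a ->fault handler, and do_anonymous_page() refuses to drop anonymous pages into a VM_SHARED (file-like) mapping. The sketch below is a compact plain-C model of the resulting pte_none() dispatch; the types and the VM_SHARED/VM_FAULT_* values are stand-ins, not the kernel's.

/* Plain-C model of the pte_none() fault dispatch after this series. */
#include <stdbool.h>
#include <stdio.h>

#define VM_SHARED       0x8
#define VM_FAULT_OK     0
#define VM_FAULT_SIGBUS 2

struct fake_vm_ops { bool has_fault; };
struct fake_vma {
	unsigned long vm_flags;
	const struct fake_vm_ops *vm_ops;
};

static int do_linear_fault(const struct fake_vma *vma)
{
	/* New guard: vm_ops without ->fault is a driver bug, so SIGBUS. */
	if (!vma->vm_ops->has_fault)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_OK;	/* would go on to __do_fault() */
}

static int do_anonymous_page(const struct fake_vma *vma)
{
	/* New guard: never fill a shared mapping with anonymous pages. */
	if (vma->vm_flags & VM_SHARED)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_OK;	/* would allocate a zeroed anon page */
}

static int handle_pte_none(const struct fake_vma *vma)
{
	if (vma->vm_ops)
		return do_linear_fault(vma);
	return do_anonymous_page(vma);
}

int main(void)
{
	const struct fake_vm_ops no_fault = { .has_fault = false };
	struct fake_vma buggy_driver = { VM_SHARED, &no_fault };
	struct fake_vma plain_anon = { 0, NULL };

	printf("shared mapping, no ->fault: %d (SIGBUS)\n",
	       handle_pte_none(&buggy_driver));
	printf("ordinary anonymous vma:     %d (OK)\n",
	       handle_pte_none(&plain_anon));
	return 0;
}
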