Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel...
index 7ff38f1a66ec22b3d59dbf7c93d4b73fca7cce6e..7f855206e7fb2bb1f9a30fcf7745bff7a2ae3adb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -89,6 +89,24 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
+static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+       return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+
+/* Update vma->vm_page_prot to reflect vma->vm_flags. */
+void vma_set_page_prot(struct vm_area_struct *vma)
+{
+       unsigned long vm_flags = vma->vm_flags;
+
+       vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+       if (vma_wants_writenotify(vma)) {
+               vm_flags &= ~VM_SHARED;
+               vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
+                                                    vm_flags);
+       }
+}
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio __read_mostly = 50;        /* default is 50% */
 unsigned long sysctl_overcommit_kbytes __read_mostly;
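
The two helpers added above centralize vm_page_prot updates. vm_get_page_prot() computes baseline protections from vm_flags, and pgprot_modify() folds them into the existing vm_page_prot while keeping the bits the architecture wants preserved (cacheability on x86; this series also adds a pass-through fallback for architectures that do not define pgprot_modify()). When vma_wants_writenotify() says writes must fault, the protections are recomputed without VM_SHARED, leaving the mapping write-protected in the page tables. A driver that makes its mapping uncached in its ->mmap handler can now rely on that choice surviving; a minimal sketch, where demo_mmap() and demo_pfn are hypothetical names:

#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long demo_pfn;  /* hypothetical: PFN of device memory */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Request an uncached mapping; vma_set_page_prot() reapplies
         * protections via pgprot_modify(), so this is preserved. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, demo_pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

Previously mmap_region() open-coded this preservation only for the fully noncached case; see the hunk removed further below.
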
@@ -368,16 +387,18 @@ static int browse_rb(struct rb_root *root)
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev) {
-                       pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
+                       pr_emerg("vm_start %lx < prev %lx\n",
+                                 vma->vm_start, prev);
                        bug = 1;
                }
                if (vma->vm_start < pend) {
-                       pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
+                       pr_emerg("vm_start %lx < pend %lx\n",
+                                 vma->vm_start, pend);
                        bug = 1;
                }
                if (vma->vm_start > vma->vm_end) {
-                       pr_emerg("vm_end %lx < vm_start %lx\n",
-                               vma->vm_end, vma->vm_start);
+                       pr_emerg("vm_start %lx > vm_end %lx\n",
+                                 vma->vm_start, vma->vm_end);
                        bug = 1;
                }
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
@@ -408,8 +429,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-               BUG_ON(vma != ignore &&
-                      vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
+               VM_BUG_ON_VMA(vma != ignore &&
+                       vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+                       vma);
        }
 }
 
@@ -419,8 +441,10 @@ static void validate_mm(struct mm_struct *mm)
        int i = 0;
        unsigned long highest_address = 0;
        struct vm_area_struct *vma = mm->mmap;
+
        while (vma) {
                struct anon_vma_chain *avc;
+
                vma_lock_anon_vma(vma);
                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                        anon_vma_interval_tree_verify(avc);
@@ -435,15 +459,16 @@ static void validate_mm(struct mm_struct *mm)
        }
        if (highest_address != mm->highest_vm_end) {
                pr_emerg("mm->highest_vm_end %lx, found %lx\n",
-                      mm->highest_vm_end, highest_address);
+                         mm->highest_vm_end, highest_address);
                bug = 1;
        }
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count) {
-               pr_emerg("map_count %d rb %d\n", mm->map_count, i);
+               if (i != -1)
+                       pr_emerg("map_count %d rb %d\n", mm->map_count, i);
                bug = 1;
        }
-       BUG_ON(bug);
+       VM_BUG_ON_MM(bug, mm);
 }
 #else
 #define validate_mm_rb(root, ignore) do { } while (0)
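
These validate_mm() changes swap plain BUG_ON() for VM_BUG_ON_VMA() and VM_BUG_ON_MM(), which dump the offending vm_area_struct or mm_struct before crashing, and the new i != -1 check stops validate_mm() from printing a redundant map_count mismatch when browse_rb() has already flagged a broken tree by returning -1. A minimal sketch of the assertion variant, assuming CONFIG_DEBUG_VM and a hypothetical check_vma():

#include <linux/mm.h>
#include <linux/mmdebug.h>

static void check_vma(struct vm_area_struct *vma)
{
        /* With CONFIG_DEBUG_VM this dumps *vma (via dump_vma()) before
         * BUG()ing; without it, the check compiles away entirely. */
        VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
}
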
@@ -786,8 +811,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
        if (!anon_vma && adjust_next)
                anon_vma = next->anon_vma;
        if (anon_vma) {
-               VM_BUG_ON(adjust_next && next->anon_vma &&
-                         anon_vma != next->anon_vma);
+               VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
+                         anon_vma != next->anon_vma, next);
                anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_pre_update_vma(vma);
                if (adjust_next)
@@ -1469,11 +1494,16 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
                return 1;
 
-       /* The open routine did something to the protections already? */
+       /* The open routine did something to the protections that pgprot_modify
+        * won't preserve? */
        if (pgprot_val(vma->vm_page_prot) !=
-           pgprot_val(vm_get_page_prot(vm_flags)))
+           pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
                return 0;
 
+       /* Do we need to track softdirty? */
+       if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+               return 1;
+
        /* Specialty mapping? */
        if (vm_flags & VM_PFNMAP)
                return 0;
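
The IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) test is the substantive fix in this hunk: after VM_SOFTDIRTY is cleared through /proc/<pid>/clear_refs, a shared writable mapping must fault on the next store so the kernel can set the soft-dirty bit again. The behavior is visible from userspace via bit 55 of each /proc/self/pagemap entry; a sketch (error handling trimmed; reading pagemap may require privilege on some kernel versions):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Read the soft-dirty flag: bit 55 of the page's pagemap entry. */
static int soft_dirty(void *addr)
{
        uint64_t ent = 0;
        off_t off = ((uintptr_t)addr / getpagesize()) * sizeof(ent);
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0 || pread(fd, &ent, sizeof(ent), off) != sizeof(ent))
                return -1;
        close(fd);
        return (int)((ent >> 55) & 1);
}

int main(void)
{
        /* A shared writable mapping: the case the hunk above fixes. */
        char *p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        int fd = open("/proc/self/clear_refs", O_WRONLY);

        p[0] = 1;                       /* fault the page in */
        write(fd, "4", 1);              /* "4" = clear soft-dirty bits */
        close(fd);
        printf("after clear: %d\n", soft_dirty(p));     /* expect 0 */
        p[0] = 2;                       /* write must fault and re-mark */
        printf("after write: %d\n", soft_dirty(p));     /* expect 1 */
        return 0;
}
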
@@ -1609,21 +1639,6 @@ munmap_back:
                        goto free_vma;
        }
 
-       if (vma_wants_writenotify(vma)) {
-               pgprot_t pprot = vma->vm_page_prot;
-
-               /* Can vma->vm_page_prot have changed??
-                *
-                * Answer: Yes, drivers may have changed it in their
-                *         f_op->mmap method.
-                *
-                * Ensures that vmas marked as uncached stay that way.
-                */
-               vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
-               if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
-                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       }
-
        vma_link(mm, vma, prev, rb_link, rb_parent);
        /* Once vma denies write, undo our temporary denial count */
        if (file) {
@@ -1657,6 +1672,8 @@ out:
         */
        vma->vm_flags |= VM_SOFTDIRTY;
 
+       vma_set_page_prot(vma);
+
        return addr;
 
 unmap_and_free_vma:
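
Together, the last two hunks replace mmap_region()'s open-coded fixup, which only preserved a fully noncached vm_page_prot, with one vma_set_page_prot() call made after VM_SOFTDIRTY is set. Since the update goes through pgprot_modify(), any architecture-preserved bits survive, not just the all-noncached case. Roughly what x86 does, as a sketch modeled on arch/x86/include/asm/pgtable.h with a demo_ name:

/* Modeled on the x86 pgprot_modify(). */
static inline pgprot_t demo_pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        /* _PAGE_CHG_MASK covers the PFN plus PCD/PWT (cacheability),
         * accessed and dirty bits, among others: those survive from
         * oldprot, while the protection bits come from newprot. */
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);

        return __pgprot(preservebits | addbits);
}
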
@@ -2848,7 +2865,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                         * safe. It is only safe to keep the vm_pgoff
                         * linear if there are no pages mapped yet.
                         */
-                       VM_BUG_ON(faulted_in_anon_vma);
+                       VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
                        *vmap = vma = new_vma;
                }
                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
@@ -3196,7 +3213,7 @@ void __init mmap_init(void)
 {
        int ret;
 
-       ret = percpu_counter_init(&vm_committed_as, 0);
+       ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
        VM_BUG_ON(ret);
 }