[RAMEN9610-12171] mm: hpa: change allocate logic from buddy
diff --git a/mm/mremap.c b/mm/mremap.c
index 049470aa1e3eefc88407e9a35f1ca252fd01912d..88ceeb4ef817e7c92286c3aab2b37e045a1f8022 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
-               unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+               unsigned long new_addr, bool need_rmap_locks)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                /*
-                * If we are remapping a dirty PTE, make sure
+                * If we are remapping a valid PTE, make sure
                 * to flush TLB before we drop the PTL for the
-                * old PTE or we may race with page_mkclean().
+                * PTE.
                 *
-                * This check has to be done after we removed the
-                * old PTE from page tables or another thread may
-                * dirty it after the check and before the removal.
+                * NOTE! Both old and new PTL matter: the old one
+                * for racing with page_mkclean(), the new one to
+                * make sure the physical page stays valid until
+                * the TLB entry for the old mapping has been
+                * flushed.
                 */
-               if (pte_present(pte) && pte_dirty(pte))
+               if (pte_present(pte))
                        force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        }
 
        arch_leave_lazy_mmu_mode();
+       if (force_flush)
+               flush_tlb_range(vma, old_end - len, old_end);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
-       if (force_flush)
-               flush_tlb_range(vma, old_end - len, old_end);
-       else
-               *need_flush = true;
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (need_rmap_locks)
                drop_rmap_locks(vma);
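
The new comment in this hunk is the heart of the fix. Previously only dirty PTEs forced a flush while the page-table locks were held; for clean PTEs the flush was deferred until after both PTLs had been dropped (and, via *need_flush, until after the whole move_page_tables() loop). That leaves a window in which the page is already reachable through the new mapping while another CPU still holds a stale TLB entry for the old one. A sketch of the problem and of the ordering this hunk enforces (illustrative fragment only, simplified from the code above):

	/*
	 * Window left open by the old ordering (timeline is illustrative):
	 *
	 *   CPU 0 (mremap)                        CPU 1
	 *   pte = ptep_get_and_clear(old_addr)    stale TLB entry for old_addr
	 *   set_pte_at(new_addr, pte)
	 *   spin_unlock(new_ptl / old_ptl)
	 *                                         munmaps the new range,
	 *                                         underlying page is freed
	 *   flush_tlb_range(old range)  <-- too late: the freed page was
	 *                                   still reachable through the
	 *                                   stale old_addr TLB entry
	 *
	 * Hence: any *present* PTE forces a flush, and the flush happens
	 * before either page-table lock is released.
	 */
	if (pte_present(pte))
		force_flush = true;
	/* ... move the pte to its new slot ... */
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap_unlock(old_pte - 1, old_ptl);
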
@@ -200,7 +200,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
-       bool need_flush = false;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
@@ -231,8 +230,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                                if (need_rmap_locks)
                                        take_rmap_locks(vma);
                                moved = move_huge_pmd(vma, old_addr, new_addr,
-                                                   old_end, old_pmd, new_pmd,
-                                                   &need_flush);
+                                                   old_end, old_pmd, new_pmd);
                                if (need_rmap_locks)
                                        drop_rmap_locks(vma);
                                if (moved)
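
Because the call above now passes one argument fewer, the declaration of move_huge_pmd() has to drop its bool *need_flush parameter as well. That counterpart change lives outside this file (include/linux/huge_mm.h); assuming this tree follows the upstream fix, the prototype after this patch would read roughly:

	extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
				  unsigned long new_addr, unsigned long old_end,
				  pmd_t *old_pmd, pmd_t *new_pmd);
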
@@ -250,10 +248,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
-                         new_pmd, new_addr, need_rmap_locks, &need_flush);
+                         new_pmd, new_addr, need_rmap_locks);
        }
-       if (need_flush)
-               flush_tlb_range(vma, old_end-len, old_addr);
 
        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
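
With the function-level deferred flush removed, every TLB flush for the moved range now happens inside move_ptes() or move_huge_pmd() while the relevant page-table locks are still held. The huge-PMD path therefore needs the same reordering in its own body (mm/huge_memory.c, not part of this file's diff). A sketch of how its tail is expected to look after the matching change, mirroring the move_ptes() hunk above (helper calls elided; this is an assumption based on the upstream version of the fix, not code shown in this diff):

	pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
	if (pmd_present(pmd))		/* any present PMD, not just a dirty one */
		force_flush = true;
	/* ... deposit the page table and set_pmd_at() the new address ... */
	if (force_flush)
		flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);	/* flush done, now safe to drop the locks */
	spin_unlock(old_ptl);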