index 6cb60f46cce55761b0ff9d3523be69a706523972..f50cc573815f51894f328b624f536edcdbe23bb8 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -199,6 +199,8 @@ struct rmap_item {
 #define SEQNR_MASK     0x0ff   /* low bits of unstable tree seqnr */
 #define UNSTABLE_FLAG  0x100   /* is a node of the unstable tree */
 #define STABLE_FLAG    0x200   /* is listed from the stable tree */
+#define KSM_FLAG_MASK  (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
+                               /* to mask all the flags */
 
 /* The stable and unstable tree heads */
 static struct rb_root one_stable_tree[1] = { RB_ROOT };
@@ -1133,6 +1135,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
        } else {
                newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
                                               vma->vm_page_prot));
+               /*
+                * We're replacing an anonymous page with a zero page, which is
+                * not anonymous. We need to do proper accounting; otherwise we
+                * will get wrong values in /proc, and a BUG message in dmesg
+                * when tearing down the mm.
+                */
+               dec_mm_counter(mm, MM_ANONPAGES);
        }
 
        flush_cache_page(vma, addr, pte_pfn(*ptep));
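
The accounting change above is observable from userspace. Assuming ksmd is running (/sys/kernel/mm/ksm/run set to 1) and /sys/kernel/mm/ksm/use_zero_pages is enabled, zero-filled anonymous pages in a MADV_MERGEABLE range get replaced by the shared zero page, and with this fix RssAnon in /proc/self/status shrinks to match. A minimal sketch; the buffer size and sleep interval are arbitrary:

/* Sketch: watch RssAnon drop as ksmd replaces zero-filled pages. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

static void print_rss_anon(const char *tag)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "RssAnon:", 8))
                        printf("%s %s", tag, line);
        fclose(f);
}

int main(void)
{
        size_t len = 64UL << 20;        /* 64 MiB of anonymous memory */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;
        memset(buf, 0, len);            /* write faults allocate real anon pages */
        print_rss_anon("before:");
        madvise(buf, len, MADV_MERGEABLE);
        sleep(30);                      /* give ksmd time to scan the range */
        print_rss_anon("after: ");
        return 0;
}

Without the dec_mm_counter() call, the MM_ANONPAGES counter stays too high after the replacement, which is the /proc mismatch and teardown warning the comment refers to.
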
@@ -2078,8 +2087,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
        tree_rmap_item =
                unstable_tree_search_insert(rmap_item, page, &tree_page);
        if (tree_rmap_item) {
+               bool split;
+
                kpage = try_to_merge_two_pages(rmap_item, page,
                                                tree_rmap_item, tree_page);
+               /*
+                * If both pages we tried to merge belong to the same compound
+                * page, then we actually ended up increasing the reference
+                * count of the same compound page twice, and split_huge_page
+                * failed.
+                * Here we set a flag if that happened, and we use it later to
+                * try split_huge_page again. Since we call put_page right
+                * afterwards, the reference count will be correct and
+                * split_huge_page should succeed.
+                */
+               split = PageTransCompound(page)
+                       && compound_head(page) == compound_head(tree_page);
                put_page(tree_page);
                if (kpage) {
                        /*
@@ -2106,6 +2129,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
                                break_cow(tree_rmap_item);
                                break_cow(rmap_item);
                        }
+               } else if (split) {
+                       /*
+                        * We are here if we tried to merge two pages and
+                        * failed because they both belonged to the same
+                        * compound page. We will split the page now, but no
+                        * merging will take place.
+                        * We do not want to add the cost of a full lock; if
+                        * the page is locked, it is better to skip it and
+                        * perhaps try again later.
+                        */
+                       if (!trylock_page(page))
+                               return;
+                       split_huge_page(page);
+                       unlock_page(page);
                }
        }
 }
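
For context, the case the split flag handles shows up naturally with THP: two identical 4 KiB pages that are subpages of the same compound page cannot be merged until the huge page is split, and the extra reference taken during the failed merge is what makes an immediate split_huge_page() fail. A rough userspace sketch of how such a range comes about (THP and ksmd are assumed to be enabled; the pages_sharing counter is read purely for illustration):

/* Sketch: identical 4 KiB patterns inside THP-backed memory. */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 8UL << 20;         /* a few 2 MiB huge pages worth */
        long page = sysconf(_SC_PAGESIZE);
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;
        madvise(buf, len, MADV_HUGEPAGE);       /* ask for THP backing */

        /* Fill every subpage with the same non-zero pattern. */
        for (size_t off = 0; off < len; off += page)
                memset(buf + off, 0xab, page);

        /*
         * The subpages are identical but share compound pages, so KSM
         * has to split the huge pages before any pair can be merged.
         */
        madvise(buf, len, MADV_MERGEABLE);
        sleep(30);
        system("cat /sys/kernel/mm/ksm/pages_sharing");
        return 0;
}

Using trylock_page() keeps the split best effort: a page that happens to be locked is skipped and will be considered again on a later scan pass.
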
@@ -2527,10 +2564,15 @@ again:
                anon_vma_lock_read(anon_vma);
                anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                               0, ULONG_MAX) {
+                       unsigned long addr;
+
                        cond_resched();
                        vma = vmac->vma;
-                       if (rmap_item->address < vma->vm_start ||
-                           rmap_item->address >= vma->vm_end)
+
+                       /* Ignore the stable/unstable/seqnr flags */
+                       addr = rmap_item->address & ~KSM_FLAG_MASK;
+
+                       if (addr < vma->vm_start || addr >= vma->vm_end)
                                continue;
                        /*
                         * Initially we examine only the vma which covers this
@@ -2544,8 +2586,7 @@ again:
                        if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                                continue;
 
-                       if (!rwc->rmap_one(page, vma,
-                                       rmap_item->address, rwc->arg)) {
+                       if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
                                anon_vma_unlock_read(anon_vma);
                                return;
                        }
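
The masking works because rmap_item->address is always page aligned, so its low bits are free to carry the seqnr/unstable/stable markers defined at the top of the file; clearing KSM_FLAG_MASK recovers the address that the vm_start/vm_end checks and rmap_one() expect. A standalone sketch using the same constants:

/* Sketch: KSM packs flags into the low bits of a page-aligned address. */
#include <stdio.h>

#define SEQNR_MASK      0x0ff   /* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG   0x100   /* is a node of the unstable tree */
#define STABLE_FLAG     0x200   /* is listed from the stable tree */
#define KSM_FLAG_MASK   (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)

int main(void)
{
        unsigned long addr = 0x7f12345ff000UL;          /* page aligned */
        unsigned long stored = addr | STABLE_FLAG;      /* as kept in rmap_item */

        printf("stored %#lx\n", stored);                        /* 0x7f12345ff200 */
        printf("masked %#lx\n", stored & ~KSM_FLAG_MASK);       /* original address */
        return 0;
}
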