From: Andrea Arcangeli
Date: Thu, 13 Jan 2011 23:46:53 +0000 (-0800)
Subject: thp: split_huge_page anon_vma ordering dependency
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=05759d380a9d7f131a475186c07fce58ceaa8902;p=GitHub%2FLineageOS%2Fandroid_kernel_samsung_universal7580.git

thp: split_huge_page anon_vma ordering dependency

This documents how split_huge_page is safe vs new vma insertions into
the anon_vma that may have already released the anon_vma->lock but not
established pmds yet when split_huge_page starts.

Signed-off-by: Andrea Arcangeli
Acked-by: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0c1e8f939f7..76350793289 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -841,6 +841,19 @@ static void __split_huge_page(struct page *page,
 			continue;
 		mapcount += __split_huge_page_splitting(page, vma, addr);
 	}
+	/*
+	 * It is critical that new vmas are added to the tail of the
+	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
+	 * and establishes a child pmd before
+	 * __split_huge_page_splitting() freezes the parent pmd (so if
+	 * we fail to prevent copy_huge_pmd() from running until the
+	 * whole __split_huge_page() is complete), we will still see
+	 * the newly established pmd of the child later during the
+	 * walk, to be able to set it as pmd_trans_splitting too.
+	 */
+	if (mapcount != page_mapcount(page))
+		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
+		       mapcount, page_mapcount(page));
 	BUG_ON(mapcount != page_mapcount(page));
 
 	__split_huge_page_refcount(page);
@@ -854,6 +867,9 @@ static void __split_huge_page(struct page *page,
 			continue;
 		mapcount2 += __split_huge_page_map(page, vma, addr);
 	}
+	if (mapcount != mapcount2)
+		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
+		       mapcount, mapcount2, page_mapcount(page));
 	BUG_ON(mapcount != mapcount2);
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index e41375a6b02..92e14dcfe73 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -177,6 +177,10 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 	list_add(&avc->same_vma, &vma->anon_vma_chain);
 
 	anon_vma_lock(anon_vma);
+	/*
+	 * It's critical to add new vmas to the tail of the anon_vma,
+	 * see comment in huge_memory.c:__split_huge_page().
+	 */
 	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 	anon_vma_unlock(anon_vma);
 }
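
For illustration, here is a minimal userspace sketch of the ordering property both comments rely on. It is not kernel code: the names (vma_stub, the splitting flag, the list helpers) are hypothetical simplifications of list_head/container_of. The point it demonstrates is that a head-to-tail walk which is already in progress still visits an entry appended at the tail, the analogue of __split_huge_page() still seeing a vma whose pmd copy_huge_pmd() established after the split started.

/*
 * Illustrative userspace sketch, not kernel code.  Models the anon_vma
 * list as a list_head-style circular list and shows that an in-flight
 * head-to-tail walk still reaches an entry appended at the tail, so the
 * walker can mark it too.  All names (vma_stub, splitting, ...) are
 * hypothetical simplifications.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

/* append right before head, i.e. at the tail, like list_add_tail() */
static void list_append_tail(struct list_node *node, struct list_node *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

struct vma_stub {
	struct list_node same_anon_vma;	/* first member, so a plain cast acts as a poor man's container_of */
	int id;
	bool splitting;			/* stands in for pmd_trans_splitting */
};

int main(void)
{
	struct list_node anon_vma_head;
	struct vma_stub parent = { .id = 0 }, child = { .id = 1 };
	struct list_node *pos;
	int visited = 0;

	list_init(&anon_vma_head);
	list_append_tail(&parent.same_anon_vma, &anon_vma_head);

	/*
	 * Walk head-to-tail and mark every vma, like the first
	 * __split_huge_page_splitting() loop.  While the walk is at the
	 * parent, append the child vma at the tail, mimicking a child
	 * whose vma/pmd shows up after the walk has already started.
	 */
	for (pos = anon_vma_head.next; pos != &anon_vma_head; pos = pos->next) {
		struct vma_stub *vma = (struct vma_stub *)pos;

		vma->splitting = true;
		visited++;

		if (vma == &parent)
			list_append_tail(&child.same_anon_vma, &anon_vma_head);
	}

	/* tail insertion guarantees the in-flight walk still saw the child */
	assert(parent.splitting && child.splitting);
	printf("visited %d vmas, child marked splitting: %s\n",
	       visited, child.splitting ? "yes" : "no");
	return 0;
}

Had the mid-walk insertion gone to the head of the list instead, it would land behind the walker's cursor and never be marked, tripping the assert; that is the failure mode the tail-insertion rule in anon_vma_chain_link() exists to prevent.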