mm: page_vma_mapped_walk(): add a level of indentation
author Hugh Dickins <hughd@google.com>
Fri, 25 Jun 2021 01:39:17 +0000 (18:39 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 11 Jul 2021 10:48:11 +0000 (12:48 +0200)
[ Upstream commit b3807a91aca7d21c05d5790612e49969117a72b9 ]

page_vma_mapped_walk() cleanup: add a level of indentation to much of
the body, making no functional change in this commit, but reducing the
later diff when this is all converted to a loop.

[hughd@google.com: page_vma_mapped_walk(): add a level of indentation fix]
Link: https://lkml.kernel.org/r/7f817555-3ce1-c785-e438-87d8efdcaf26@google.com
Link: https://lkml.kernel.org/r/efde211-f3e2-fe54-977-ef481419e7f3@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Wang Yugui <wangyugui@e16-tech.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
mm/page_vma_mapped.c

index 2463ba78959b3b7d6994dd467df74bffa71a6cee..911c6dbe85f9645e87cc2b700fa75b2fda87c531 100644 (file)
@@ -168,62 +168,67 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
        if (pvmw->pte)
                goto next_pte;
 restart:
-       pgd = pgd_offset(mm, pvmw->address);
-       if (!pgd_present(*pgd))
-               return false;
-       p4d = p4d_offset(pgd, pvmw->address);
-       if (!p4d_present(*p4d))
-               return false;
-       pud = pud_offset(p4d, pvmw->address);
-       if (!pud_present(*pud))
-               return false;
-       pvmw->pmd = pmd_offset(pud, pvmw->address);
-       /*
-        * Make sure the pmd value isn't cached in a register by the
-        * compiler and used as a stale value after we've observed a
-        * subsequent update.
-        */
-       pmde = READ_ONCE(*pvmw->pmd);
-       if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
-               pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-               pmde = *pvmw->pmd;
-               if (likely(pmd_trans_huge(pmde))) {
-                       if (pvmw->flags & PVMW_MIGRATION)
-                               return not_found(pvmw);
-                       if (pmd_page(pmde) != page)
-                               return not_found(pvmw);
-                       return true;
-               }
-               if (!pmd_present(pmde)) {
-                       swp_entry_t entry;
+       {
+               pgd = pgd_offset(mm, pvmw->address);
+               if (!pgd_present(*pgd))
+                       return false;
+               p4d = p4d_offset(pgd, pvmw->address);
+               if (!p4d_present(*p4d))
+                       return false;
+               pud = pud_offset(p4d, pvmw->address);
+               if (!pud_present(*pud))
+                       return false;
 
-                       if (!thp_migration_supported() ||
-                           !(pvmw->flags & PVMW_MIGRATION))
-                               return not_found(pvmw);
-                       entry = pmd_to_swp_entry(pmde);
-                       if (!is_migration_entry(entry) ||
-                           migration_entry_to_page(entry) != page)
-                               return not_found(pvmw);
-                       return true;
-               }
-               /* THP pmd was split under us: handle on pte level */
-               spin_unlock(pvmw->ptl);
-               pvmw->ptl = NULL;
-       } else if (!pmd_present(pmde)) {
+               pvmw->pmd = pmd_offset(pud, pvmw->address);
                /*
-                * If PVMW_SYNC, take and drop THP pmd lock so that we
-                * cannot return prematurely, while zap_huge_pmd() has
-                * cleared *pmd but not decremented compound_mapcount().
+                * Make sure the pmd value isn't cached in a register by the
+                * compiler and used as a stale value after we've observed a
+                * subsequent update.
                 */
-               if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
-                       spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+               pmde = READ_ONCE(*pvmw->pmd);
+
+               if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+                       pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+                       pmde = *pvmw->pmd;
+                       if (likely(pmd_trans_huge(pmde))) {
+                               if (pvmw->flags & PVMW_MIGRATION)
+                                       return not_found(pvmw);
+                               if (pmd_page(pmde) != page)
+                                       return not_found(pvmw);
+                               return true;
+                       }
+                       if (!pmd_present(pmde)) {
+                               swp_entry_t entry;
+
+                               if (!thp_migration_supported() ||
+                                   !(pvmw->flags & PVMW_MIGRATION))
+                                       return not_found(pvmw);
+                               entry = pmd_to_swp_entry(pmde);
+                               if (!is_migration_entry(entry) ||
+                                   migration_entry_to_page(entry) != page)
+                                       return not_found(pvmw);
+                               return true;
+                       }
+                       /* THP pmd was split under us: handle on pte level */
+                       spin_unlock(pvmw->ptl);
+                       pvmw->ptl = NULL;
+               } else if (!pmd_present(pmde)) {
+                       /*
+                        * If PVMW_SYNC, take and drop THP pmd lock so that we
+                        * cannot return prematurely, while zap_huge_pmd() has
+                        * cleared *pmd but not decremented compound_mapcount().
+                        */
+                       if ((pvmw->flags & PVMW_SYNC) &&
+                           PageTransCompound(page)) {
+                               spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
 
-                       spin_unlock(ptl);
+                               spin_unlock(ptl);
+                       }
+                       return false;
                }
-               return false;
+               if (!map_pte(pvmw))
+                       goto next_pte;
        }
-       if (!map_pte(pvmw))
-               goto next_pte;
        while (1) {
                unsigned long end;