x86/mm: Sync also unmappings in vmalloc_sync_all()
author	Joerg Roedel <jroedel@suse.de>
Fri, 19 Jul 2019 18:46:51 +0000 (20:46 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 25 Aug 2019 08:51:19 +0000 (10:51 +0200)
commit 8e998fc24de47c55b47a887f6c95ab91acd4a720 upstream.

With huge-page ioremap areas, the unmappings also need to be synced between
all page-tables. Otherwise, stale entries can cause data corruption when a
region is unmapped and later re-used.
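
As a rough illustration (a hypothetical driver sequence, not taken from the
patch; example_touch_device() and phys_addr are made-up names), the failure
mode looks like this:

	#include <linux/io.h>
	#include <linux/sizes.h>

	/* Hypothetical sketch: map, touch and unmap a 2 MiB MMIO window. */
	static void example_touch_device(phys_addr_t phys_addr)
	{
		void __iomem *regs;

		/* A 2 MiB, suitably aligned region may be mapped with a huge PMD. */
		regs = ioremap(phys_addr, SZ_2M);
		if (!regs)
			return;

		/* The first access from a task whose page-table lacks the entry
		 * triggers a vmalloc fault, which copies the huge PMD over. */
		writel(0x1, regs);

		/* iounmap() clears the PMD in init_mm; before this fix the copies
		 * in other page-tables survived, so a later re-use of the same
		 * vmalloc/ioremap address range could alias stale translations
		 * and corrupt data. */
		iounmap(regs);
	}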

Make the vmalloc_sync_one() function ready to sync unmappings and make sure
vmalloc_sync_all() iterates over all page-tables even when an unmapped PMD
is found.

Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/mm/fault.c

index 2870424bda1ffc89a4418822607cfa208f1d6003..7f4b3c59df475fb8ce3a9ea7e1f98a318636c77b 100644 (file)
@@ -273,11 +273,12 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
-       if (!pmd_present(*pmd_k))
-               return NULL;
 
-       if (!pmd_present(*pmd))
+       if (pmd_present(*pmd) != pmd_present(*pmd_k))
                set_pmd(pmd, *pmd_k);
+
+       if (!pmd_present(*pmd_k))
+               return NULL;
        else
                BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
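For context, with this hunk applied the tail of vmalloc_sync_one() reads
roughly as follows (a reconstruction; the lines outside the hunk and the
comments are editorial, not part of the patch):

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	/* Copy the init_mm entry whenever the two PMDs disagree on
	 * presence; this now propagates unmappings as well as mappings. */
	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	/* Nothing mapped at this address in init_mm: tell the caller,
	 * but only after the clearing above has already been done. */
	if (!pmd_present(*pmd_k))
		return NULL;
	else
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

	return pmd_k;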
@@ -299,17 +300,13 @@ void vmalloc_sync_all(void)
                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        spinlock_t *pgt_lock;
-                       pmd_t *ret;
 
                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
                        spin_lock(pgt_lock);
-                       ret = vmalloc_sync_one(page_address(page), address);
+                       vmalloc_sync_one(page_address(page), address);
                        spin_unlock(pgt_lock);
-
-                       if (!ret)
-                               break;
                }
                spin_unlock(&pgd_lock);
        }
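
With this hunk applied, the per-address sync loop reads as below
(reconstructed from the context lines above; the comment is editorial). The
early break on a NULL return from vmalloc_sync_one() is gone, so an unmapping
is now propagated into every page-table on pgd_list instead of only the ones
visited before the first absent PMD:

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			/*
			 * Keep walking the whole pgd_list even when the PMD is
			 * not present in init_mm: every page-table still has to
			 * have its stale copy cleared, so the return value is
			 * simply ignored.
			 */
			spin_lock(pgt_lock);
			vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);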