arm64: mm: Ensure writes to swapper are ordered wrt subsequent cache maintenance
author Will Deacon <will.deacon@arm.com>
Fri, 22 Jun 2018 15:23:45 +0000 (16:23 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Jul 2018 09:24:52 +0000 (11:24 +0200)
commit 71c8fc0c96abf8e53e74ed4d891d671e585f9076 upstream.

When rewriting swapper using nG mappings, we must perform cache
maintenance around each page table access in order to avoid coherency
problems with the host's cacheable alias under KVM. To ensure correct
ordering of the maintenance with respect to Device memory accesses made
with the Stage-1 MMU disabled, DMBs need to be added between the
maintenance and the corresponding memory access.

This patch adds a missing DMB between writing a new page table entry and
performing a clean+invalidate on the same line.
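
For illustration, the ordering the architecture requires looks roughly
like the following (a minimal sketch, not lifted from the kernel
sources; the register choices and the trailing DSB to wait for
completion of the maintenance are illustrative assumptions):

	// x0 = address of the page table entry, x1 = new entry value
	str	x1, [x0]	// update the entry via the cacheable alias
	dmb	sy		// order the store before the maintenance
	dc	civac, x0	// clean+invalidate the line to the PoC
	dsb	sy		// wait for the maintenance to complete

Without the DMB, the clean+invalidate is not guaranteed to be ordered
after the store, so the new entry may not reach the point of coherency
and an observer accessing the table with the Stage-1 MMU disabled could
still see the stale value.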

Fixes: f992b4dfd58b ("arm64: kpti: Add ->enable callback to remap swapper using nG mappings")
Cc: <stable@vger.kernel.org> # 4.16.x-
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/mm/proc.S

index e338165000e62b67e506e81f0f39b35318017053..bf0821b7b1ab5a4882f0063e2cbeb4e102b5ced7 100644
@@ -196,8 +196,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
 
        .macro __idmap_kpti_put_pgtable_ent_ng, type
        orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
-       str     \type, [cur_\()\type\()p]       // Update the entry and ensure it
-       dc      civac, cur_\()\type\()p         // is visible to all CPUs.
+       str     \type, [cur_\()\type\()p]       // Update the entry and ensure
+       dmb     sy                              // that it is visible to all
+       dc      civac, cur_\()\type\()p         // CPUs.
        .endm
 
 /*