FROMLIST: arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Thu, 10 Aug 2017 12:58:16 +0000 (13:58 +0100)
Commit:     Greg Kroah-Hartman <gregkh@google.com>
CommitDate: Sat, 6 Jan 2018 10:09:28 +0000 (11:09 +0100)
With the ASID now installed in TTBR1, we can re-enable ARM64_SW_TTBR0_PAN
by ensuring that we switch to a reserved ASID of zero when disabling
user access and restore the active user ASID on the uaccess enable path.
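
For context (editor's sketch, not part of the patch): on arm64 the 16-bit
ASID lives in bits [63:48] of TTBR0_EL1 and TTBR1_EL1, above the table base
address. "Switching to a reserved ASID of zero" therefore means clearing that
field in TTBR1_EL1 while TTBR0_EL1 points at the reserved zero table, and
re-enabling means re-tagging TTBR1_EL1 with the ASID saved alongside the user
page-table pointer in thread_info->ttbr0. A minimal C sketch of the two
states, with hypothetical helper names:

	#include <stdint.h>

	#define TTBR_ASID_MASK	(0xffffUL << 48)	/* ASID field, bits [63:48] */

	/* uaccess disabled: TTBR1_EL1 runs under the reserved ASID 0 */
	static inline uint64_t ttbr1_reserved_asid(uint64_t ttbr1)
	{
		return ttbr1 & ~TTBR_ASID_MASK;
	}

	/* uaccess enabled: TTBR1_EL1 re-tagged with the ASID saved in
	 * thread_info->ttbr0 (which carries the ASID in its top 16 bits) */
	static inline uint64_t ttbr1_user_asid(uint64_t ttbr1, uint64_t ttbr0)
	{
		return (ttbr1 & ~TTBR_ASID_MASK) | (ttbr0 & TTBR_ASID_MASK);
	}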

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
(cherry picked from git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
 commit 27a921e75711d924617269e0ba4adb8bae9fd0d1)

Change-Id: I3b06e02766753c59fac975363a2ead5c5e45b8f3
[ghackmann@google.com: adjust context, applying asm-uaccess.h changes to
 uaccess.h]
Signed-off-by: Greg Hackmann <ghackmann@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
arch/arm64/Kconfig
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/entry.S
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_to_user.S
arch/arm64/mm/cache.S
arch/arm64/xen/hypercall.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a1df4430f51d2a7bce431ef4654314968b205615..1f7ba1848c64dc7cfea2009a845eefe746627bb9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -719,7 +719,6 @@ endif
 
 config ARM64_SW_TTBR0_PAN
        bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
-       depends on BROKEN       # Temporary while switch_mm is reworked
        help
          Enabling this option prevents the kernel from accessing
          user-space memory directly by pointing TTBR0_EL1 to a reserved
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 064cef9ae2d131312a8a0e26c3bdf612f4ad5082..d4383596a38c688cf2b69ca01c28cb1bc9cf6554 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -144,15 +144,19 @@ static inline void __uaccess_ttbr0_disable(void)
 {
        unsigned long ttbr;
 
+       ttbr = read_sysreg(ttbr1_el1);
        /* reserved_ttbr0 placed at the end of swapper_pg_dir */
-       ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
-       write_sysreg(ttbr, ttbr0_el1);
+       write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
+       isb();
+       /* Set reserved ASID */
+       ttbr &= ~(0xffffUL << 48);
+       write_sysreg(ttbr, ttbr1_el1);
        isb();
 }
 
 static inline void __uaccess_ttbr0_enable(void)
 {
-       unsigned long flags;
+       unsigned long flags, ttbr0, ttbr1;
 
        /*
         * Disable interrupts to avoid preemption between reading the 'ttbr0'
@@ -160,7 +164,16 @@ static inline void __uaccess_ttbr0_enable(void)
         * roll-over and an update of 'ttbr0'.
         */
        local_irq_save(flags);
-       write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+       ttbr0 = current_thread_info()->ttbr0;
+
+       /* Restore active ASID */
+       ttbr1 = read_sysreg(ttbr1_el1);
+       ttbr1 |= ttbr0 & (0xffffUL << 48);
+       write_sysreg(ttbr1, ttbr1_el1);
+       isb();
+
+       /* Restore user page table */
+       write_sysreg(ttbr0, ttbr0_el1);
        isb();
        local_irq_restore(flags);
 }
@@ -442,11 +455,20 @@ extern __must_check long strnlen_user(const char __user *str, long n);
        add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
        msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
        isb
+       sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
+       bic     \tmp1, \tmp1, #(0xffff << 48)
+       msr     ttbr1_el1, \tmp1                // set reserved ASID
+       isb
        .endm
 
-       .macro  __uaccess_ttbr0_enable, tmp1
+       .macro  __uaccess_ttbr0_enable, tmp1, tmp2
        get_thread_info \tmp1
        ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
+       mrs     \tmp2, ttbr1_el1
+       extr    \tmp2, \tmp2, \tmp1, #48
+       ror     \tmp2, \tmp2, #16
+       msr     ttbr1_el1, \tmp2                // set the active ASID
+       isb
        msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
        isb
        .endm
@@ -457,18 +479,18 @@ alternative_if_not ARM64_HAS_PAN
 alternative_else_nop_endif
        .endm
 
-       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
 alternative_if_not ARM64_HAS_PAN
-       save_and_disable_irq \tmp2              // avoid preemption
-       __uaccess_ttbr0_enable \tmp1
-       restore_irq \tmp2
+       save_and_disable_irq \tmp3              // avoid preemption
+       __uaccess_ttbr0_enable \tmp1, \tmp2
+       restore_irq \tmp3
 alternative_else_nop_endif
        .endm
 #else
        .macro  uaccess_ttbr0_disable, tmp1
        .endm
 
-       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
        .endm
 #endif
 
@@ -482,8 +504,8 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
 alternative_else_nop_endif
        .endm
 
-       .macro  uaccess_enable_not_uao, tmp1, tmp2
-       uaccess_ttbr0_enable \tmp1, \tmp2
+       .macro  uaccess_enable_not_uao, tmp1, tmp2, tmp3
+       uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
 alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(0)
 alternative_else_nop_endif
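
A note on the assembly enable path above (editor's reading, not part of the
patch): "extr \tmp2, \tmp2, \tmp1, #48" extracts bits [111:48] of the 128-bit
concatenation ttbr1:ttbr0, leaving TTBR1's table base in the upper bits and
TTBR0's 16-bit ASID in the low bits; the following "ror \tmp2, \tmp2, #16"
rotates that ASID back up into bits [63:48]. A hypothetical C equivalent:

	#include <stdint.h>

	/* What the extr/ror pair computes in __uaccess_ttbr0_enable
	 * (illustrative only) */
	static inline uint64_t merge_user_asid(uint64_t ttbr1, uint64_t ttbr0)
	{
		/* extr x, ttbr1, ttbr0, #48: bits [111:48] of ttbr1:ttbr0 */
		uint64_t x = (ttbr1 << 16) | (ttbr0 >> 48);

		/* ror x, x, #16: rotate the user ASID into bits [63:48] */
		return (x >> 16) | (x << 48);
	}

The result is TTBR1_EL1's base address tagged with TTBR0_EL1's ASID, the same
value the C path builds with a mask and OR, done here in two instructions
using only the two available temporaries.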
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e89231f1e323c7feccb6870f6352eabf0e8c210a..9f3c86da7005df9e7a89b49931c444ed669e0445 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -145,7 +145,7 @@ alternative_if ARM64_HAS_PAN
 alternative_else_nop_endif
 
        .if     \el != 0
-       mrs     x21, ttbr0_el1
+       mrs     x21, ttbr1_el1
        tst     x21, #0xffff << 48              // Check for the reserved ASID
        orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
        b.eq    1f                              // TTBR0 access already disabled
@@ -213,7 +213,7 @@ alternative_else_nop_endif
        tbnz    x22, #22, 1f                    // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
        .endif
 
-       __uaccess_ttbr0_enable x0
+       __uaccess_ttbr0_enable x0, x1
 
        .if     \el == 0
        /*
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index d7150e30438aef3bb3f29a8a41a73fbd4c5d4157..dd65ca253eb4e9b60ae3a244b59e00e5ddb79256 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -30,7 +30,7 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-       uaccess_enable_not_uao x2, x3
+       uaccess_enable_not_uao x2, x3, x4
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 90154f3f7f2aed3acfd59612a6ba293b8504af93..1ff23f81e2426b5b17483bc6ba48b71a91395d46 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -64,7 +64,7 @@
 
 end    .req    x5
 ENTRY(__arch_copy_from_user)
-       uaccess_enable_not_uao x3, x4
+       uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
        uaccess_disable_not_uao x3
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 718b1c4e2f85a7aa44be720815ba1c9dc9909c4a..074d52fcd75ba16dfcd5c81e6386e661a6c30f8a 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -65,7 +65,7 @@
 
 end    .req    x5
 ENTRY(__copy_in_user)
-       uaccess_enable_not_uao x3, x4
+       uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
        uaccess_disable_not_uao x3
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index e99e31c9acac81a26a762524471bec471735b541..67118444cde0ccfb6828a63df4bb9b65408ca666 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -63,7 +63,7 @@
 
 end    .req    x5
 ENTRY(__arch_copy_to_user)
-       uaccess_enable_not_uao x3, x4
+       uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
        uaccess_disable_not_uao x3
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 3be2cda5dbda0bd7ea4a22aa6c82d7266dc6b091..d7371925f9e2b493fcbde6068d48f329d853da16 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
  *     - end     - virtual end address of region
  */
 ENTRY(__flush_cache_user_range)
-       uaccess_ttbr0_enable x2, x3
+       uaccess_ttbr0_enable x2, x3, x4
        dcache_line_size x2, x3
        sub     x3, x2, #1
        bic     x4, x0, x3
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index b96db5dafec4859f976e6831e9a4c118893abee1..27b38711023b5f45fc922b499235798bc536add4 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -98,7 +98,7 @@ ENTRY(privcmd_call)
         * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
         * is enabled (it implies that hardware UAO and PAN disabled).
         */
-       uaccess_ttbr0_enable x6, x7
+       uaccess_ttbr0_enable x6, x7, x8
        hvc XEN_IMM
 
        /*