arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
author Catalin Marinas <catalin.marinas@arm.com>
Fri, 1 Jul 2016 15:53:00 +0000 (16:53 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Mon, 21 Nov 2016 18:48:53 +0000 (18:48 +0000)
This patch adds the uaccess macros/functions to disable access to user
space by setting TTBR0_EL1 to a reserved zeroed page. Since the value
written to TTBR0_EL1 must be a physical address, for simplicity this
patch introduces a reserved_ttbr0 page at a constant offset from
swapper_pg_dir. The uaccess_disable code computes the reserved TTBR0_EL1
value as the TTBR1_EL1 value plus the reserved_ttbr0 offset
(SWAPPER_DIR_SIZE).
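
As a rough illustration (not part of the patch): because reserved_ttbr0
sits immediately after swapper_pg_dir, the reserved TTBR0_EL1 value is
simply the TTBR1_EL1 value plus SWAPPER_DIR_SIZE. A standalone model of
that arithmetic, assuming made-up addresses and a 4K-page, three-level
SWAPPER_DIR_SIZE:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE        4096ULL
    #define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)   /* assumed three swapper levels */

    int main(void)
    {
            /* pretend physical address of swapper_pg_dir, as held in TTBR1_EL1 */
            uint64_t ttbr1 = 0x40000000ULL;
            /* reserved_ttbr0 is placed immediately after swapper_pg_dir */
            uint64_t reserved_ttbr0 = ttbr1 + SWAPPER_DIR_SIZE;

            printf("TTBR1_EL1 (swapper_pg_dir): 0x%" PRIx64 "\n", ttbr1);
            printf("reserved TTBR0_EL1:         0x%" PRIx64 "\n", reserved_ttbr0);
            return 0;
    }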

Enabling access to user space is done by restoring TTBR0_EL1 with the
value from the ttbr0 field of struct thread_info. Interrupts must be
disabled during the uaccess_ttbr0_enable code to ensure the atomicity of
the thread_info.ttbr0 read and the TTBR0_EL1 write: a context switch in
between could trigger an ASID roll-over and an update of
thread_info.ttbr0, leaving a stale value in TTBR0_EL1. This patch also
moves the
get_thread_info asm macro from entry.S to assembler.h for reuse in the
uaccess_ttbr0_* macros.
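
As a hedged usage sketch (hypothetical, not from this patch): C-level
accessors end up bracketing each user access with the
uaccess_enable()/uaccess_disable() pair. The standalone model below
mimics the TTBR0_EL1 switch with a plain flag; example_get_user() and
ttbr0_user are invented names for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    /* models TTBR0_EL1: true = user tables installed, false = reserved page */
    static bool ttbr0_user;

    static void uaccess_enable(void)  { ttbr0_user = true;  }
    static void uaccess_disable(void) { ttbr0_user = false; }

    /* hypothetical accessor: the "user" load only works while enabled */
    static int example_get_user(int *dst, const int *user_src)
    {
            int err = -14;                  /* stand-in for -EFAULT */

            uaccess_enable();
            if (ttbr0_user) {
                    *dst = *user_src;
                    err = 0;
            }
            uaccess_disable();
            return err;
    }

    int main(void)
    {
            int user_val = 42, val = 0;

            printf("err=%d val=%d\n", example_get_user(&val, &user_val), val);
            return 0;
    }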

Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/vmlinux.lds.S

arch/arm64/include/asm/assembler.h
index 55752231055c036ac6f2428e8143275e80501685..446f6c46d4b17b352ef695665409c6863d470706 100644
        msr     daifclr, #2
        .endm
 
+       .macro  save_and_disable_irq, flags
+       mrs     \flags, daif
+       msr     daifset, #2
+       .endm
+
+       .macro  restore_irq, flags
+       msr     daif, \flags
+       .endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -406,6 +415,13 @@ alternative_endif
        movk    \reg, :abs_g0_nc:\val
        .endm
 
+/*
+ * Return the current thread_info.
+ */
+       .macro  get_thread_info, rd
+       mrs     \rd, sp_el0
+       .endm
+
 /*
  * Errata workaround post TTBR0_EL1 update.
  */

arch/arm64/include/asm/cpufeature.h
index 0ef718b67c54bd031c657a7605e6c1edd51da805..a081531f9ff46aa94ec7e98ed89b631e80c16442 100644
@@ -241,6 +241,12 @@ static inline bool system_supports_fpsimd(void)
        return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
 
+static inline bool system_uses_ttbr0_pan(void)
+{
+       return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+               !cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif

arch/arm64/include/asm/kernel-pgtable.h
index 7e51d1b57c0c56461a9be0fb825fa84a9faeea8b..7803343e5881fbd7b2f635b25082d3e91d2583f8 100644
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
 #include <asm/sparsemem.h>
 
 /*
 #define SWAPPER_DIR_SIZE       (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE         (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE    (PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE    (0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT    SECTION_SHIFT

arch/arm64/include/asm/thread_info.h
index c17ad4d213d05d57b56495389d2e088c27334906..46c3b93cf865b985df8b67114d09645bb0f2c3b6 100644
@@ -47,6 +47,9 @@ typedef unsigned long mm_segment_t;
 struct thread_info {
        unsigned long           flags;          /* low level flags */
        mm_segment_t            addr_limit;     /* address limit */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       u64                     ttbr0;          /* saved TTBR0_EL1 */
+#endif
        int                     preempt_count;  /* 0 => preemptable, <0 => bug */
 };
 

arch/arm64/include/asm/uaccess.h
index 154659509afb98123b220159c2aeca04cd2eba35..6986f56cfa8807cab1175ba82792d96fd1c674df 100644
@@ -19,6 +19,7 @@
 #define __ASM_UACCESS_H
 
 #include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/sysreg.h>
 
 #ifndef __ASSEMBLY__
@@ -125,16 +126,71 @@ static inline void set_fs(mm_segment_t fs)
 /*
  * User access enabling/disabling.
  */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+       unsigned long ttbr;
+
+       /* reserved_ttbr0 placed at the end of swapper_pg_dir */
+       ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+       write_sysreg(ttbr, ttbr0_el1);
+       isb();
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+       unsigned long flags;
+
+       /*
+        * Disable interrupts to avoid preemption between reading the 'ttbr0'
+        * variable and the MSR. A context switch could trigger an ASID
+        * roll-over and an update of 'ttbr0'.
+        */
+       local_irq_save(flags);
+       write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+       isb();
+       local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+       if (!system_uses_ttbr0_pan())
+               return false;
+       __uaccess_ttbr0_disable();
+       return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+       if (!system_uses_ttbr0_pan())
+               return false;
+       __uaccess_ttbr0_enable();
+       return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+       return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+       return false;
+}
+#endif
+
 #define __uaccess_disable(alt)                                         \
 do {                                                                   \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,                  \
-                       CONFIG_ARM64_PAN));                             \
+       if (!uaccess_ttbr0_disable())                                   \
+               asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,          \
+                               CONFIG_ARM64_PAN));                     \
 } while (0)
 
 #define __uaccess_enable(alt)                                          \
 do {                                                                   \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,                  \
-                       CONFIG_ARM64_PAN));                             \
+       if (!uaccess_ttbr0_enable())                                    \
+               asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,          \
+                               CONFIG_ARM64_PAN));                     \
 } while (0)
 
 static inline void uaccess_disable(void)
@@ -373,16 +429,56 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 #include <asm/assembler.h>
 
 /*
- * User access enabling/disabling macros. These are no-ops when UAO is
- * present.
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       .macro  __uaccess_ttbr0_disable, tmp1
+       mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
+       add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
+       .endm
+
+       .macro  __uaccess_ttbr0_enable, tmp1
+       get_thread_info \tmp1
+       ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
+       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
+       isb
+       .endm
+
+       .macro  uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+       __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+       .endm
+
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+       save_and_disable_irq \tmp2              // avoid preemption
+       __uaccess_ttbr0_enable \tmp1
+       restore_irq \tmp2
+alternative_else_nop_endif
+       .endm
+#else
+       .macro  uaccess_ttbr0_disable, tmp1
+       .endm
+
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+       .endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
  */
        .macro  uaccess_disable_not_uao, tmp1
+       uaccess_ttbr0_disable \tmp1
 alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(1)
 alternative_else_nop_endif
        .endm
 
        .macro  uaccess_enable_not_uao, tmp1, tmp2
+       uaccess_ttbr0_enable \tmp1, \tmp2
 alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(0)
 alternative_else_nop_endif

arch/arm64/kernel/asm-offsets.c
index c2dc9fa4f09bef502f2582411e96cd99b5f48485..bc049afc73a782c73f97fe48a42dbcb88ccd9126 100644
@@ -39,6 +39,9 @@ int main(void)
   DEFINE(TSK_TI_FLAGS,         offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,       offsetof(struct task_struct, thread_info.preempt_count));
   DEFINE(TSK_TI_ADDR_LIMIT,    offsetof(struct task_struct, thread_info.addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TSK_TI_TTBR0,         offsetof(struct task_struct, thread_info.ttbr0));
+#endif
   DEFINE(TSK_STACK,            offsetof(struct task_struct, stack));
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,   offsetof(struct task_struct, thread.cpu_context));

arch/arm64/kernel/cpufeature.c
index f89385d794f6dfd6ac9164b405cd1c701147b146..fdf8f045929fcdaa3811518f74853dd235b1f79e 100644
@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcap_keys);

arch/arm64/kernel/entry.S
index 6349a8324b4f6be4c4599c84854e71074a972f0d..b7db3766a312a1d6eb9f6c72033323827b31610d 100644
@@ -183,10 +183,6 @@ alternative_else_nop_endif
        eret                                    // return to kernel
        .endm
 
-       .macro  get_thread_info, rd
-       mrs     \rd, sp_el0
-       .endm
-
        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp
 

arch/arm64/kernel/head.S
index eaafb253bbfa58ed2fff02d636dcaed8f9c41f24..7ee6d74101cfb289a8ce5d5c1ab44206b779dcd2 100644
@@ -326,14 +326,14 @@ __create_page_tables:
         * dirty cache lines being evicted.
         */
        adrp    x0, idmap_pg_dir
-       adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+       adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        bl      __inval_cache_range
 
        /*
         * Clear the idmap and swapper page tables.
         */
        adrp    x0, idmap_pg_dir
-       adrp    x6, swapper_pg_dir + SWAPPER_DIR_SIZE
+       adrp    x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:     stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
@@ -412,7 +412,7 @@ __create_page_tables:
         * tables again to remove any speculatively loaded cache lines.
         */
        adrp    x0, idmap_pg_dir
-       adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+       adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        dmb     sy
        bl      __inval_cache_range
 

arch/arm64/kernel/vmlinux.lds.S
index 1105aab1e6d6af4be3f88c0ee22d4cfafd5c5ce6..b8deffa9e1bf3ec06d9411afb7849695df27d6d2 100644
@@ -216,6 +216,11 @@ SECTIONS
        swapper_pg_dir = .;
        . += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       reserved_ttbr0 = .;
+       . += RESERVED_TTBR0_SIZE;
+#endif
+
        _end = .;
 
        STABS_DEBUG