FROMLIST: [PATCH v2 1/3] arm64: compat: Split the sigreturn trampolines and kuser...
author     Kevin Brodsky <kevin.brodsky@arm.com>
           Wed, 23 Nov 2016 14:01:10 +0000 (14:01 +0000)
committer  ivanmeler <i_ivan@windowslive.com>
           Wed, 13 Apr 2022 21:13:49 +0000 (21:13 +0000)
(cherry picked from url http://lkml.iu.edu/hypermail/linux/kernel/1709.1/01901.html)

AArch32 processes currently have a special [vectors] page installed, which
contains the sigreturn trampolines and the kuser helpers at the fixed
address mandated by the kuser helpers ABI.
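
For reference, the kuser helpers ABI pins each helper entry point just below
0xffff1000 (see Documentation/arm/kernel_user_helpers.txt). A minimal AArch32
userspace sketch of how those fixed addresses are used, purely illustrative
and not part of this patch:

/* Build as a 32-bit ARM binary; addresses are fixed by the kuser ABI. */
#include <stdio.h>

#define KUSER_HELPER_VERSION   (*(int *)0xffff0ffc)     /* ABI version word */

typedef void *(*kuser_get_tls_t)(void);
#define kuser_get_tls          ((kuser_get_tls_t)0xffff0fe0)

int main(void)
{
        printf("kuser helper version: %d\n", KUSER_HELPER_VERSION);
        printf("TLS pointer:          %p\n", kuser_get_tls());
        return 0;
}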

Having both functionalities in the same page has become problematic,
because:

* It makes it impossible to disable the kuser helpers (since the sigreturn
  trampolines cannot be removed), whereas disabling them is possible on arm.

* A future 32-bit vDSO would provide the sigreturn trampolines itself,
  making those in [vectors] redundant.

This patch addresses the problem by moving the sigreturn trampolines to
a separate [sigpage] page, mirroring [sigpage] on arm.

Even though [vectors] has always been a misnomer on arm64/compat, as there
are no AArch32 exception vectors in that page (and it now holds only the
kuser helpers), its name has been left unchanged for compatibility with arm
(there are reports of software relying on [vectors] appearing as the last
mapping in /proc/<pid>/maps).
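
With this split, an AArch32 process gets a randomized [sigpage] mapping in
addition to the fixed [vectors] mapping. A small userspace sketch
(illustrative only, not part of the patch) to list both from /proc/self/maps:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *maps = fopen("/proc/self/maps", "r");

        if (!maps)
                return 1;
        while (fgets(line, sizeof(line), maps))
                if (strstr(line, "[sigpage]") || strstr(line, "[vectors]"))
                        fputs(line, stdout);
        fclose(maps);
        return 0;
}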

mm->context.vdso used to point to the [vectors] page, which was unnecessary
(its address is fixed). It now points to the [sigpage] page, whose address
is randomized like that of a vDSO.

Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Signed-off-by: Mark Salyzyn <salyzyn@android.com>
Bug: 9674955
Bug: 63737556
Bug: 20045882
Change-Id: I52a56ea71d7326df8c784f90eb73b5c324fe9d20

arch/arm64/include/asm/processor.h
arch/arm64/include/asm/signal32.h
arch/arm64/kernel/signal32.c
arch/arm64/kernel/vdso.c

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 1df92b9988db520bcf3dcfc8fa28f4b3d02af356..e4a3ec12ce16afd7adb2b7c07c336e770f29ec14 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -40,9 +40,9 @@
 #ifdef __KERNEL__
 #define STACK_TOP_MAX          TASK_SIZE_64
 #ifdef CONFIG_COMPAT
-#define AARCH32_VECTORS_BASE   0xffff0000
+#define AARCH32_KUSER_HELPERS_BASE 0xffff0000
 #define STACK_TOP              (test_thread_flag(TIF_32BIT) ? \
-                               AARCH32_VECTORS_BASE : STACK_TOP_MAX)
+                               AARCH32_KUSER_HELPERS_BASE : STACK_TOP_MAX)
 #else
 #define STACK_TOP              STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index 81abea0b7650867d86385fc0c4fc145fefd24c46..58e288aaf0bae3663ca0019945c1c7a79aabd730 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -20,8 +20,6 @@
 #ifdef CONFIG_COMPAT
 #include <linux/compat.h>
 
-#define AARCH32_KERN_SIGRET_CODE_OFFSET        0x500
-
 int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
                       struct pt_regs *regs);
 int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 107335637390ef3e15546ca442fc9fd8117a2cfb..074950a11fae599162f238382eb276a5e85e9b83 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -484,14 +484,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                retcode = ptr_to_compat(ka->sa.sa_restorer);
        } else {
                /* Set up sigreturn pointer */
+               void *sigreturn_base = current->mm->context.vdso;
                unsigned int idx = thumb << 1;
 
                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 3;
 
-               retcode = AARCH32_VECTORS_BASE +
-                         AARCH32_KERN_SIGRET_CODE_OFFSET +
-                         (idx << 2) + thumb;
+               retcode = ptr_to_compat(sigreturn_base) + (idx << 2) + thumb;
        }
 
        regs->regs[0]   = usig;
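
The index arithmetic above picks one of the four trampolines in the page that
mm->context.vdso now points to. A standalone sketch of that computation (the
base address below is a made-up example, since [sigpage] is randomized):

#include <stdio.h>

static unsigned int sigreturn_addr(unsigned int base, int thumb, int siginfo)
{
        unsigned int idx = thumb << 1;          /* 0 for ARM, 2 for Thumb */

        if (siginfo)
                idx += 3;                       /* rt_sigreturn variants */

        return base + (idx << 2) + thumb;       /* +thumb sets the Thumb bit */
}

int main(void)
{
        unsigned int base = 0x76f2a000;         /* hypothetical [sigpage] base */

        printf("%#x\n", sigreturn_addr(base, 0, 0)); /* base + 0:  ARM sigreturn      */
        printf("%#x\n", sigreturn_addr(base, 1, 0)); /* base + 9:  Thumb sigreturn    */
        printf("%#x\n", sigreturn_addr(base, 0, 1)); /* base + 12: ARM rt_sigreturn   */
        printf("%#x\n", sigreturn_addr(base, 1, 1)); /* base + 21: Thumb rt_sigreturn */
        return 0;
}
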
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 7e9dd94452bb23fcafbc2a734f04893cb047ded9..4ff2595db741c4470324babe8778631536241356 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -1,5 +1,7 @@
 /*
- * VDSO implementation for AArch64 and vector page setup for AArch32.
+ * Additional userspace pages setup for AArch64 and AArch32.
+ *  - AArch64: vDSO pages setup, vDSO data page update.
+ *  - AArch32: sigreturn and kuser helpers pages setup.
  *
  * Copyright (C) 2012 ARM Limited
  *
@@ -53,32 +55,51 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
-static struct page *vectors_page[1];
+static struct page *vectors_page[] __ro_after_init;
+static const struct vm_special_mapping compat_vdso_spec[] = {
+       {
+               /* Must be named [sigpage] for compatibility with arm. */
+               .name   = "[sigpage]",
+               .pages  = &vectors_page[0],
+       },
+       {
+               .name   = "[kuserhelpers]",
+               .pages  = &vectors_page[1],
+       },
+};
+static struct page *vectors_page[ARRAY_SIZE(compat_vdso_spec)] __ro_after_init;
 
 static int __init alloc_vectors_page(void)
 {
        extern char __kuser_helper_start[], __kuser_helper_end[];
-       extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
-       int kuser_sz = __kuser_helper_end - __kuser_helper_start;
-       int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-       unsigned long vpage;
+       size_t kuser_sz = __kuser_helper_end - __kuser_helper_start;
+       unsigned long kuser_vpage;
 
-       vpage = get_zeroed_page(GFP_ATOMIC);
+       extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+       size_t sigret_sz =
+               __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+       unsigned long sigret_vpage;
 
-       if (!vpage)
+       sigret_vpage = get_zeroed_page(GFP_ATOMIC);
+       if (!sigret_vpage)
                return -ENOMEM;
 
-       /* kuser helpers */
-       memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
-               kuser_sz);
+       kuser_vpage = get_zeroed_page(GFP_ATOMIC);
+       if (!kuser_vpage) {
+               free_page(sigret_vpage);
+               return -ENOMEM;
+       }
 
        /* sigreturn code */
-       memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
-               __aarch32_sigret_code_start, sigret_sz);
+       memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
+       flush_icache_range(sigret_vpage, sigret_vpage + PAGE_SIZE);
+       vectors_page[0] = virt_to_page(sigret_vpage);
 
-       flush_icache_range(vpage, vpage + PAGE_SIZE);
-       vectors_page[0] = virt_to_page(vpage);
+       /* kuser helpers */
+       memcpy((void *)kuser_vpage + 0x1000 - kuser_sz, __kuser_helper_start,
+               kuser_sz);
+       flush_icache_range(kuser_vpage, kuser_vpage + PAGE_SIZE);
+       vectors_page[1] = virt_to_page(kuser_vpage);
 
        return 0;
 }
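
The helpers are copied to the end of their page so that, once the page is
mapped at AARCH32_KUSER_HELPERS_BASE (0xffff0000), every entry point keeps
its ABI-mandated address. A quick arithmetic sketch (the kuser_sz value below
is illustrative, not taken from the patch):

#include <stdio.h>

int main(void)
{
        unsigned long page = 0xffff0000UL;      /* AARCH32_KUSER_HELPERS_BASE */
        unsigned long kuser_sz = 0xa0;          /* assumed size of the helpers */
        unsigned long dst = page + 0x1000 - kuser_sz;

        printf("helpers copied to %#lx..%#lx\n", dst, page + 0x1000);
        printf("__kuser_get_tls expected at %#lx\n", page + 0xfe0);
        return 0;
}
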
@@ -87,22 +108,31 @@ arch_initcall(alloc_vectors_page);
 int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long addr = AARCH32_VECTORS_BASE;
-       static const struct vm_special_mapping spec = {
-               .name   = "[vectors]",
-               .pages  = vectors_page,
-
-       };
+       unsigned long addr;
        void *ret;
 
        down_write(&mm->mmap_sem);
-       current->mm->context.vdso = (void *)addr;
+       addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+       if (IS_ERR_VALUE(addr)) {
+               ret = ERR_PTR(addr);
+               goto out;
+       }
 
-       /* Map vectors page at the high address. */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
-                                      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-                                      &spec);
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      &compat_vdso_spec[0]);
+       if (IS_ERR(ret))
+               goto out;
 
+       current->mm->context.vdso = (void *)addr;
+
+       /* Map the kuser helpers at the ABI-defined high address. */
+       ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
+                                      PAGE_SIZE,
+                                      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+                                      &compat_vdso_spec[1]);
+out:
        up_write(&mm->mmap_sem);
 
        return PTR_ERR_OR_ZERO(ret);