/*
 * Additional userspace pages setup for AArch64 and AArch32.
 *  - AArch64: vDSO pages setup, vDSO data page update.
 *  - AArch32: sigreturn and kuser helpers pages setup.
 *
 * Copyright (C) 2012 ARM Limited
 */

/*
 * Allocate and map the AArch32 vectors pages: the sigreturn trampoline
 * page ("[sigpage]") and the kuser helpers page ("[kuserhelpers]").
 */
-static struct page *vectors_page[1] __ro_after_init;
+static struct page *vectors_page[] __ro_after_init;
+static const struct vm_special_mapping compat_vdso_spec[] = {
+ {
+ /* Must be named [sigpage] for compatibility with arm. */
+ .name = "[sigpage]",
+ .pages = &vectors_page[0],
+ },
+ {
+ .name = "[kuserhelpers]",
+ .pages = &vectors_page[1],
+ },
+};
+static struct page *vectors_page[ARRAY_SIZE(compat_vdso_spec)] __ro_after_init;
/*
 * Allocate and populate the two AArch32 vectors pages:
 *  - vectors_page[0]: the sigreturn trampoline code, copied to the
 *    start of its own page;
 *  - vectors_page[1]: the kuser helpers, copied so that they end at
 *    the top of their page (the kuser ABI addresses them from the end).
 *
 * Returns 0 on success, -ENOMEM if either page allocation fails (the
 * first page is freed if the second allocation fails).
 */
static int __init alloc_vectors_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	size_t kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long kuser_vpage;

	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	size_t sigret_sz =
		__aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigret_vpage;

	sigret_vpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigret_vpage)
		return -ENOMEM;

	kuser_vpage = get_zeroed_page(GFP_ATOMIC);
	if (!kuser_vpage) {
		free_page(sigret_vpage);
		return -ENOMEM;
	}

	/* sigreturn code */
	memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
	flush_icache_range(sigret_vpage, sigret_vpage + PAGE_SIZE);
	vectors_page[0] = virt_to_page(sigret_vpage);

	/* kuser helpers: placed so they end at the top of the page */
	memcpy((void *)kuser_vpage + 0x1000 - kuser_sz, __kuser_helper_start,
	       kuser_sz);
	flush_icache_range(kuser_vpage, kuser_vpage + PAGE_SIZE);
	vectors_page[1] = virt_to_page(kuser_vpage);

	return 0;
}
/*
 * Map the AArch32 compat pages into a new process image:
 *  - the sigreturn page at a kernel-chosen (mmap-style) address,
 *    recorded in mm->context.vdso so the signal code can find it;
 *  - the kuser helpers page at the ABI-mandated fixed high address.
 *
 * Called from the compat binfmt setup path with no locks held; takes
 * mmap_sem itself. Returns 0 on success or a negative errno.
 */
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	void *ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/* Let get_unmapped_area() pick an address for the sigreturn page. */
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE allows gdb to COW the page and set breakpoints in
	 * the sigreturn trampoline, matching the arm behaviour.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &compat_vdso_spec[0]);
	if (IS_ERR(ret))
		goto out;

	/* Record the sigpage address only after the mapping succeeded. */
	current->mm->context.vdso = (void *)addr;

	/* Map the kuser helpers at the ABI-defined high address. */
	ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
				       PAGE_SIZE,
				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
				       &compat_vdso_spec[1]);
out:
	up_write(&mm->mmap_sem);
	return PTR_ERR_OR_ZERO(ret);