/*
- * VDSO implementation for AArch64 and vector page setup for AArch32.
+ * Additional userspace page setup for AArch64 and AArch32.
+ * - AArch64: vDSO page setup and vDSO data page updates.
+ * - AArch32: sigreturn and kuser helper page setup.
*
* Copyright (C) 2012 ARM Limited
*
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-extern char vdso_start, vdso_end;
-static unsigned long vdso_pages;
-static struct page **vdso_pagelist;
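+/*
+ * Each vDSO flavour gets one of these: a [vvar] data mapping and a [vdso]
+ * code mapping, populated once at init time and installed per-process by
+ * vdso_setup().
+ */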
+struct vdso_mappings {
+ unsigned long num_code_pages;
+ struct vm_special_mapping data_mapping;
+ struct vm_special_mapping code_mapping;
+};
/*
* The vDSO data page.
*/
/*
- * Create and map the vectors page for AArch32 tasks.
+ * Create and map the sigreturn trampoline and kuser helper pages for
+ * AArch32 tasks.
*/
-static struct page *vectors_page[1];
+#if !defined(CONFIG_VDSO32) || defined(CONFIG_KUSER_HELPERS)
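+/*
+ * Forward declaration; the array is completed below, once the size of
+ * compat_vdso_spec[] is known.
+ */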
+static struct page *vectors_page[] __ro_after_init;
+static const struct vm_special_mapping compat_vdso_spec[] = {
+ {
+ /* Must be named [sigpage] for compatibility with arm. */
+ .name = "[sigpage]",
+ .pages = &vectors_page[0],
+ },
+#ifdef CONFIG_KUSER_HELPERS
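+ /* Mapped separately, at the ABI-defined AARCH32_KUSER_HELPERS_BASE. */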
+ {
+ .name = "[kuserhelpers]",
+ .pages = &vectors_page[1],
+ },
+#endif
+};
+static struct page *vectors_page[ARRAY_SIZE(compat_vdso_spec)] __ro_after_init;
+#endif
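+
+/*
+ * Allocate and populate the boot-time pages: the sigreturn trampoline when
+ * there is no 32-bit vDSO to provide it, and the kuser helpers page when
+ * kuser helpers are enabled.
+ */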
static int __init alloc_vectors_page(void)
{
+#ifdef CONFIG_KUSER_HELPERS
extern char __kuser_helper_start[], __kuser_helper_end[];
- extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
- int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
- unsigned long vpage;
+ size_t kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned long kuser_vpage;
+#endif
- vpage = get_zeroed_page(GFP_ATOMIC);
+#ifndef CONFIG_VDSO32
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+ size_t sigret_sz =
+ __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+ unsigned long sigret_vpage;
- if (!vpage)
+ sigret_vpage = get_zeroed_page(GFP_ATOMIC);
+ if (!sigret_vpage)
return -ENOMEM;
+#endif
+
+#ifdef CONFIG_KUSER_HELPERS
+ kuser_vpage = get_zeroed_page(GFP_ATOMIC);
+ if (!kuser_vpage) {
+#ifndef CONFIG_VDSO32
+ free_page(sigret_vpage);
+#endif
+ return -ENOMEM;
+ }
+#endif
- /* kuser helpers */
- memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
- kuser_sz);
-
+#ifndef CONFIG_VDSO32
/* sigreturn code */
- memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- __aarch32_sigret_code_start, sigret_sz);
+ memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
+ flush_icache_range(sigret_vpage, sigret_vpage + PAGE_SIZE);
+ vectors_page[0] = virt_to_page(sigret_vpage);
+#endif
- flush_icache_range(vpage, vpage + PAGE_SIZE);
- vectors_page[0] = virt_to_page(vpage);
+#ifdef CONFIG_KUSER_HELPERS
+ /* kuser helpers */
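+ /*
+ * The helper entry points are at fixed offsets below the top of the
+ * page (e.g. 0xffff0fe0 for __kuser_get_tls), so the code must end
+ * exactly at offset 0x1000.
+ */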
+ memcpy((void *)kuser_vpage + 0x1000 - kuser_sz, __kuser_helper_start,
+ kuser_sz);
+ flush_icache_range(kuser_vpage, kuser_vpage + PAGE_SIZE);
+ vectors_page[1] = virt_to_page(kuser_vpage);
+#endif
return 0;
}
arch_initcall(alloc_vectors_page);
+#ifndef CONFIG_VDSO32
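+/*
+ * Without a 32-bit vDSO, only a sigreturn trampoline page is provided, at
+ * an address chosen by get_unmapped_area() instead of the legacy fixed
+ * [vectors] base.
+ */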
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
- unsigned long addr = AARCH32_VECTORS_BASE;
- static const struct vm_special_mapping spec = {
- .name = "[vectors]",
- .pages = vectors_page,
-
- };
+ unsigned long addr;
void *ret;
down_write(&mm->mmap_sem);
- current->mm->context.vdso = (void *)addr;
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = ERR_PTR(addr);
+ goto out;
+ }
- /* Map vectors page at the high address. */
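+ /* VM_MAYWRITE lets a debugger write breakpoints to the sigpage via CoW. */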
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
- &spec);
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ &compat_vdso_spec[0]);
+ if (IS_ERR(ret))
+ goto out;
+
+ current->mm->context.vdso = (void *)addr;
+#ifdef CONFIG_KUSER_HELPERS
+ /* Map the kuser helpers at the ABI-defined high address. */
+ ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
+ PAGE_SIZE,
+ VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+ &compat_vdso_spec[1]);
+#endif
+out:
up_write(&mm->mmap_sem);
return PTR_ERR_OR_ZERO(ret);
}
+#endif /* !CONFIG_VDSO32 */
#endif /* CONFIG_COMPAT */
-static struct vm_special_mapping vdso_spec[2];
-
-static int __init vdso_init(void)
+static int __init vdso_mappings_init(const char *name,
+ const char *code_start,
+ const char *code_end,
+ struct vdso_mappings *mappings)
{
- int i;
+ unsigned long i, vdso_pages;
+ struct page **vdso_pagelist;
unsigned long pfn;
- if (memcmp(&vdso_start, "\177ELF", 4)) {
- pr_err("vDSO is not a valid ELF object!\n");
+ if (memcmp(code_start, "\177ELF", 4)) {
+ pr_err("%s is not a valid ELF object!\n", name);
return -EINVAL;
}
- vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
- pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
-
- /* Allocate the vDSO pagelist, plus a page for the data. */
- vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
- GFP_KERNEL);
+ vdso_pages = (code_end - code_start) >> PAGE_SHIFT;
+ pr_info("%s: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+ name, vdso_pages + 1, vdso_pages, code_start, 1L,
+ vdso_data);
+
+ /*
+ * Allocate space for storing pointers to the vDSO code pages + the
+ * data page. The pointers must have the same lifetime as the mappings,
+ * which are static, so there is no need to keep track of the pointer
+ * array to free it.
+ */
+ vdso_pagelist = kmalloc_array(vdso_pages + 1, sizeof(struct page *),
+ GFP_KERNEL);
if (vdso_pagelist == NULL)
return -ENOMEM;
/* Grab the vDSO data page. */
vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
-
/* Grab the vDSO code pages. */
- pfn = sym_to_pfn(&vdso_start);
+ pfn = sym_to_pfn(code_start);
for (i = 0; i < vdso_pages; i++)
vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
/* Populate the special mapping structures */
- vdso_spec[0] = (struct vm_special_mapping) {
+ mappings->data_mapping = (struct vm_special_mapping) {
.name = "[vvar]",
- .pages = vdso_pagelist,
+ .pages = &vdso_pagelist[0],
};
- vdso_spec[1] = (struct vm_special_mapping) {
+ mappings->code_mapping = (struct vm_special_mapping) {
.name = "[vdso]",
.pages = &vdso_pagelist[1],
};
+ mappings->num_code_pages = vdso_pages;
return 0;
}
+
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_VDSO32
+
+static struct vdso_mappings vdso32_mappings __ro_after_init;
+
+static int __init vdso32_init(void)
+{
+ extern char vdso32_start[], vdso32_end[];
+
+ return vdso_mappings_init("vdso32", vdso32_start, vdso32_end,
+ &vdso32_mappings);
+}
+arch_initcall(vdso32_init);
+
+#endif /* CONFIG_VDSO32 */
+#endif /* CONFIG_COMPAT */
+
+static struct vdso_mappings vdso_mappings __ro_after_init;
+
+static int __init vdso_init(void)
+{
+ extern char vdso_start[], vdso_end[];
+
+ return vdso_mappings_init("vdso", vdso_start, vdso_end,
+ &vdso_mappings);
+}
+
arch_initcall(vdso_init);
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp)
+static int vdso_setup(struct mm_struct *mm,
+ const struct vdso_mappings *mappings)
{
- struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
void *ret;
- vdso_text_len = vdso_pages << PAGE_SHIFT;
+ vdso_text_len = mappings->num_code_pages << PAGE_SHIFT;
/* Be sure to map the data page */
vdso_mapping_len = vdso_text_len + PAGE_SIZE;
- down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
- if (IS_ERR_VALUE(vdso_base)) {
- ret = ERR_PTR(vdso_base);
- goto up_fail;
- }
+ if (IS_ERR_VALUE(vdso_base))
+ return vdso_base;
ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
VM_READ|VM_MAYREAD,
- &vdso_spec[0]);
+ &mappings->data_mapping);
if (IS_ERR(ret))
- goto up_fail;
+ return PTR_ERR(ret);
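+ /* The code mapping follows immediately after the data page. */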
vdso_base += PAGE_SIZE;
- mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_spec[1]);
- if (IS_ERR(ret))
- goto up_fail;
+ &mappings->code_mapping);
+ if (!IS_ERR(ret))
+ mm->context.vdso = (void *)vdso_base;
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_VDSO32
+int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ void *ret;
+ down_write(&mm->mmap_sem);
+
+ ret = ERR_PTR(vdso_setup(mm, &vdso32_mappings));
+#ifdef CONFIG_KUSER_HELPERS
+ if (!IS_ERR(ret))
+ /* Map the kuser helpers at the ABI-defined high address. */
+ ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
+ PAGE_SIZE,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYEXEC,
+ &compat_vdso_spec[1]);
+#endif
up_write(&mm->mmap_sem);
- return 0;
-up_fail:
- mm->context.vdso = NULL;
+ return PTR_ERR_OR_ZERO(ret);
+}
+#endif /* CONFIG_VDSO32 */
+#endif /* CONFIG_COMPAT */
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ down_write(&mm->mmap_sem);
+ ret = vdso_setup(mm, &vdso_mappings);
up_write(&mm->mmap_sem);
- return PTR_ERR(ret);
+ return ret;
}
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
+ struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+ vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
/* tkr_raw.xtime_nsec == 0 */
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
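+ /* CLOCK_BOOTTIME == CLOCK_MONOTONIC + offs_boot */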
+ vdso_data->btm_sec = btm.tv_sec;
+ vdso_data->btm_nsec = btm.tv_nsec;
}
smp_wmb();