/*
 * Additional userspace pages setup for AArch64 and AArch32.
 *  - AArch64: vDSO pages setup, vDSO data page update.
 *  - AArch32: sigreturn and kuser helpers pages setup.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
23 #include <linux/kernel.h>
24 #include <linux/clocksource.h>
25 #include <linux/elf.h>
26 #include <linux/err.h>
27 #include <linux/errno.h>
28 #include <linux/gfp.h>
30 #include <linux/sched.h>
31 #include <linux/signal.h>
32 #include <linux/slab.h>
33 #include <linux/timekeeper_internal.h>
34 #include <linux/vmalloc.h>
36 #include <asm/cacheflush.h>
37 #include <asm/signal32.h>
39 #include <asm/vdso_datapage.h>
/* Start and end markers of the vDSO image, provided by the linker script. */
41 extern char vdso_start
, vdso_end
;
/* Number of pages of vDSO code; computed once in vdso_init(). */
42 static unsigned long vdso_pages
;
/* Page array for the vDSO mapping: data page first, then the code pages. */
43 static struct page
**vdso_pagelist
;
/*
 * NOTE(review): the opening of the page-aligned container (e.g.
 * "static union { ... }") is missing from this extraction; the '}' below
 * closes it — confirm against the upstream file.
 */
49 struct vdso_data data
;
51 } vdso_data_store __page_aligned_data
;
/* Kernel-side pointer to the vDSO data page contents. */
52 struct vdso_data
*vdso_data
= &vdso_data_store
.data
;
/*
 * Create and map the vectors page for AArch32 tasks.
 */
/*
 * Tentative declaration so compat_vdso_spec[] below can take the address
 * of its elements; the array is given its real size after the table.
 */
58 static struct page
*vectors_page
[] __ro_after_init
;
/*
 * Special-mapping descriptors for the AArch32 user pages:
 * [0] = sigreturn trampoline page, [1] = kuser helpers page.
 * NOTE(review): initializer braces, the "[sigpage]" .name field, the
 * closing "#endif" and "};" are missing from this extraction — verify
 * against the upstream file.
 */
59 static const struct vm_special_mapping compat_vdso_spec
[] = {
61 /* Must be named [sigpage] for compatibility with arm. */
63 .pages
= &vectors_page
[0],
65 #ifdef CONFIG_KUSER_HELPERS
67 .name
= "[kuserhelpers]",
68 .pages
= &vectors_page
[1],
/* Real definition: one backing page per entry in compat_vdso_spec[]. */
72 static struct page
*vectors_page
[ARRAY_SIZE(compat_vdso_spec
)] __ro_after_init
;
/*
 * alloc_vectors_page - allocate and populate the AArch32 user pages.
 *
 * Copies the compat sigreturn trampoline into a zeroed page and, with
 * CONFIG_KUSER_HELPERS, copies the kuser helpers to the top of a second
 * page; records both in vectors_page[] for later mapping into tasks.
 *
 * NOTE(review): opening brace, allocation-failure branches, return
 * statements and closing brace are missing from this extraction — the
 * control flow must be checked against the upstream file.
 */
74 static int __init
alloc_vectors_page(void)
76 #ifdef CONFIG_KUSER_HELPERS
/* Bounds of the kuser helper code, provided by the arch assembly. */
77 extern char __kuser_helper_start
[], __kuser_helper_end
[];
78 size_t kuser_sz
= __kuser_helper_end
- __kuser_helper_start
;
79 unsigned long kuser_vpage
;
/* Bounds of the compat sigreturn trampoline code. */
82 extern char __aarch32_sigret_code_start
[], __aarch32_sigret_code_end
[];
84 __aarch32_sigret_code_end
- __aarch32_sigret_code_start
;
85 unsigned long sigret_vpage
;
/* Zeroed page for the sigreturn trampoline. */
87 sigret_vpage
= get_zeroed_page(GFP_ATOMIC
);
91 #ifdef CONFIG_KUSER_HELPERS
92 kuser_vpage
= get_zeroed_page(GFP_ATOMIC
);
/* presumably the kuser-allocation failure path — TODO confirm */
94 free_page(sigret_vpage
);
/* Copy in the sigreturn code and make it visible to the I-side. */
100 memcpy((void *)sigret_vpage
, __aarch32_sigret_code_start
, sigret_sz
);
101 flush_icache_range(sigret_vpage
, sigret_vpage
+ PAGE_SIZE
);
102 vectors_page
[0] = virt_to_page(sigret_vpage
);
104 #ifdef CONFIG_KUSER_HELPERS
/* kuser helpers live at the very end of their page (ABI-fixed offset). */
106 memcpy((void *)kuser_vpage
+ 0x1000 - kuser_sz
, __kuser_helper_start
,
108 flush_icache_range(kuser_vpage
, kuser_vpage
+ PAGE_SIZE
);
109 vectors_page
[1] = virt_to_page(kuser_vpage
);
/* Run once at boot, before userspace starts. */
114 arch_initcall(alloc_vectors_page
);
/*
 * aarch32_setup_vectors_page - map the compat sigpage (and, optionally,
 * the kuser helpers page) into a new AArch32 process at exec time.
 *
 * Called from the compat binfmt path; takes mmap_sem for writing around
 * the address-space changes.
 *
 * NOTE(review): the opening brace, the body of the IS_ERR_VALUE branch
 * and several intermediate lines are missing from this extraction —
 * verify control flow against the upstream file.
 */
116 int aarch32_setup_vectors_page(struct linux_binprm
*bprm
, int uses_interp
)
118 struct mm_struct
*mm
= current
->mm
;
122 down_write(&mm
->mmap_sem
);
/* Find a free one-page slot for the sigreturn trampoline. */
123 addr
= get_unmapped_area(NULL
, 0, PAGE_SIZE
, 0, 0);
124 if (IS_ERR_VALUE(addr
)) {
129 ret
= _install_special_mapping(mm
, addr
, PAGE_SIZE
,
131 VM_MAYREAD
|VM_MAYWRITE
|VM_MAYEXEC
,
132 &compat_vdso_spec
[0]);
/* Record the sigpage address so signal delivery can find it. */
136 current
->mm
->context
.vdso
= (void *)addr
;
138 #ifdef CONFIG_KUSER_HELPERS
139 /* Map the kuser helpers at the ABI-defined high address. */
140 ret
= _install_special_mapping(mm
, AARCH32_KUSER_HELPERS_BASE
,
142 VM_READ
|VM_EXEC
|VM_MAYREAD
|VM_MAYEXEC
,
143 &compat_vdso_spec
[1]);
146 up_write(&mm
->mmap_sem
);
148 return PTR_ERR_OR_ZERO(ret
);
150 #endif /* CONFIG_COMPAT */
/*
 * Special-mapping descriptors for the AArch64 vDSO:
 * [0] = data page, [1] = code pages; populated by vdso_init().
 */
152 static struct vm_special_mapping vdso_spec
[2];
/*
 * vdso_init - validate the built-in vDSO image and build the page list
 * and special-mapping descriptors used at exec time.
 *
 * NOTE(review): opening brace, local declarations (pfn, i), error
 * returns, the kcalloc GFP argument and the .name fields of the
 * mapping initializers are missing from this extraction — verify
 * against the upstream file.
 */
154 static int __init
vdso_init(void)
/* Sanity-check the linked-in image really is an ELF object. */
159 if (memcmp(&vdso_start
, "\177ELF", 4)) {
160 pr_err("vDSO is not a valid ELF object!\n");
/* vdso_start/vdso_end are page aligned, so the shift is exact. */
164 vdso_pages
= (&vdso_end
- &vdso_start
) >> PAGE_SHIFT
;
165 pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
166 vdso_pages
+ 1, vdso_pages
, &vdso_start
, 1L, vdso_data
);
168 /* Allocate the vDSO pagelist, plus a page for the data. */
169 vdso_pagelist
= kcalloc(vdso_pages
+ 1, sizeof(struct page
*),
171 if (vdso_pagelist
== NULL
)
174 /* Grab the vDSO data page. */
175 vdso_pagelist
[0] = phys_to_page(__pa_symbol(vdso_data
));
178 /* Grab the vDSO code pages. */
179 pfn
= sym_to_pfn(&vdso_start
);
181 for (i
= 0; i
< vdso_pages
; i
++)
182 vdso_pagelist
[i
+ 1] = pfn_to_page(pfn
+ i
);
184 /* Populate the special mapping structures */
185 vdso_spec
[0] = (struct vm_special_mapping
) {
187 .pages
= vdso_pagelist
,
190 vdso_spec
[1] = (struct vm_special_mapping
) {
/* Code pages start after the single data page. */
192 .pages
= &vdso_pagelist
[1],
197 arch_initcall(vdso_init
);
/*
 * arch_setup_additional_pages - map the vDSO (data page + code pages)
 * into a new AArch64 process at exec time.
 *
 * Layout: one data page at vdso_base, immediately followed by the code
 * pages; mm->context.vdso records the *code* base for the unwinder and
 * signal code.
 *
 * NOTE(review): opening brace, IS_ERR checks after each install, goto
 * labels and return statements are missing from this extraction — the
 * error path below (clearing context.vdso, dropping mmap_sem) appears
 * to be the failure exit; confirm against the upstream file.
 */
199 int arch_setup_additional_pages(struct linux_binprm
*bprm
,
202 struct mm_struct
*mm
= current
->mm
;
203 unsigned long vdso_base
, vdso_text_len
, vdso_mapping_len
;
206 vdso_text_len
= vdso_pages
<< PAGE_SHIFT
;
207 /* Be sure to map the data page */
208 vdso_mapping_len
= vdso_text_len
+ PAGE_SIZE
;
210 down_write(&mm
->mmap_sem
);
211 vdso_base
= get_unmapped_area(NULL
, 0, vdso_mapping_len
, 0, 0);
212 if (IS_ERR_VALUE(vdso_base
)) {
213 ret
= ERR_PTR(vdso_base
);
/* Map the data page first, read-only from userspace. */
216 ret
= _install_special_mapping(mm
, vdso_base
, PAGE_SIZE
,
/* Code pages follow the data page. */
222 vdso_base
+= PAGE_SIZE
;
223 mm
->context
.vdso
= (void *)vdso_base
;
224 ret
= _install_special_mapping(mm
, vdso_base
, vdso_text_len
,
226 VM_MAYREAD
|VM_MAYWRITE
|VM_MAYEXEC
,
232 up_write(&mm
->mmap_sem
);
/* Failure path: forget the partial mapping before unlocking. */
236 mm
->context
.vdso
= NULL
;
237 up_write(&mm
->mmap_sem
);
/*
 * Update the vDSO data page to keep in sync with kernel timekeeping.
 */
242 * Update the vDSO data page to keep in sync with kernel timekeeping.
/*
 * update_vsyscall - publish the timekeeper state to the vDSO data page.
 *
 * Uses a sequence counter (tb_seq_count): odd while an update is in
 * flight, even when consistent; userspace retries reads that straddle
 * an update. NOTE(review): the opening brace, the smp_wmb() barriers
 * and the "if (!use_syscall)" guard around the clocksource fields are
 * missing from this extraction — confirm against the upstream file.
 */
244 void update_vsyscall(struct timekeeper
*tk
)
/* Fall back to the syscall path unless the arch counter is in use. */
246 u32 use_syscall
= strcmp(tk
->tkr_mono
.clock
->name
, "arch_sys_counter");
/* Open the write side of the seqcount (value becomes odd). */
248 ++vdso_data
->tb_seq_count
;
/* Always updated: coarse clock and wall-to-monotonic offsets. */
251 vdso_data
->use_syscall
= use_syscall
;
252 vdso_data
->xtime_coarse_sec
= tk
->xtime_sec
;
253 vdso_data
->xtime_coarse_nsec
= tk
->tkr_mono
.xtime_nsec
>>
255 vdso_data
->wtm_clock_sec
= tk
->wall_to_monotonic
.tv_sec
;
256 vdso_data
->wtm_clock_nsec
= tk
->wall_to_monotonic
.tv_nsec
;
259 /* tkr_mono.cycle_last == tkr_raw.cycle_last */
260 vdso_data
->cs_cycle_last
= tk
->tkr_mono
.cycle_last
;
261 vdso_data
->raw_time_sec
= tk
->raw_sec
;
262 vdso_data
->raw_time_nsec
= tk
->tkr_raw
.xtime_nsec
;
263 vdso_data
->xtime_clock_sec
= tk
->xtime_sec
;
264 vdso_data
->xtime_clock_snsec
= tk
->tkr_mono
.xtime_nsec
;
265 /* tkr_raw.xtime_nsec == 0 */
266 vdso_data
->cs_mono_mult
= tk
->tkr_mono
.mult
;
267 vdso_data
->cs_raw_mult
= tk
->tkr_raw
.mult
;
268 /* tkr_mono.shift == tkr_raw.shift */
269 vdso_data
->cs_shift
= tk
->tkr_mono
.shift
;
270 vdso_data
->btm_nsec
= ktime_to_ns(tk
->offs_boot
);
/* Close the write side (value becomes even again). */
274 ++vdso_data
->tb_seq_count
;
/*
 * update_vsyscall_tz - mirror the kernel timezone into the vDSO data
 * page so compat gettimeofday() can return it without a syscall.
 * NOTE(review): braces are missing from this extraction.
 */
277 void update_vsyscall_tz(void)
279 vdso_data
->tz_minuteswest
= sys_tz
.tz_minuteswest
;
280 vdso_data
->tz_dsttime
= sys_tz
.tz_dsttime
;