/*
 *  linux/arch/x86_64/kernel/vsyscall.c
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Thanks to hpa@transmeta.com for some useful hints.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 *  at virtual address -10Mbyte+1024bytes etc... There are at most 4
 *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *  If we want more than four we need a vDSO.
 *
 *  Note: the concept clashes with user mode linux. If you use UML and
 *  want per-guest time just set the kernel.vsyscall64 sysctl to 0.
 */
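
/*
 * Illustrative only, not part of the original file: a minimal user-space
 * sketch of how the fixed-address entries defined below are reached. The
 * base address assumes the conventional x86_64 layout where vsyscall slot N
 * sits at 0xffffffffff600000 + N * 1024 (see VSYSCALL_START/VSYSCALL_ADDR
 * in <asm/vsyscall.h>); the function-pointer typedefs are made up for the
 * example.
 *
 *	#include <sys/time.h>
 *
 *	typedef int    (*vgtod_fn)(struct timeval *tv, struct timezone *tz);
 *	typedef time_t (*vtime_fn)(time_t *t);
 *
 *	int main(void)
 *	{
 *		vgtod_fn vgtod = (vgtod_fn)0xffffffffff600000UL; // slot 0
 *		vtime_fn vtime = (vtime_fn)0xffffffffff600400UL; // slot 1
 *		struct timeval tv;
 *
 *		vgtod(&tv, 0);			// vgettimeofday(&tv, NULL)
 *		time_t now = vtime(0);		// vtime(NULL)
 *		(void)now;
 *		return 0;
 *	}
 */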

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>

#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define __syscall_clobber "r11","rcx","memory"

int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
int __vgetcpu_mode __section_vgetcpu_mode;

#include <asm/unistd.h>

static __always_inline void timeval_normalize(struct timeval * tv)
{
        time_t __sec;

        __sec = tv->tv_usec / 1000000;
        if (__sec) {
                tv->tv_usec %= 1000000;
                tv->tv_sec += __sec;
        }
}

static __always_inline void do_vgettimeofday(struct timeval * tv)
{
        long sequence, t;
        unsigned long sec, usec;

        do {
                sequence = read_seqbegin(&__xtime_lock);

                sec = __xtime.tv_sec;
                usec = __xtime.tv_nsec / 1000;

                if (__vxtime.mode != VXTIME_HPET) {
                        t = get_cycles_sync();
                        if (t < __vxtime.last_tsc)
                                t = __vxtime.last_tsc;
                        usec += ((t - __vxtime.last_tsc) *
                                 __vxtime.tsc_quot) >> 32;
                        /* See comment in x86_64 do_gettimeofday. */
                } else {
                        usec += ((readl((void __iomem *)
                                   fix_to_virt(VSYSCALL_HPET) + 0xf0) -
                                  __vxtime.last) * __vxtime.quot) >> 32;
                }
        } while (read_seqretry(&__xtime_lock, sequence));

        tv->tv_sec = sec + usec / 1000000;
        tv->tv_usec = usec % 1000000;
}
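
/*
 * Note on the scaling in do_vgettimeofday(): __vxtime.tsc_quot (and .quot
 * for the HPET path) is expected to be a 32.32 fixed-point "microseconds
 * per tick" factor set up by the x86_64 timer code, so
 * (delta * quot) >> 32 yields elapsed microseconds since last_tsc/last.
 * For example, assuming tsc_quot = (1000UL << 32) / cpu_khz, a delta of
 * cpu_khz cycles (one millisecond of wall time) works out to roughly
 * 1000 usec.
 */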

/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
static __always_inline void do_get_tz(struct timezone * tz)
{
        *tz = __sys_tz;
}

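/*
 * The vsysc1/vsysc2 labels in the two fallback stubs below mark the actual
 * syscall instructions; vsyscall_sysctl_change() further down patches those
 * two bytes between "syscall" and two NOPs when the vsyscall64 sysctl is
 * toggled.
 */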
static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
        int ret;
        asm volatile("vsysc2: syscall"
                : "=a" (ret)
                : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
        return ret;
}

static __always_inline long time_syscall(long *t)
{
        long secs;
        asm volatile("vsysc1: syscall"
                : "=a" (secs)
                : "0" (__NR_time),"D" (t) : __syscall_clobber);
        return secs;
}

int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
        if (!__sysctl_vsyscall)
                return gettimeofday(tv,tz);
        if (tv)
                do_vgettimeofday(tv);
        if (tz)
                do_get_tz(tz);
        return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
        if (!__sysctl_vsyscall)
                return time_syscall(t);
        else if (t)
                *t = __xtime.tv_sec;
        return __xtime.tv_sec;
}

/* Fast way to get the current CPU and node.
   This helps to do per-node and per-CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two-element long array.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
        unsigned int dummy, p;
        unsigned long j = 0;

        /* Fast cache - only recompute the value once per jiffy and avoid
           the relatively costly rdtscp/cpuid otherwise.
           This works because the scheduler usually keeps the process
           on the same CPU and this syscall doesn't guarantee its
           results anyway.
           We do this here because otherwise user space would do it on
           its own in a likely inferior way (no access to jiffies).
           If you don't like it, pass NULL. */
        if (tcache && tcache->blob[0] == (j = __jiffies)) {
                p = tcache->blob[1];
        } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
                /* Load per CPU data from RDTSCP */
                rdtscp(dummy, dummy, p);
        } else {
                /* Load per CPU data from GDT */
                asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
        }
        if (tcache) {
                tcache->blob[0] = j;
                tcache->blob[1] = p;
        }
        if (cpu)
                *cpu = p & 0xfff;
        if (node)
                *node = p >> 12;
        return 0;
}
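
/*
 * Illustrative only, not part of the original file: a user-space sketch of
 * calling the vgetcpu entry above through its fixed address (slot 2, i.e.
 * 0xffffffffff600800 under the conventional layout -- an assumption, check
 * <asm/vsyscall.h>). The cache argument is caller-owned scratch space that
 * matches the two-long blob described above; pass NULL to skip the
 * jiffies-based caching.
 *
 *	typedef long (*vgetcpu_fn)(unsigned *cpu, unsigned *node,
 *				   unsigned long cache[2]);
 *
 *	int main(void)
 *	{
 *		vgetcpu_fn vgetcpu = (vgetcpu_fn)0xffffffffff600800UL;
 *		unsigned cpu, node;
 *		unsigned long cache[2] = { 0, 0 };
 *
 *		vgetcpu(&cpu, &node, cache);	// cpu = low 12 bits, node = rest
 *		return 0;
 *	}
 */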

long __vsyscall(3) venosys_1(void)
{
        return -ENOSYS;
}

#ifdef CONFIG_SYSCTL

#define SYSCALL 0x050f
#define NOP2    0x9090
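/* 0x050f is the two-byte "syscall" opcode (0f 05) viewed as a little-endian
   u16, and 0x9090 is two one-byte NOPs; writew() below swaps one for the
   other. */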

/*
 * NOP out syscall in vsyscall page when not needed.
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        extern u16 vsysc1, vsysc2;
        u16 __iomem *map1;
        u16 __iomem *map2;
        int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
        if (!write)
                return ret;
        /* gcc has some trouble with __va(__pa()), so just do it this
           way. */
        map1 = ioremap(__pa_symbol(&vsysc1), 2);
        if (!map1)
                return -ENOMEM;
        map2 = ioremap(__pa_symbol(&vsysc2), 2);
        if (!map2) {
                ret = -ENOMEM;
                goto out;
        }
        if (!sysctl_vsyscall) {
                writew(SYSCALL, map1);
                writew(SYSCALL, map2);
        } else {
                writew(NOP2, map1);
                writew(NOP2, map2);
        }
        iounmap(map2);
out:
        iounmap(map1);
        return ret;
}

static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
                                void __user *oldval, size_t __user *oldlenp,
                                void __user *newval, size_t newlen)
{
        return -ENOSYS;
}

static ctl_table kernel_table2[] = {
        { .ctl_name = 99, .procname = "vsyscall64",
          .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
          .strategy = vsyscall_sysctl_nostrat,
          .proc_handler = vsyscall_sysctl_change },
        { 0, }
};

static ctl_table kernel_root_table2[] = {
        { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
          .child = kernel_table2 },
        { 0 },
};

#endif

/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does. */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
        unsigned long *d;
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node[cpu];
#endif
        if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);

        /* Store cpu number in limit so that it can be loaded quickly
           in user space in vgetcpu.
           12 bits for the CPU and 8 bits for the node. */
        d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
        *d = 0x0f40000000000ULL;
        *d |= cpu;
        *d |= (node & 0xf) << 12;
        *d |= (node >> 4) << 48;
}
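
/*
 * Worked example for the encoding above (illustrative): cpu 5 on node 3
 * gives a segment limit of (3 << 12) | 5 = 0x3005. The "lsl" in vgetcpu()
 * reads that limit back, so cpu = 0x3005 & 0xfff = 5 and
 * node = 0x3005 >> 12 = 3. Node bits above 15 land in the upper four limit
 * bits via the (node >> 4) << 48 term.
 */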

static void __cpuinit cpu_vsyscall_init(void *arg)
{
        /* preemption should be already off */
        vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
        long cpu = (long)arg;
        if (action == CPU_ONLINE)
                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
        return NOTIFY_DONE;
}

static void __init map_vsyscall(void)
{
        extern char __vsyscall_0;
        unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

        /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
        __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}

static int __init vsyscall_init(void)
{
        BUG_ON(((unsigned long) &vgettimeofday !=
                VSYSCALL_ADDR(__NR_vgettimeofday)));
        BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
        BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
        BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
        map_vsyscall();
#ifdef CONFIG_SYSCTL
        register_sysctl_table(kernel_root_table2, 0);
#endif
        on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
        hotcpu_notifier(cpu_vsyscall_notifier, 0);
        return 0;
}

__initcall(vsyscall_init);