++s->seq;
}
+#ifdef CONFIG_X86_64
+
+#define VGETCPU_CPU_MASK 0xfff
+
+static inline unsigned int __getcpu(void)
+{
+ unsigned int p;
+
+ /*
+ * Load per CPU data from GDT. LSL is faster than RDTSCP and
+ * works on all CPUs.
+ */
+ asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+
+ return p;
+}
+
+#endif /* CONFIG_X86_64 */
+
#endif /* _ASM_X86_VGTOD_H */
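
[ Context, not part of the patch: __getcpu() returns a segment limit that
  packs the CPU number into its low 12 bits (hence VGETCPU_CPU_MASK) and
  the NUMA node into the bits above.  A minimal sketch of how a caller
  such as the vDSO's __vdso_getcpu() decodes it; decode_getcpu() is a
  hypothetical name used only for illustration: ]

/* Illustration only: decode the (node << 12) | cpu segment limit. */
static inline void decode_getcpu(unsigned int *cpu, unsigned int *node)
{
	unsigned int p = __getcpu();

	if (cpu)
		*cpu = p & VGETCPU_CPU_MASK;	/* CPU number: bits 0-11 */
	if (node)
		*node = p >> 12;		/* NUMA node: remaining bits */
}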
#include <linux/seqlock.h>
#include <uapi/asm/vsyscall.h>
-#define VGETCPU_RDTSCP 1
-#define VGETCPU_LSL 2
-
-/* kernel space (writeable) */
-extern int vgetcpu_mode;
-extern struct timezone sys_tz;
-
-#include <asm/vvar.h>
-
extern void map_vsyscall(void);
/*
 * Called on instruction fetch fault in vsyscall page.
 * Returns true if handled.
 */
extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
-#ifdef CONFIG_X86_64
-
-#define VGETCPU_CPU_MASK 0xfff
-
-static inline unsigned int __getcpu(void)
-{
- unsigned int p;
-
- if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
- /* Load per CPU data from RDTSCP */
- native_read_tscp(&p);
- } else {
- /* Load per CPU data from GDT */
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
- }
-
- return p;
-}
-#endif /* CONFIG_X86_64 */
-
#endif /* _ASM_X86_VSYSCALL_H */
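
[ For reference, the VGETCPU_RDTSCP path removed above read the same
  (node << 12) | cpu value from the IA32_TSC_AUX MSR instead of a
  segment limit.  A rough standalone equivalent of what it did via
  native_read_tscp(); getcpu_via_rdtscp() is a hypothetical name: ]

/* Sketch of the removed RDTSCP path, for comparison only. */
static inline unsigned int getcpu_via_rdtscp(void)
{
	unsigned int eax, edx, aux;

	/* RDTSCP: EDX:EAX = TSC, ECX = IA32_TSC_AUX ((node << 12) | cpu) */
	asm volatile("rdtscp" : "=a" (eax), "=d" (edx), "=c" (aux));
	(void)eax;	/* the TSC value itself is not needed here */
	(void)edx;
	return aux;
}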
/* DECLARE_VVAR(offset, type, name) */
-DECLARE_VVAR(16, int, vgetcpu_mode)
DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
#undef DECLARE_VVAR
}
#ifdef CONFIG_X86_64
-static void vgetcpu_set_mode(void)
-{
- if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
- vgetcpu_mode = VGETCPU_RDTSCP;
- else
- vgetcpu_mode = VGETCPU_LSL;
-}
-
#ifdef CONFIG_IA32_EMULATION
/* May not be __init: called during resume */
static void syscall32_cpu_init(void)
#ifdef CONFIG_X86_32
sysenter_setup();
enable_sep_cpu();
-#else
- vgetcpu_set_mode();
#endif
cpu_detect_tlb(&boot_cpu_data);
}
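
[ The LSL trick works because boot code stores (node << 12) | cpu in the
  limit field of each CPU's GDT_ENTRY_PER_CPU descriptor, which LSL can
  then read without privilege.  A sketch (with a made-up function name)
  modeled on the kernel's vsyscall_set_cpu() as it existed around this
  patch; treat the exact constants as illustrative, not authoritative: ]

static void set_percpu_segment_limit(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	/*
	 * 12 bits of CPU plus 8 bits of node go into the 20-bit segment
	 * limit: limit[15:0] lives in descriptor bits 0-15, limit[19:16]
	 * in descriptor bits 48-51.
	 */
	d = 0x0f40000000000ULL;		/* present, DPL 3, data segment */
	d |= cpu;			/* CPU number: limit bits 0-11 */
	d |= (node & 0xf) << 12;	/* node low nibble: limit bits 12-15 */
	d |= (node >> 4) << 48;		/* node high bits: limit bits 16-19 */
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}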
#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
-DEFINE_VVAR(int, vgetcpu_mode);
-
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
static int __init vsyscall_setup(char *str)
#include <linux/kernel.h>
#include <linux/getcpu.h>
#include <linux/time.h>
-#include <asm/vsyscall.h>
#include <asm/vgtod.h>
notrace long