ANDROID: clock_gettime(CLOCK_BOOTTIME,) slows down >20x
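clock_gettime(CLOCK_BOOTTIME, ...) is served from the vDSO fast path as CLOCK_MONOTONIC plus the accumulated suspend ("boot") offset. Until now that offset reached the vdso data page only as a single nanosecond count (btm_nsec), and the reader code (not shown in this blobdiff) normalizes its result with __iter_div_u64_rem() from include/linux/math64.h, a helper intended for small quotients that subtracts the divisor once per whole second contained in the dividend. After hours or days of suspend time the offset dominates that dividend, so every CLOCK_BOOTTIME call pays the loop and ends up more than 20x slower than CLOCK_MONOTONIC. The last two hunks below therefore export the offset pre-split into btm_sec/btm_nsec; the remaining hunks carry the related CONFIG_VDSO32 plumbing that maps a real 32-bit vDSO for compat tasks and keeps the legacy sigreturn page only when no 32-bit vDSO is built.

A minimal, self-contained illustration of the cost difference (the iter_div helper mirrors the kernel's __iter_div_u64_rem(); all other names are made up for the demo and are not code from this tree):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Mirrors the kernel's __iter_div_u64_rem(): one loop iteration per
 * whole second contained in the dividend. */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
				 uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	/* Roughly three days of accumulated suspend time. */
	uint64_t offs_boot_ns = 3ULL * 24 * 3600 * NSEC_PER_SEC + 123456789;
	uint64_t rem;

	/* Old layout: the whole offset is folded into the nanosecond sum,
	 * so normalization loops ~259200 times per clock_gettime() call. */
	uint32_t slow_sec = iter_div_u64_rem(offs_boot_ns, NSEC_PER_SEC, &rem);
	printf("old: %u s + %llu ns\n", slow_sec, (unsigned long long)rem);

	/* New layout: update_vsyscall() pre-splits the offset, so only a
	 * sub-second remainder ever reaches the iterative divide. */
	uint64_t btm_sec  = offs_boot_ns / NSEC_PER_SEC;
	uint64_t btm_nsec = offs_boot_ns % NSEC_PER_SEC;
	uint32_t extra = iter_div_u64_rem(btm_nsec, NSEC_PER_SEC, &rem);
	printf("new: %llu s + %llu ns (loop ran %u times)\n",
	       (unsigned long long)btm_sec, (unsigned long long)rem, extra);
	return 0;
}

With the old layout the loop count equals the number of suspended seconds on every call; with the split layout only the sub-second remainder is normalized. A sketch of how the reader side can consume the new fields follows the diff.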
exynos8895/android_kernel_samsung_universal8895.git: arch/arm64/kernel/vdso.c
index 17697320ce29c0d977cf8fa18884963687919871..72916c2e67107ee7cf93f306740584d17ee326c2 100644
@@ -57,6 +57,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
+#if !defined(CONFIG_VDSO32) || defined(CONFIG_KUSER_HELPERS)
 static struct page *vectors_page[] __ro_after_init;
 static const struct vm_special_mapping compat_vdso_spec[] = {
        {
@@ -72,6 +73,7 @@ static const struct vm_special_mapping compat_vdso_spec[] = {
 #endif
 };
 static struct page *vectors_page[ARRAY_SIZE(compat_vdso_spec)] __ro_after_init;
+#endif
 
 static int __init alloc_vectors_page(void)
 {
@@ -81,6 +83,7 @@ static int __init alloc_vectors_page(void)
        unsigned long kuser_vpage;
 #endif
 
+#ifndef CONFIG_VDSO32
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        size_t sigret_sz =
                __aarch32_sigret_code_end - __aarch32_sigret_code_start;
@@ -89,19 +92,24 @@ static int __init alloc_vectors_page(void)
        sigret_vpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigret_vpage)
                return -ENOMEM;
+#endif
 
 #ifdef CONFIG_KUSER_HELPERS
        kuser_vpage = get_zeroed_page(GFP_ATOMIC);
        if (!kuser_vpage) {
+#ifndef CONFIG_VDSO32
                free_page(sigret_vpage);
+#endif
                return -ENOMEM;
        }
 #endif
 
+#ifndef CONFIG_VDSO32
        /* sigreturn code */
        memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
        flush_icache_range(sigret_vpage, sigret_vpage + PAGE_SIZE);
        vectors_page[0] = virt_to_page(sigret_vpage);
+#endif
 
 #ifdef CONFIG_KUSER_HELPERS
        /* kuser helpers */
@@ -115,6 +123,7 @@ static int __init alloc_vectors_page(void)
 }
 arch_initcall(alloc_vectors_page);
 
+#ifndef CONFIG_VDSO32
 int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
@@ -149,6 +158,7 @@ out:
 
        return PTR_ERR_OR_ZERO(ret);
 }
+#endif /* !CONFIG_VDSO32 */
 #endif /* CONFIG_COMPAT */
 
 static int __init vdso_mappings_init(const char *name,
@@ -205,6 +215,23 @@ static int __init vdso_mappings_init(const char *name,
        return 0;
 }
 
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_VDSO32
+
+static struct vdso_mappings vdso32_mappings __ro_after_init;
+
+static int __init vdso32_init(void)
+{
+       extern char vdso32_start[], vdso32_end[];
+
+       return vdso_mappings_init("vdso32", vdso32_start, vdso32_end,
+                                 &vdso32_mappings);
+}
+arch_initcall(vdso32_init);
+
+#endif /* CONFIG_VDSO32 */
+#endif /* CONFIG_COMPAT */
+
 static struct vdso_mappings vdso_mappings __ro_after_init;
 
 static int __init vdso_init(void)
@@ -246,6 +273,33 @@ static int vdso_setup(struct mm_struct *mm,
        return PTR_ERR_OR_ZERO(ret);
 }
 
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_VDSO32
+int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
+{
+       struct mm_struct *mm = current->mm;
+       void *ret;
+
+       down_write(&mm->mmap_sem);
+
+       ret = ERR_PTR(vdso_setup(mm, &vdso32_mappings));
+#ifdef CONFIG_KUSER_HELPERS
+       if (!IS_ERR(ret))
+               /* Map the kuser helpers at the ABI-defined high address. */
+               ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
+                                              PAGE_SIZE,
+                                              VM_READ|VM_EXEC|
+                                              VM_MAYREAD|VM_MAYEXEC,
+                                              &compat_vdso_spec[1]);
+#endif
+
+       up_write(&mm->mmap_sem);
+
+       return PTR_ERR_OR_ZERO(ret);
+}
+#endif /* CONFIG_VDSO32 */
+#endif /* CONFIG_COMPAT */
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
@@ -275,6 +329,8 @@ void update_vsyscall(struct timekeeper *tk)
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
        if (!use_syscall) {
+               struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
                /* tkr_mono.cycle_last == tkr_raw.cycle_last */
                vdso_data->cs_cycle_last        = tk->tkr_mono.cycle_last;
                vdso_data->raw_time_sec         = tk->raw_sec;
@@ -286,7 +342,8 @@ void update_vsyscall(struct timekeeper *tk)
                vdso_data->cs_raw_mult          = tk->tkr_raw.mult;
                /* tkr_mono.shift == tkr_raw.shift */
                vdso_data->cs_shift             = tk->tkr_mono.shift;
-               vdso_data->btm_nsec             = ktime_to_ns(tk->offs_boot);
+               vdso_data->btm_sec              = btm.tv_sec;
+               vdso_data->btm_nsec             = btm.tv_nsec;
        }
 
        smp_wmb();
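
For context on how the new fields are meant to be consumed: the vDSO reader itself is not part of this blobdiff, but with the offset pre-split the CLOCK_BOOTTIME path only has to add whole seconds plus a bounded nanosecond remainder. A rough sketch, assuming a do_boottime()-style reader; get_monotonic_snapshot() is a hypothetical stand-in for the existing seqlock-protected clocksource read, not a helper in this tree:

/* Sketch only -- the btm_sec/btm_nsec fields match the vdso_data members
 * set above; everything else is illustrative. */
static int do_boottime(const struct vdso_data *vd, struct timespec *ts)
{
	u64 sec, nsec;

	/* Hypothetical helper: returns CLOCK_MONOTONIC as sec + nsec,
	 * with nsec already well below a couple of seconds. */
	get_monotonic_snapshot(vd, &sec, &nsec);

	sec  += vd->btm_sec;	/* whole seconds of suspend: plain addition */
	nsec += vd->btm_nsec;	/* sub-second part, < NSEC_PER_SEC */

	/* The dividend stays bounded, so __iter_div_u64_rem() runs a few
	 * iterations at most instead of one per suspended second. */
	ts->tv_sec  = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}

Whether the in-tree vgettimeofday.c is structured exactly this way is not shown here; the key point is that tv_sec absorbs btm_sec directly, so the normalization cost no longer grows with accumulated suspend time.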