ANDROID: clock_gettime(CLOCK_BOOTTIME,) slows down >20x
Author:     Mark Salyzyn <salyzyn@google.com>
AuthorDate: Wed, 24 Jan 2018 22:00:19 +0000 (14:00 -0800)
Commit:     Bruno Martins <bgcngm@gmail.com>
CommitDate: Fri, 20 Oct 2023 17:09:54 +0000 (18:09 +0100)
clock_gettime(CLOCK_BOOTTIME,) slows down after a significant
accumulation of suspend time, which creates a large offset between it
and CLOCK_MONOTONIC time.  __iter_div_u64_rem() is meant only for
adding a few seconds-plus-nanoseconds worth of time, saving cycles
over the more expensive remainder and division operations, but it
iterates one second at a time.  That quickly goes out of scale for
CLOCK_BOOTTIME, whose boot-time offset was stored as nanoseconds only.

The fix is to split the seconds off the boot-time offset and cap the
nanoseconds below one second so that __iter_div_u64_rem() does not
iterate.
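
With the offset split, only sub-second values reach the divide helper
on the fast path.  A simplified sketch of the tail of do_boottime() in
lib/vdso/vgettimeofday.c, after the sequence-lock loop shown in the
diff below (illustrative and lightly abridged; get_clock_shifted_nsec()
is a helper in the same file):

    nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
    nsec >>= shift;
    nsec += wtm_nsec;       /* now includes btm_nsec, capped below 1s */

    /* open-coded timespec_add_ns(): the loop in __iter_div_u64_rem()
     * now runs at most a few times, not once per second of suspend */
    ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
    ts->tv_nsec = nsec;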

Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Bug: 72406285
Change-Id: Ia647ef1e76b7ba3b0c003028d4b3b955635adabb
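
A minimal userspace loop is enough to observe the slowdown on a device
that has accumulated a lot of suspend time (an illustrative harness,
not part of the original report):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts, t0, t1;
            long i, iters = 1000000;

            clock_gettime(CLOCK_MONOTONIC, &t0);
            for (i = 0; i < iters; i++)
                    clock_gettime(CLOCK_BOOTTIME, &ts);
            clock_gettime(CLOCK_MONOTONIC, &t1);

            printf("%.1f ns per call\n",
                   ((t1.tv_sec - t0.tv_sec) * 1e9 +
                    (t1.tv_nsec - t0.tv_nsec)) / iters);
            return 0;
    }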

arch/arm/include/asm/vdso_datapage.h
arch/arm/kernel/vdso.c
arch/arm64/include/asm/vdso_datapage.h
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso32/compiler.h
lib/vdso/vgettimeofday.c

diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
index 1adfb2daac3aff1a4b5c5f82079762460dc2d0a6..0120852b6b1297944d1fd777144e423f091586a1 100644
--- a/arch/arm/include/asm/vdso_datapage.h
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -64,7 +64,8 @@ struct vdso_data {
        u32 tz_minuteswest;     /* timezone info for gettimeofday(2) */
        u32 tz_dsttime;
 
-       u64 btm_nsec;           /* monotonic to boot time */
+       u32 btm_sec;            /* monotonic to boot time */
+       u32 btm_nsec;
        /* Raw clocksource multipler */
        u32 cs_raw_mult;
        /* Raw time */
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 598e34e47bb53ee5f8c38d237cf686b59d2d12a8..4b37e4340e08068304ac9a4bcc546187b2b2b5d2 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -321,6 +321,8 @@ void update_vsyscall(struct timekeeper *tk)
        vdso_data->wtm_clock_nsec               = wtm->tv_nsec;
 
        if (!vdso_data->use_syscall) {
+               struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
                vdso_data->cs_cycle_last        = tk->tkr_mono.cycle_last;
                vdso_data->raw_time_sec         = tk->raw_sec;
                vdso_data->raw_time_nsec        = tk->tkr_raw.xtime_nsec;
@@ -331,7 +333,8 @@ void update_vsyscall(struct timekeeper *tk)
                /* tkr_mono.shift == tkr_raw.shift */
                vdso_data->cs_shift             = tk->tkr_mono.shift;
                vdso_data->cs_mask              = tk->tkr_mono.mask;
-               vdso_data->btm_nsec             = ktime_to_ns(tk->offs_boot);
+               vdso_data->btm_sec              = btm.tv_sec;
+               vdso_data->btm_nsec             = btm.tv_nsec;
        }
 
        vdso_write_end(vdso_data);
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index e4fa5e054708174303f610eeb07956bd2639c898..348b9be9efe75b5f1736f438120227369f13af2f 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -45,7 +45,8 @@ struct vdso_data {
        __u64 xtime_coarse_nsec;
        __u64 wtm_clock_sec;    /* Wall to monotonic time */
        vdso_wtm_clock_nsec_t wtm_clock_nsec;
-       __u64 btm_nsec;         /* monotonic to boot time */
+       __u32 btm_sec;          /* monotonic to boot time */
+       __u32 btm_nsec;
        __u32 tb_seq_count;     /* Timebase sequence counter */
        /* cs_* members must be adjacent and in this order (ldp accesses) */
        __u32 cs_mono_mult;     /* NTP-adjusted clocksource multiplier */
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 913983464e745c61503cd360bb4c7c5b94a9156d..e75193fa9f1a16b1971ef985f2d028c2edaacf44 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -335,6 +335,8 @@ void update_vsyscall(struct timekeeper *tk)
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
        if (!use_syscall) {
+               struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
                /* tkr_mono.cycle_last == tkr_raw.cycle_last */
                vdso_data->cs_cycle_last        = tk->tkr_mono.cycle_last;
                vdso_data->raw_time_sec         = tk->raw_sec;
@@ -345,7 +347,8 @@ void update_vsyscall(struct timekeeper *tk)
                vdso_data->cs_raw_mult          = tk->tkr_raw.mult;
                /* tkr_mono.shift == tkr_raw.shift */
                vdso_data->cs_shift             = tk->tkr_mono.shift;
-               vdso_data->btm_nsec             = ktime_to_ns(tk->offs_boot);
+               vdso_data->btm_sec              = btm.tv_sec;
+               vdso_data->btm_nsec             = btm.tv_nsec;
        }
 
        smp_wmb();
diff --git a/arch/arm64/kernel/vdso32/compiler.h b/arch/arm64/kernel/vdso32/compiler.h
index a2ac3959518a7ef0bd6c2c10108135856c97785a..19a43fc37bb96ef363ae6e5f227f7cffeca122ba 100644
--- a/arch/arm64/kernel/vdso32/compiler.h
+++ b/arch/arm64/kernel/vdso32/compiler.h
@@ -23,6 +23,8 @@
 #ifndef __VDSO_COMPILER_H
 #define __VDSO_COMPILER_H
 
+#include <generated/autoconf.h>
+#undef CONFIG_64BIT
 #include <asm/barrier.h>       /* for isb() & dmb()    */
 #include <asm/param.h>         /* for HZ               */
 #include <asm/unistd32.h>
diff --git a/lib/vdso/vgettimeofday.c b/lib/vdso/vgettimeofday.c
index 00d93df8045f7945cc20ca956ec06b0a45d761a9..8ca2b5374c2da134a0b42c07c3acc50c46361304 100644
--- a/lib/vdso/vgettimeofday.c
+++ b/lib/vdso/vgettimeofday.c
@@ -252,7 +252,8 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd,
 static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
 {
        u32 seq, mult, shift;
-       u64 nsec, cycle_last, wtm_nsec;
+       u64 nsec, cycle_last;
+       vdso_wtm_clock_nsec_t wtm_nsec;
 #ifdef ARCH_CLOCK_FIXED_MASK
        static const u64 mask = ARCH_CLOCK_FIXED_MASK;
 #else
@@ -277,7 +278,7 @@ static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
                sec = vd->xtime_clock_sec;
                nsec = vd->xtime_clock_snsec;
 
-               sec += vd->wtm_clock_sec;
+               sec += vd->wtm_clock_sec + vd->btm_sec;
                wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec;
 
        } while (unlikely(vdso_read_retry(vd, seq)));