clock_gettime(CLOCK_BOOTTIME,) slows down after a significant amount of
suspend time has accumulated, creating a large offset between it and
CLOCK_MONOTONIC time. __iter_div_u64_rem() is intended only for adding a
few seconds' worth of nanoseconds, saving cycles over the more expensive
remainder and division operations, but it iterates one second at a time.
That quickly gets out of hand in CLOCK_BOOTTIME's case because the
boot-time offset was stored as nanoseconds only.

The fix is to split the seconds off from the boot-time offset and cap
the remaining nanoseconds below NSEC_PER_SEC, so that
__iter_div_u64_rem() does not have to iterate through the accumulated
seconds.
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Bug: 72406285
Change-Id: Ia647ef1e76b7ba3b0c003028d4b3b955635adabb
u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
u32 tz_dsttime;
- u64 btm_nsec; /* monotonic to boot time */
+ u32 btm_sec; /* monotonic to boot time */
+ u32 btm_nsec;
	/* Raw clocksource multiplier */
u32 cs_raw_mult;
/* Raw time */
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
if (!vdso_data->use_syscall) {
+ struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
vdso_data->cs_mask = tk->tkr_mono.mask;
- vdso_data->btm_nsec = ktime_to_ns(tk->offs_boot);
+ vdso_data->btm_sec = btm.tv_sec;
+ vdso_data->btm_nsec = btm.tv_nsec;
}
vdso_write_end(vdso_data);
__u64 xtime_coarse_nsec;
__u64 wtm_clock_sec; /* Wall to monotonic time */
vdso_wtm_clock_nsec_t wtm_clock_nsec;
- __u64 btm_nsec; /* monotonic to boot time */
+ __u32 btm_sec; /* monotonic to boot time */
+ __u32 btm_nsec;
__u32 tb_seq_count; /* Timebase sequence counter */
/* cs_* members must be adjacent and in this order (ldp accesses) */
__u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
+ struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
- vdso_data->btm_nsec = ktime_to_ns(tk->offs_boot);
+ vdso_data->btm_sec = btm.tv_sec;
+ vdso_data->btm_nsec = btm.tv_nsec;
}
smp_wmb();
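
On the update side, the point of going through ktime_to_timespec() is
that the conversion performs a single real division, leaving tv_nsec
already normalized below NSEC_PER_SEC, so the vdso fast path only ever
adds a sub-second nanosecond term. A minimal standalone sketch of that
split (split_boot_offset() is an illustrative stand-in, not a kernel
helper):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

struct boot_offset {
	uint32_t sec;
	uint32_t nsec;		/* always < NSEC_PER_SEC */
};

/*
 * Split a nanosecond boot offset the same way the patch does via
 * ktime_to_timespec(): one division, one remainder, no iteration.
 */
static struct boot_offset split_boot_offset(int64_t offs_boot_ns)
{
	struct boot_offset off;

	off.sec  = (uint32_t)(offs_boot_ns / NSEC_PER_SEC);
	off.nsec = (uint32_t)(offs_boot_ns % NSEC_PER_SEC);
	return off;
}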
#ifndef __VDSO_COMPILER_H
#define __VDSO_COMPILER_H
+#include <generated/autoconf.h>
+#undef CONFIG_64BIT
#include <asm/barrier.h> /* for isb() & dmb() */
#include <asm/param.h> /* for HZ */
#include <asm/unistd32.h>
static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
{
u32 seq, mult, shift;
- u64 nsec, cycle_last, wtm_nsec;
+ u64 nsec, cycle_last;
+ vdso_wtm_clock_nsec_t wtm_nsec;
#ifdef ARCH_CLOCK_FIXED_MASK
static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
sec = vd->xtime_clock_sec;
nsec = vd->xtime_clock_snsec;
- sec += vd->wtm_clock_sec;
+ sec += vd->wtm_clock_sec + vd->btm_sec;
wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec;
} while (unlikely(vdso_read_retry(vd, seq)));
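
For context, the tail of do_boottime() after this hunk looks roughly
like the sketch below (get_clock_shifted_nsec() stands for the arch
helper that reads the counter and applies mult; treat the exact code as
illustrative). With btm_nsec now bounded below NSEC_PER_SEC, the
accumulated nanoseconds amount to a few seconds at most, so the single
__iter_div_u64_rem() call runs only a handful of iterations instead of
one per second of suspend time:

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	nsec += wtm_nsec;

	/*
	 * nsec now holds a few seconds' worth of nanoseconds at most,
	 * so the iterative division terminates after a couple of passes.
	 */
	sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);

	ts->tv_sec = sec;
	ts->tv_nsec = nsec;

	return 0;
}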