From 23e8740478de9126e83fd47958c954bacc210be4 Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Thu, 24 Aug 2017 13:35:24 -0700 Subject: [PATCH] FROMLIST: [PATCH v5 11/12] lib: vdso: Add support for CLOCK_BOOTTIME (cherry pick from url https://patchwork.kernel.org/patch/10044503/) Take an effort to recode the arm64 vdso code from assembler to C previously submitted by Andrew Pinski, rework it for use in both arm and arm64, overlapping any optimizations for each architecture. But instead of landing it in arm64, land the result into lib/vdso and unify both implementations to simplify future maintenance. Add a case for CLOCK_BOOTTIME as it is popular for measuring relative time on systems expected to suspend() or hibernate(). Android uses CLOCK_BOOTTIME for all relative time measurements and timeouts. Switching to vdso reduces CPU utilization and improves accuracy. There is also a desire by some partners to switch all logging over to CLOCK_BOOTTIME, and thus this operation alone would contribute a roughly one-percent reduction in CPU load.
Signed-off-by: Mark Salyzyn Cc: James Morse Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Andy Lutomirski Cc: Dmitry Safonov Cc: John Stultz Cc: Mark Rutland Cc: Laura Abbott Cc: Kees Cook Cc: Ard Biesheuvel Cc: Andy Gross Cc: Kevin Brodsky Cc: Andrew Pinski Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Bug: 63737556 Bug: 20045882 Change-Id: I76c26b054baf7f1100e03c65d6b16fe649b883b1 --- arch/arm/include/asm/vdso_datapage.h | 1 + arch/arm/kernel/vdso.c | 1 + arch/arm64/include/asm/vdso_datapage.h | 1 + arch/arm64/kernel/vdso.c | 1 + lib/vdso/vgettimeofday.c | 55 ++++++++++++++++++++++++++ 5 files changed, 59 insertions(+) diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h index 1c6e6a5d5d9d..1adfb2daac3a 100644 --- a/arch/arm/include/asm/vdso_datapage.h +++ b/arch/arm/include/asm/vdso_datapage.h @@ -64,6 +64,7 @@ struct vdso_data { u32 tz_minuteswest; /* timezone info for gettimeofday(2) */ u32 tz_dsttime; + u64 btm_nsec; /* monotonic to boot time */ /* Raw clocksource multipler */ u32 cs_raw_mult; /* Raw time */ diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index 8ab930b0bac0..e50a72ca08f1 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -327,6 +327,7 @@ void update_vsyscall(struct timekeeper *tk) /* tkr_mono.shift == tkr_raw.shift */ vdso_data->cs_shift = tk->tkr_mono.shift; vdso_data->cs_mask = tk->tkr_mono.mask; + vdso_data->btm_nsec = ktime_to_ns(tk->offs_boot); } vdso_write_end(vdso_data); diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h index 95f4a7abab80..e4fa5e054708 100644 --- a/arch/arm64/include/asm/vdso_datapage.h +++ b/arch/arm64/include/asm/vdso_datapage.h @@ -45,6 +45,7 @@ struct vdso_data { __u64 xtime_coarse_nsec; __u64 wtm_clock_sec; /* Wall to monotonic time */ vdso_wtm_clock_nsec_t wtm_clock_nsec; + __u64 btm_nsec; /* monotonic to boot time */ __u32 tb_seq_count; /* 
Timebase sequence counter */ /* cs_* members must be adjacent and in this order (ldp accesses) */ __u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */ diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 33bad6c00477..a1e023d3818c 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -267,6 +267,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->cs_raw_mult = tk->tkr_raw.mult; /* tkr_mono.shift == tkr_raw.shift */ vdso_data->cs_shift = tk->tkr_mono.shift; + vdso_data->btm_nsec = ktime_to_ns(tk->offs_boot); } smp_wmb(); diff --git a/lib/vdso/vgettimeofday.c b/lib/vdso/vgettimeofday.c index 653512210fce..a062cf949273 100644 --- a/lib/vdso/vgettimeofday.c +++ b/lib/vdso/vgettimeofday.c @@ -247,6 +247,50 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd, return 0; } +static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts) +{ + u32 seq, mult, shift; + u64 nsec, cycle_last, wtm_nsec; +#ifdef ARCH_CLOCK_FIXED_MASK + static const u64 mask = ARCH_CLOCK_FIXED_MASK; +#else + u64 mask; +#endif + __kernel_time_t sec; + + do { + seq = vdso_read_begin(vd); + + if (vd->use_syscall) + return -1; + + cycle_last = vd->cs_cycle_last; + + mult = vd->cs_mono_mult; + shift = vd->cs_shift; +#ifndef ARCH_CLOCK_FIXED_MASK + mask = vd->cs_mask; +#endif + + sec = vd->xtime_clock_sec; + nsec = vd->xtime_clock_snsec; + + sec += vd->wtm_clock_sec; + wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec; + + } while (unlikely(vdso_read_retry(vd, seq))); + + nsec += get_clock_shifted_nsec(cycle_last, mult, mask); + nsec >>= shift; + nsec += wtm_nsec; + + /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */ + ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec); + ts->tv_nsec = nsec; + + return 0; +} + #else /* ARCH_PROVIDES_TIMER */ static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts) @@ -265,6 +309,12 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd, 
return -1; } +static notrace int do_boottime(const struct vdso_data *vd, + struct timespec *ts) +{ + return -1; +} + #endif /* ARCH_PROVIDES_TIMER */ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) @@ -290,6 +340,10 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) if (do_monotonic_raw(vd, ts)) goto fallback; break; + case CLOCK_BOOTTIME: + if (do_boottime(vd, ts)) + goto fallback; + break; default: goto fallback; } @@ -326,6 +380,7 @@ int __vdso_clock_getres(clockid_t clock, struct timespec *res) long nsec; if (clock == CLOCK_REALTIME || + clock == CLOCK_BOOTTIME || clock == CLOCK_MONOTONIC || clock == CLOCK_MONOTONIC_RAW) nsec = MONOTONIC_RES_NSEC; -- 2.20.1