typedef u32 vdso_xtime_clock_sec_t;
#endif
+#ifndef _VDSO_RAW_TIME_SEC_T
+#define _VDSO_RAW_TIME_SEC_T
+typedef u32 vdso_raw_time_sec_t;
+#endif
+
/* Try to be cache-friendly on systems that don't implement the
* generic timer: fit the unconditionally updated fields in the first
* 32 bytes.
u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
u32 tz_dsttime;
+
+ /* Raw clocksource multiplier */
+ u32 cs_raw_mult;
+ /* Raw time */
+ vdso_raw_time_sec_t raw_time_sec;
+ u32 raw_time_nsec;
};
union vdso_data_store {
if (!vdso_data->use_syscall) {
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
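+ /* raw_time_nsec holds nanoseconds left-shifted by tkr_raw.shift */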
+ vdso_data->raw_time_sec = tk->raw_sec;
+ vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
+ vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
vdso_data->cs_mask = tk->tkr_mono.mask;
return 0;
}
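+/* Compute CLOCK_MONOTONIC_RAW from the raw clocksource fields in the data page */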
+static notrace int do_monotonic_raw(const struct vdso_data *vd,
+ struct timespec *ts)
+{
+ u32 seq, mult, shift;
+ u64 nsec, cycle_last;
+ u64 mask;
+ vdso_raw_time_sec_t sec;
+
+ do {
+ seq = vdso_read_begin(vd);
+
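+ /* Returning -1 makes the caller fall back to the real syscall */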
+ if (vd->use_syscall)
+ return -1;
+
+ cycle_last = vd->cs_cycle_last;
+
+ mult = vd->cs_raw_mult;
+ shift = vd->cs_shift;
+ mask = vd->cs_mask;
+
+ sec = vd->raw_time_sec;
+ nsec = vd->raw_time_nsec;
+
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
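+ /* The stored nsec and the scaled cycle delta are both shifted by cs_shift */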
+ nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
+ nsec >>= shift;
+ /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
+ ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
+ ts->tv_nsec = nsec;
+
+ return 0;
+}
+
#else /* CONFIG_ARM_ARCH_TIMER */
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
return -1;
}
+static notrace int do_monotonic_raw(const struct vdso_data *vd,
+ struct timespec *ts)
+{
+ return -1;
+}
+
#endif /* CONFIG_ARM_ARCH_TIMER */
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
if (do_monotonic(vd, ts))
goto fallback;
break;
+ case CLOCK_MONOTONIC_RAW:
+ if (do_monotonic_raw(vd, ts))
+ goto fallback;
+ break;
default:
goto fallback;
}