2 * Userspace implementations of gettimeofday() and friends.
4 * Copyright (C) 2017 Cavium, Inc.
5 * Copyright (C) 2015 Mentor Graphics Corporation
6 * Copyright (C) 2012 ARM Limited
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 * Author: Will Deacon <will.deacon@arm.com>
 * Rewritten from the aarch64 version into C by: Andrew Pinski <apinski@cavium.com>
22 * Reworked and rebased over arm version by: Mark Salyzyn <salyzyn@android.com>
25 #include <asm/barrier.h>
26 #include <linux/compiler.h> /* for notrace */
27 #include <linux/math64.h> /* for __iter_div_u64_rem() */
28 #include <uapi/linux/time.h> /* for struct timespec */
33 #ifdef ARCH_PROVIDES_TIMER
34 DEFINE_FALLBACK(gettimeofday
, struct timeval
*, tv
, struct timezone
*, tz
)
36 DEFINE_FALLBACK(clock_gettime
, clockid_t
, clock
, struct timespec
*, ts
)
37 DEFINE_FALLBACK(clock_getres
, clockid_t
, clock
, struct timespec
*, ts
)
39 static notrace u32
vdso_read_begin(const struct vdso_data
*vd
)
44 seq
= READ_ONCE(vd
->tb_seq_count
);
52 smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
56 static notrace
int vdso_read_retry(const struct vdso_data
*vd
, u32 start
)
60 smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
61 seq
= READ_ONCE(vd
->tb_seq_count
);
65 static notrace
int do_realtime_coarse(const struct vdso_data
*vd
,
71 seq
= vdso_read_begin(vd
);
73 ts
->tv_sec
= vd
->xtime_coarse_sec
;
74 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
76 } while (vdso_read_retry(vd
, seq
));
81 static notrace
int do_monotonic_coarse(const struct vdso_data
*vd
,
84 struct timespec tomono
;
89 seq
= vdso_read_begin(vd
);
91 ts
->tv_sec
= vd
->xtime_coarse_sec
;
92 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
94 tomono
.tv_sec
= vd
->wtm_clock_sec
;
95 tomono
.tv_nsec
= vd
->wtm_clock_nsec
;
97 } while (vdso_read_retry(vd
, seq
));
99 ts
->tv_sec
+= tomono
.tv_sec
;
100 /* open coding timespec_add_ns */
101 ts
->tv_sec
+= __iter_div_u64_rem(ts
->tv_nsec
+ tomono
.tv_nsec
,
102 NSEC_PER_SEC
, &nsec
);
108 #ifdef ARCH_PROVIDES_TIMER
111 * Returns the clock delta, in nanoseconds left-shifted by the clock
114 static notrace u64
get_clock_shifted_nsec(const u64 cycle_last
,
120 /* Read the virtual counter. */
121 res
= arch_vdso_read_counter();
123 res
= res
- cycle_last
;
129 static notrace
int do_realtime(const struct vdso_data
*vd
, struct timespec
*ts
)
131 u32 seq
, mult
, shift
;
132 u64 nsec
, cycle_last
;
133 #ifdef ARCH_CLOCK_FIXED_MASK
134 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
138 vdso_xtime_clock_sec_t sec
;
141 seq
= vdso_read_begin(vd
);
146 cycle_last
= vd
->cs_cycle_last
;
148 mult
= vd
->cs_mono_mult
;
149 shift
= vd
->cs_shift
;
150 #ifndef ARCH_CLOCK_FIXED_MASK
154 sec
= vd
->xtime_clock_sec
;
155 nsec
= vd
->xtime_clock_snsec
;
157 } while (unlikely(vdso_read_retry(vd
, seq
)));
159 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
161 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
162 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
168 static notrace
int do_monotonic(const struct vdso_data
*vd
, struct timespec
*ts
)
170 u32 seq
, mult
, shift
;
171 u64 nsec
, cycle_last
;
172 #ifdef ARCH_CLOCK_FIXED_MASK
173 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
177 vdso_wtm_clock_nsec_t wtm_nsec
;
181 seq
= vdso_read_begin(vd
);
186 cycle_last
= vd
->cs_cycle_last
;
188 mult
= vd
->cs_mono_mult
;
189 shift
= vd
->cs_shift
;
190 #ifndef ARCH_CLOCK_FIXED_MASK
194 sec
= vd
->xtime_clock_sec
;
195 nsec
= vd
->xtime_clock_snsec
;
197 sec
+= vd
->wtm_clock_sec
;
198 wtm_nsec
= vd
->wtm_clock_nsec
;
200 } while (unlikely(vdso_read_retry(vd
, seq
)));
202 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
205 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
206 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
212 static notrace
int do_monotonic_raw(const struct vdso_data
*vd
,
215 u32 seq
, mult
, shift
;
216 u64 nsec
, cycle_last
;
217 #ifdef ARCH_CLOCK_FIXED_MASK
218 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
222 vdso_raw_time_sec_t sec
;
225 seq
= vdso_read_begin(vd
);
230 cycle_last
= vd
->cs_cycle_last
;
232 mult
= vd
->cs_raw_mult
;
233 shift
= vd
->cs_shift
;
234 #ifndef ARCH_CLOCK_FIXED_MASK
238 sec
= vd
->raw_time_sec
;
239 nsec
= vd
->raw_time_nsec
;
241 } while (unlikely(vdso_read_retry(vd
, seq
)));
243 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
245 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
246 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
252 static notrace
int do_boottime(const struct vdso_data
*vd
, struct timespec
*ts
)
254 u32 seq
, mult
, shift
;
255 u64 nsec
, cycle_last
, wtm_nsec
;
256 #ifdef ARCH_CLOCK_FIXED_MASK
257 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
264 seq
= vdso_read_begin(vd
);
269 cycle_last
= vd
->cs_cycle_last
;
271 mult
= vd
->cs_mono_mult
;
272 shift
= vd
->cs_shift
;
273 #ifndef ARCH_CLOCK_FIXED_MASK
277 sec
= vd
->xtime_clock_sec
;
278 nsec
= vd
->xtime_clock_snsec
;
280 sec
+= vd
->wtm_clock_sec
;
281 wtm_nsec
= vd
->wtm_clock_nsec
+ vd
->btm_nsec
;
283 } while (unlikely(vdso_read_retry(vd
, seq
)));
285 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
289 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
290 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
296 #endif /* ARCH_PROVIDES_TIMER */
298 notrace
int __vdso_clock_gettime(clockid_t clock
, struct timespec
*ts
)
300 const struct vdso_data
*vd
= __get_datapage();
303 case CLOCK_REALTIME_COARSE
:
304 do_realtime_coarse(vd
, ts
);
306 case CLOCK_MONOTONIC_COARSE
:
307 do_monotonic_coarse(vd
, ts
);
309 #ifdef ARCH_PROVIDES_TIMER
311 if (do_realtime(vd
, ts
))
314 case CLOCK_MONOTONIC
:
315 if (do_monotonic(vd
, ts
))
318 case CLOCK_MONOTONIC_RAW
:
319 if (do_monotonic_raw(vd
, ts
))
323 if (do_boottime(vd
, ts
))
333 return clock_gettime_fallback(clock
, ts
);
#ifdef ARCH_PROVIDES_TIMER
/*
 * vDSO entry point for gettimeofday(2). Derives tv from the
 * CLOCK_REALTIME fast path (falling back to the syscall if that path
 * is unavailable) and copies the cached timezone out of the data page.
 * Both tv and tz may be NULL.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	const struct vdso_data *vd = __get_datapage();

	if (likely(tv != NULL)) {
		struct timespec ts;

		if (do_realtime(vd, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = ts.tv_nsec / 1000;
	}

	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = vd->tz_minuteswest;
		tz->tz_dsttime = vd->tz_dsttime;
	}

	return 0;
}
#endif /* ARCH_PROVIDES_TIMER */
360 int __vdso_clock_getres(clockid_t clock
, struct timespec
*res
)
365 case CLOCK_REALTIME_COARSE
:
366 case CLOCK_MONOTONIC_COARSE
:
369 #ifdef ARCH_PROVIDES_TIMER
371 case CLOCK_MONOTONIC
:
372 case CLOCK_MONOTONIC_RAW
:
374 nsec
= MONOTONIC_RES_NSEC
;
378 return clock_getres_fallback(clock
, res
);
381 if (likely(res
!= NULL
)) {