/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2017 Cavium, Inc.
 * Copyright (C) 2015 Mentor Graphics Corporation
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 * Rewritten from arm64 version into C by: Andrew Pinski <apinski@cavium.com>
 * Reworked and rebased over arm version by: Mark Salyzyn <salyzyn@android.com>
 */
25 #include <asm/barrier.h>
26 #include <linux/compiler.h> /* for notrace */
27 #include <linux/math64.h> /* for __iter_div_u64_rem() */
28 #include <uapi/linux/time.h> /* for struct timespec */
33 DEFINE_FALLBACK(gettimeofday
, struct timeval
*, tv
, struct timezone
*, tz
)
34 DEFINE_FALLBACK(clock_gettime
, clockid_t
, clock
, struct timespec
*, ts
)
35 DEFINE_FALLBACK(clock_getres
, clockid_t
, clock
, struct timespec
*, ts
)
/*
 * vdso_read_begin - open the read side of the vDSO data-page seqlock.
 *
 * Snapshots vd->tb_seq_count for later validation by vdso_read_retry().
 *
 * NOTE(review): this extract is garbled — the stray leading integers are
 * original line numbers fused into the text, and interior lines (the seq
 * declaration, any spin-until-stable loop, and the return) are not
 * visible here; do not assume their contents.
 */
37 static notrace u32
vdso_read_begin(const struct vdso_data
*vd
)
/* Read the writer's sequence counter from the data page. */
42 seq
= READ_ONCE(vd
->tb_seq_count
);
/* Order the counter read before the subsequent data-page field reads. */
50 smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
/*
 * vdso_read_retry - close the read side of the vDSO data-page seqlock.
 *
 * Re-reads vd->tb_seq_count so the caller can compare it against the
 * value returned by vdso_read_begin() (passed in as @start) and retry
 * if a concurrent kernel update intervened.
 *
 * NOTE(review): garbled extract — the comparison/return line is not
 * visible in this view.
 */
54 static notrace
int vdso_read_retry(const struct vdso_data
*vd
, u32 start
)
/* Order prior data-page reads before re-reading the counter. */
58 smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
59 seq
= READ_ONCE(vd
->tb_seq_count
);
/*
 * do_realtime_coarse - CLOCK_REALTIME_COARSE.
 *
 * Copies the kernel's pre-computed coarse realtime snapshot
 * (xtime_coarse_sec/xtime_coarse_nsec) out of the vDSO data page,
 * looping until a consistent (seqlock-stable) pair is read.
 *
 * NOTE(review): garbled extract — stray fused line numbers; the second
 * parameter (struct timespec *ts), the do { opener and the return are
 * not visible here.
 */
63 static notrace
int do_realtime_coarse(const struct vdso_data
*vd
,
69 seq
= vdso_read_begin(vd
);
/* Coarse time needs no counter read — just copy the snapshot. */
71 ts
->tv_sec
= vd
->xtime_coarse_sec
;
72 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
/* Retry if the kernel updated the data page while we were reading. */
74 } while (vdso_read_retry(vd
, seq
));
/*
 * do_monotonic_coarse - CLOCK_MONOTONIC_COARSE.
 *
 * Reads the coarse realtime snapshot plus the wall-to-monotonic offset
 * (wtm_clock_sec/wtm_clock_nsec) under the seqlock, then adds the
 * offset, open-coding timespec_add_ns for the nanosecond carry.
 *
 * NOTE(review): garbled extract — stray fused line numbers; the ts
 * parameter, u32 seq / u64 nsec declarations, do { opener, the final
 * ts->tv_nsec = nsec store and the return are not visible here.
 */
79 static notrace
int do_monotonic_coarse(const struct vdso_data
*vd
,
/* Local copy of the wall-to-monotonic offset taken under the seqlock. */
82 struct timespec tomono
;
87 seq
= vdso_read_begin(vd
);
89 ts
->tv_sec
= vd
->xtime_coarse_sec
;
90 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
92 tomono
.tv_sec
= vd
->wtm_clock_sec
;
93 tomono
.tv_nsec
= vd
->wtm_clock_nsec
;
95 } while (vdso_read_retry(vd
, seq
));
/* Apply the monotonic offset outside the retry loop. */
97 ts
->tv_sec
+= tomono
.tv_sec
;
98 /* open coding timespec_add_ns */
/* Carry whole seconds out of the combined nanosecond value. */
99 ts
->tv_sec
+= __iter_div_u64_rem(ts
->tv_nsec
+ tomono
.tv_nsec
,
100 NSEC_PER_SEC
, &nsec
);
/* High-resolution paths are only built when the arch supplies a counter. */
106 #ifdef ARCH_PROVIDES_TIMER
/*
 * get_clock_shifted_nsec - nanoseconds elapsed since @cycle_last,
 * still left-shifted by the clocksource shift (caller shifts down).
 *
 * NOTE(review): garbled extract — the mult/mask parameters, the
 * masking/multiplication and the return are not visible here.
 */
109 * Returns the clock delta, in nanoseconds left-shifted by the clock
112 static notrace u64
get_clock_shifted_nsec(const u64 cycle_last
,
118 /* Read the virtual counter. */
119 res
= arch_vdso_read_counter();
/* Delta since the cycle value sampled at the last kernel update. */
121 res
= res
- cycle_last
;
/*
 * do_realtime - CLOCK_REALTIME fast path.
 *
 * Under the data-page seqlock, samples the clocksource state
 * (cs_cycle_last, cs_mono_mult, cs_shift) and the realtime base
 * (xtime_clock_sec, xtime_clock_snsec — shifted nanoseconds), then
 * adds the shifted counter delta and open-codes timespec_add_ns.
 *
 * NOTE(review): garbled extract — stray fused line numbers; the do {
 * opener, the #else/#endif arms of ARCH_CLOCK_FIXED_MASK, the
 * nsec >>= shift step, the final ts->tv_nsec store and the return are
 * not visible here.
 */
127 static notrace
int do_realtime(const struct vdso_data
*vd
, struct timespec
*ts
)
129 u32 seq
, mult
, shift
;
130 u64 nsec
, cycle_last
;
/* With a fixed counter mask the mask is a compile-time constant. */
131 #ifdef ARCH_CLOCK_FIXED_MASK
132 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
136 vdso_xtime_clock_sec_t sec
;
139 seq
= vdso_read_begin(vd
);
144 cycle_last
= vd
->cs_cycle_last
;
146 mult
= vd
->cs_mono_mult
;
147 shift
= vd
->cs_shift
;
/* Otherwise the mask presumably comes from the data page — not visible. */
148 #ifndef ARCH_CLOCK_FIXED_MASK
152 sec
= vd
->xtime_clock_sec
;
153 nsec
= vd
->xtime_clock_snsec
;
155 } while (unlikely(vdso_read_retry(vd
, seq
)));
/* Add the counter delta (still left-shifted) to the shifted base. */
157 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
159 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
160 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
/*
 * do_monotonic - CLOCK_MONOTONIC fast path.
 *
 * Same structure as do_realtime(), but additionally folds in the
 * wall-to-monotonic offset (wtm_clock_sec/wtm_clock_nsec) sampled
 * under the same seqlock pass.
 *
 * NOTE(review): garbled extract — stray fused line numbers; the do {
 * opener, the sec declaration, the shift-down of nsec, the wtm_nsec
 * addition, the final ts->tv_nsec store and the return are not
 * visible here.
 */
166 static notrace
int do_monotonic(const struct vdso_data
*vd
, struct timespec
*ts
)
168 u32 seq
, mult
, shift
;
169 u64 nsec
, cycle_last
;
170 #ifdef ARCH_CLOCK_FIXED_MASK
171 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
/* Nanosecond part of the wall-to-monotonic offset. */
175 vdso_wtm_clock_nsec_t wtm_nsec
;
179 seq
= vdso_read_begin(vd
);
184 cycle_last
= vd
->cs_cycle_last
;
186 mult
= vd
->cs_mono_mult
;
187 shift
= vd
->cs_shift
;
188 #ifndef ARCH_CLOCK_FIXED_MASK
192 sec
= vd
->xtime_clock_sec
;
193 nsec
= vd
->xtime_clock_snsec
;
/* Fold in the wall-to-monotonic second/nanosecond offsets. */
195 sec
+= vd
->wtm_clock_sec
;
196 wtm_nsec
= vd
->wtm_clock_nsec
;
198 } while (unlikely(vdso_read_retry(vd
, seq
)));
200 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
203 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
204 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
/*
 * do_monotonic_raw - CLOCK_MONOTONIC_RAW fast path.
 *
 * Like do_realtime() but uses the raw (NTP-unadjusted) multiplier
 * cs_raw_mult and the raw time base (raw_time_sec/raw_time_nsec).
 *
 * NOTE(review): garbled extract — stray fused line numbers; the ts
 * parameter, do { opener, nsec shift-down, final ts->tv_nsec store
 * and return are not visible here.
 */
210 static notrace
int do_monotonic_raw(const struct vdso_data
*vd
,
213 u32 seq
, mult
, shift
;
214 u64 nsec
, cycle_last
;
215 #ifdef ARCH_CLOCK_FIXED_MASK
216 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
220 vdso_raw_time_sec_t sec
;
223 seq
= vdso_read_begin(vd
);
228 cycle_last
= vd
->cs_cycle_last
;
/* Raw clock uses the unadjusted multiplier. */
230 mult
= vd
->cs_raw_mult
;
231 shift
= vd
->cs_shift
;
232 #ifndef ARCH_CLOCK_FIXED_MASK
236 sec
= vd
->raw_time_sec
;
237 nsec
= vd
->raw_time_nsec
;
239 } while (unlikely(vdso_read_retry(vd
, seq
)));
241 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
243 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
244 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
/*
 * do_boottime - CLOCK_BOOTTIME fast path.
 *
 * Same as do_monotonic() but the nanosecond offset additionally
 * includes btm_nsec, the accumulated monotonic-to-boottime (suspend
 * time) offset from the data page.
 *
 * NOTE(review): garbled extract — stray fused line numbers; the sec
 * declaration, do { opener, nsec shift-down, wtm_nsec addition, final
 * ts->tv_nsec store and return are not visible here.
 */
250 static notrace
int do_boottime(const struct vdso_data
*vd
, struct timespec
*ts
)
252 u32 seq
, mult
, shift
;
253 u64 nsec
, cycle_last
, wtm_nsec
;
254 #ifdef ARCH_CLOCK_FIXED_MASK
255 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
262 seq
= vdso_read_begin(vd
);
267 cycle_last
= vd
->cs_cycle_last
;
269 mult
= vd
->cs_mono_mult
;
270 shift
= vd
->cs_shift
;
271 #ifndef ARCH_CLOCK_FIXED_MASK
275 sec
= vd
->xtime_clock_sec
;
276 nsec
= vd
->xtime_clock_snsec
;
278 sec
+= vd
->wtm_clock_sec
;
/* Boottime = monotonic offset plus time spent suspended (btm_nsec). */
279 wtm_nsec
= vd
->wtm_clock_nsec
+ vd
->btm_nsec
;
281 } while (unlikely(vdso_read_retry(vd
, seq
)));
283 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
287 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
288 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
294 #else /* ARCH_PROVIDES_TIMER */
/*
 * Stub for !ARCH_PROVIDES_TIMER builds — body not visible in this
 * extract; presumably signals the caller to take the syscall fallback.
 * TODO(review): confirm against the full file.
 */
296 static notrace
int do_realtime(const struct vdso_data
*vd
, struct timespec
*ts
)
/*
 * Stub for !ARCH_PROVIDES_TIMER builds — body not visible in this
 * extract; presumably signals the caller to take the syscall fallback.
 * TODO(review): confirm against the full file.
 */
301 static notrace
int do_monotonic(const struct vdso_data
*vd
, struct timespec
*ts
)
/*
 * Stub for !ARCH_PROVIDES_TIMER builds — remainder of the signature
 * and the body are not visible in this extract.
 */
306 static notrace
int do_monotonic_raw(const struct vdso_data
*vd
,
/*
 * Stub for !ARCH_PROVIDES_TIMER builds — remainder of the signature
 * and the body are not visible in this extract.
 */
312 static notrace
int do_boottime(const struct vdso_data
*vd
,
318 #endif /* ARCH_PROVIDES_TIMER */
/*
 * __vdso_clock_gettime - userspace entry point for clock_gettime().
 *
 * Dispatches on @clock to the appropriate do_* helper; if a helper
 * reports failure (or the clock is unhandled), falls back to the real
 * clock_gettime syscall via clock_gettime_fallback().
 *
 * NOTE(review): garbled extract — stray fused line numbers; the
 * switch statement scaffolding, break/return-0 lines, the
 * CLOCK_REALTIME and CLOCK_BOOTTIME case labels, and the closing
 * brace are not visible here.
 */
320 notrace
int __vdso_clock_gettime(clockid_t clock
, struct timespec
*ts
)
/* All helpers read from the shared kernel/user vDSO data page. */
322 const struct vdso_data
*vd
= __get_datapage();
/* Coarse clocks never fail — no fallback check needed. */
325 case CLOCK_REALTIME_COARSE
:
326 do_realtime_coarse(vd
, ts
);
328 case CLOCK_MONOTONIC_COARSE
:
329 do_monotonic_coarse(vd
, ts
);
/* High-res clocks may fail (e.g. no arch timer) — then fall back. */
332 if (do_realtime(vd
, ts
))
335 case CLOCK_MONOTONIC
:
336 if (do_monotonic(vd
, ts
))
339 case CLOCK_MONOTONIC_RAW
:
340 if (do_monotonic_raw(vd
, ts
))
344 if (do_boottime(vd
, ts
))
/* Unhandled clock IDs go to the kernel. */
353 return clock_gettime_fallback(clock
, ts
);
/*
 * __vdso_gettimeofday - userspace entry point for gettimeofday().
 *
 * If @tv is non-NULL, obtains CLOCK_REALTIME via do_realtime() into a
 * local timespec and converts it to a timeval (nsec / 1000 -> usec);
 * on failure the real syscall is used.  If @tz is non-NULL, the cached
 * timezone fields are copied straight from the vDSO data page.
 *
 * NOTE(review): garbled extract — stray fused line numbers; the local
 * struct timespec ts declaration, closing braces and the return 0 are
 * not visible here.
 */
356 notrace
int __vdso_gettimeofday(struct timeval
*tv
, struct timezone
*tz
)
358 const struct vdso_data
*vd
= __get_datapage();
360 if (likely(tv
!= NULL
)) {
/* Fast path failed — let the kernel do the whole job. */
363 if (do_realtime(vd
, &ts
))
364 return gettimeofday_fallback(tv
, tz
);
366 tv
->tv_sec
= ts
.tv_sec
;
/* timeval carries microseconds, not nanoseconds. */
367 tv
->tv_usec
= ts
.tv_nsec
/ 1000;
/* Timezone is legacy; served from the cached data-page copy. */
370 if (unlikely(tz
!= NULL
)) {
371 tz
->tz_minuteswest
= vd
->tz_minuteswest
;
372 tz
->tz_dsttime
= vd
->tz_dsttime
;
378 int __vdso_clock_getres(clockid_t clock
, struct timespec
*res
)
382 if (clock
== CLOCK_REALTIME
||
383 clock
== CLOCK_BOOTTIME
||
384 clock
== CLOCK_MONOTONIC
||
385 clock
== CLOCK_MONOTONIC_RAW
)
386 nsec
= MONOTONIC_RES_NSEC
;
387 else if (clock
== CLOCK_REALTIME_COARSE
||
388 clock
== CLOCK_MONOTONIC_COARSE
)
391 return clock_getres_fallback(clock
, res
);
393 if (likely(res
!= NULL
)) {