2 * Userspace implementations of gettimeofday() and friends.
4 * Copyright (C) 2017 Cavium, Inc.
5 * Copyright (C) 2015 Mentor Graphics Corporation
6 * Copyright (C) 2012 ARM Limited
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 * Author: Will Deacon <will.deacon@arm.com>
21 * Rewritten from the arm64 version into C by: Andrew Pinski <apinski@cavium.com>
22 * Reworked and rebased over arm version by: Mark Salyzyn <salyzyn@android.com>
25 #include <asm/barrier.h>
26 #include <linux/compiler.h> /* for notrace */
27 #include <linux/math64.h> /* for __iter_div_u64_rem() */
28 #include <uapi/linux/time.h> /* for struct timespec */
33 DEFINE_FALLBACK(gettimeofday
, struct timeval
*, tv
, struct timezone
*, tz
)
34 DEFINE_FALLBACK(clock_gettime
, clockid_t
, clock
, struct timespec
*, ts
)
35 DEFINE_FALLBACK(clock_getres
, clockid_t
, clock
, struct timespec
*, ts
)
37 static notrace u32
vdso_read_begin(const struct vdso_data
*vd
)
42 seq
= READ_ONCE(vd
->tb_seq_count
);
50 smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
54 static notrace
int vdso_read_retry(const struct vdso_data
*vd
, u32 start
)
58 smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
59 seq
= READ_ONCE(vd
->tb_seq_count
);
63 static notrace
int do_realtime_coarse(const struct vdso_data
*vd
,
69 seq
= vdso_read_begin(vd
);
71 ts
->tv_sec
= vd
->xtime_coarse_sec
;
72 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
74 } while (vdso_read_retry(vd
, seq
));
79 static notrace
int do_monotonic_coarse(const struct vdso_data
*vd
,
82 struct timespec tomono
;
87 seq
= vdso_read_begin(vd
);
89 ts
->tv_sec
= vd
->xtime_coarse_sec
;
90 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
92 tomono
.tv_sec
= vd
->wtm_clock_sec
;
93 tomono
.tv_nsec
= vd
->wtm_clock_nsec
;
95 } while (vdso_read_retry(vd
, seq
));
97 ts
->tv_sec
+= tomono
.tv_sec
;
98 /* open coding timespec_add_ns */
99 ts
->tv_sec
+= __iter_div_u64_rem(ts
->tv_nsec
+ tomono
.tv_nsec
,
100 NSEC_PER_SEC
, &nsec
);
106 #ifdef ARCH_PROVIDES_TIMER
109 * Returns the clock delta, in nanoseconds left-shifted by the clock
112 static notrace u64
get_clock_shifted_nsec(const u64 cycle_last
,
118 /* Read the virtual counter. */
119 res
= arch_vdso_read_counter();
121 res
= res
- cycle_last
;
127 static notrace
int do_realtime(const struct vdso_data
*vd
, struct timespec
*ts
)
129 u32 seq
, mult
, shift
;
130 u64 nsec
, cycle_last
;
131 #ifdef ARCH_CLOCK_FIXED_MASK
132 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
136 vdso_xtime_clock_sec_t sec
;
139 seq
= vdso_read_begin(vd
);
144 cycle_last
= vd
->cs_cycle_last
;
146 mult
= vd
->cs_mono_mult
;
147 shift
= vd
->cs_shift
;
148 #ifndef ARCH_CLOCK_FIXED_MASK
152 sec
= vd
->xtime_clock_sec
;
153 nsec
= vd
->xtime_clock_snsec
;
155 } while (unlikely(vdso_read_retry(vd
, seq
)));
157 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
159 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
160 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
166 static notrace
int do_monotonic(const struct vdso_data
*vd
, struct timespec
*ts
)
168 u32 seq
, mult
, shift
;
169 u64 nsec
, cycle_last
;
170 #ifdef ARCH_CLOCK_FIXED_MASK
171 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
175 vdso_wtm_clock_nsec_t wtm_nsec
;
179 seq
= vdso_read_begin(vd
);
184 cycle_last
= vd
->cs_cycle_last
;
186 mult
= vd
->cs_mono_mult
;
187 shift
= vd
->cs_shift
;
188 #ifndef ARCH_CLOCK_FIXED_MASK
192 sec
= vd
->xtime_clock_sec
;
193 nsec
= vd
->xtime_clock_snsec
;
195 sec
+= vd
->wtm_clock_sec
;
196 wtm_nsec
= vd
->wtm_clock_nsec
;
198 } while (unlikely(vdso_read_retry(vd
, seq
)));
200 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
203 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
204 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
210 static notrace
int do_monotonic_raw(const struct vdso_data
*vd
,
213 u32 seq
, mult
, shift
;
214 u64 nsec
, cycle_last
;
215 #ifdef ARCH_CLOCK_FIXED_MASK
216 static const u64 mask
= ARCH_CLOCK_FIXED_MASK
;
220 vdso_raw_time_sec_t sec
;
223 seq
= vdso_read_begin(vd
);
228 cycle_last
= vd
->cs_cycle_last
;
230 mult
= vd
->cs_raw_mult
;
231 shift
= vd
->cs_shift
;
232 #ifndef ARCH_CLOCK_FIXED_MASK
236 sec
= vd
->raw_time_sec
;
237 nsec
= vd
->raw_time_nsec
;
239 } while (unlikely(vdso_read_retry(vd
, seq
)));
241 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
243 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
244 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
250 #else /* ARCH_PROVIDES_TIMER */
252 static notrace
int do_realtime(const struct vdso_data
*vd
, struct timespec
*ts
)
257 static notrace
int do_monotonic(const struct vdso_data
*vd
, struct timespec
*ts
)
262 static notrace
int do_monotonic_raw(const struct vdso_data
*vd
,
268 #endif /* ARCH_PROVIDES_TIMER */
270 notrace
int __vdso_clock_gettime(clockid_t clock
, struct timespec
*ts
)
272 const struct vdso_data
*vd
= __get_datapage();
275 case CLOCK_REALTIME_COARSE
:
276 do_realtime_coarse(vd
, ts
);
278 case CLOCK_MONOTONIC_COARSE
:
279 do_monotonic_coarse(vd
, ts
);
282 if (do_realtime(vd
, ts
))
285 case CLOCK_MONOTONIC
:
286 if (do_monotonic(vd
, ts
))
289 case CLOCK_MONOTONIC_RAW
:
290 if (do_monotonic_raw(vd
, ts
))
299 return clock_gettime_fallback(clock
, ts
);
302 notrace
int __vdso_gettimeofday(struct timeval
*tv
, struct timezone
*tz
)
304 const struct vdso_data
*vd
= __get_datapage();
306 if (likely(tv
!= NULL
)) {
309 if (do_realtime(vd
, &ts
))
310 return gettimeofday_fallback(tv
, tz
);
312 tv
->tv_sec
= ts
.tv_sec
;
313 tv
->tv_usec
= ts
.tv_nsec
/ 1000;
316 if (unlikely(tz
!= NULL
)) {
317 tz
->tz_minuteswest
= vd
->tz_minuteswest
;
318 tz
->tz_dsttime
= vd
->tz_dsttime
;
324 int __vdso_clock_getres(clockid_t clock
, struct timespec
*res
)
328 if (clock
== CLOCK_REALTIME
||
329 clock
== CLOCK_MONOTONIC
||
330 clock
== CLOCK_MONOTONIC_RAW
)
331 nsec
= MONOTONIC_RES_NSEC
;
332 else if (clock
== CLOCK_REALTIME_COARSE
||
333 clock
== CLOCK_MONOTONIC_COARSE
)
336 return clock_getres_fallback(clock
, res
);
338 if (likely(res
!= NULL
)) {