/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2017 Cavium, Inc.
 * Copyright (C) 2015 Mentor Graphics Corporation
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 * Rewritten from the arm64 version into C by: Andrew Pinski <apinski@cavium.com>
 * Reworked and rebased over arm version by: Mark Salyzyn <salyzyn@android.com>
 */
25 #include <asm/barrier.h>
26 #include <linux/compiler.h> /* for notrace */
27 #include <linux/math64.h> /* for __iter_div_u64_rem() */
28 #include <uapi/linux/time.h> /* for struct timespec */
/*
 * Generate gettimeofday_fallback(tv, tz): the slow path that issues the
 * real syscall when the vDSO fast path cannot be used.
 * NOTE(review): DEFINE_FALLBACK is declared in a header not visible in
 * this excerpt (presumably the vDSO-local compiler.h) — confirm.
 */
33 DEFINE_FALLBACK(gettimeofday
, struct timeval
*, tv
, struct timezone
*, tz
)
/*
 * Generate clock_gettime_fallback(clock, ts): syscall slow path used by
 * __vdso_clock_gettime() for clocks the fast path cannot serve.
 * NOTE(review): DEFINE_FALLBACK's definition is not visible here.
 */
34 DEFINE_FALLBACK(clock_gettime
, clockid_t
, clock
, struct timespec
*, ts
)
/*
 * Open a seqcount-style read section against the vDSO data page.
 * Snapshots tb_seq_count, then (via the barrier below) orders that load
 * before the data loads performed by the caller.
 * NOTE(review): this excerpt elides several body lines — the `seq`
 * declaration, the wait-until-stable retry loop, and the final
 * `return seq;` — confirm against the full source.
 */
36 static notrace u32
vdso_read_begin(const struct vdso_data
*vd
)
/* Snapshot the update sequence counter published by update_vsyscall(). */
41 seq
= READ_ONCE(vd
->tb_seq_count
);
49 smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
/*
 * Close a seqcount-style read section: after a read barrier, re-read
 * tb_seq_count and report whether it changed relative to @start (in
 * which case the caller must retry its reads).
 * NOTE(review): the `seq` declaration and the final comparison/return
 * (presumably `return seq != start;`) are elided in this excerpt.
 */
53 static notrace
int vdso_read_retry(const struct vdso_data
*vd
, u32 start
)
57 smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
58 seq
= READ_ONCE(vd
->tb_seq_count
);
/*
 * CLOCK_REALTIME_COARSE: copy the kernel's pre-computed coarse wall
 * time (xtime_coarse_sec/nsec) into the output timespec under the
 * vdso_read_begin()/vdso_read_retry() seqcount loop.
 * NOTE(review): elided in this excerpt — the `struct timespec *ts`
 * parameter tail, local declarations, the `do {` opener, and the
 * trailing `return 0;`.
 */
62 static notrace
int do_realtime_coarse(const struct vdso_data
*vd
,
/* Seqcount read section: retry if the kernel updated the page meanwhile. */
68 seq
= vdso_read_begin(vd
);
70 ts
->tv_sec
= vd
->xtime_coarse_sec
;
71 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
73 } while (vdso_read_retry(vd
, seq
));
/*
 * CLOCK_MONOTONIC_COARSE: coarse wall time plus the wall-to-monotonic
 * offset (wtm_clock_sec/nsec), both snapshotted consistently inside one
 * seqcount read section, then combined and normalized so tv_nsec lands
 * in [0, NSEC_PER_SEC).
 * NOTE(review): elided in this excerpt — the `struct timespec *ts`
 * parameter tail, the `seq`/`nsec` declarations, the `do {` opener, the
 * __iter_div_u64_rem() continuation (NSEC_PER_SEC, &nsec), the final
 * `ts->tv_nsec = nsec;` store, and `return 0;`.
 */
78 static notrace
int do_monotonic_coarse(const struct vdso_data
*vd
,
/* Local copy of the wall-to-monotonic offset read from the data page. */
81 struct timespec tomono
;
86 seq
= vdso_read_begin(vd
);
88 ts
->tv_sec
= vd
->xtime_coarse_sec
;
89 ts
->tv_nsec
= vd
->xtime_coarse_nsec
;
91 tomono
.tv_sec
= vd
->wtm_clock_sec
;
92 tomono
.tv_nsec
= vd
->wtm_clock_nsec
;
94 } while (vdso_read_retry(vd
, seq
));
/* Apply the monotonic offset outside the retry loop. */
96 ts
->tv_sec
+= tomono
.tv_sec
;
97 /* open coding timespec_add_ns */
98 ts
->tv_sec
+= __iter_div_u64_rem(ts
->tv_nsec
+ tomono
.tv_nsec
,
105 #ifdef CONFIG_ARM_ARCH_TIMER
/*
 * Compute the elapsed-cycle delta since @cycle_last from the hardware
 * counter. Per the original comment fragment below, the result is the
 * clock delta in nanoseconds left-shifted by the clocksource shift.
 * NOTE(review): elided in this excerpt — the remaining parameters
 * (presumably `mult` and `mask`), the `res` declaration, the masking
 * and multiply, and the return statement.
 */
108 * Returns the clock delta, in nanoseconds left-shifted by the clock
111 static notrace u64
get_clock_shifted_nsec(const u64 cycle_last
,
117 /* Read the virtual counter. */
118 res
= arch_vdso_read_counter();
120 res
= res
- cycle_last
;
/*
 * CLOCK_REALTIME fast path: snapshot the clocksource parameters and the
 * wall-time base inside a seqcount read section, then extrapolate using
 * the architected timer via get_clock_shifted_nsec() and normalize with
 * __iter_div_u64_rem().
 * NOTE(review): elided in this excerpt — the `do {` opener, the `mask`
 * declaration/assignment (`mask` is used below but never set in the
 * visible lines; presumably `mask = vd->cs_mask;`), the `nsec >>= shift`
 * step (`shift` is loaded but otherwise unused here), the final
 * `ts->tv_nsec = nsec;` store, and `return 0;`.
 */
126 static notrace
int do_realtime(const struct vdso_data
*vd
, struct timespec
*ts
)
128 u32 seq
, mult
, shift
;
129 u64 nsec
, cycle_last
;
131 vdso_xtime_clock_sec_t sec
;
134 seq
= vdso_read_begin(vd
);
/* Consistent snapshot of clocksource state and wall-time base. */
139 cycle_last
= vd
->cs_cycle_last
;
141 mult
= vd
->cs_mono_mult
;
142 shift
= vd
->cs_shift
;
145 sec
= vd
->xtime_clock_sec
;
146 nsec
= vd
->xtime_clock_snsec
;
148 } while (unlikely(vdso_read_retry(vd
, seq
)));
/* Extrapolate from the last clocksource cycle to "now". */
150 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
152 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
153 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
/*
 * CLOCK_MONOTONIC fast path: like do_realtime(), but also folds in the
 * wall-to-monotonic offset (wtm_clock_sec/nsec) captured inside the
 * same seqcount read section.
 * NOTE(review): elided in this excerpt — the `sec` declaration
 * (presumably `vdso_wtm_clock_sec_t`/similar), the `do {` opener, the
 * `mask` assignment (`mask` is used below but never set in the visible
 * lines), the `nsec >>= shift` step, adding `wtm_nsec` into `nsec`, the
 * final `ts->tv_nsec = nsec;` store, and `return 0;`.
 */
159 static notrace
int do_monotonic(const struct vdso_data
*vd
, struct timespec
*ts
)
161 u32 seq
, mult
, shift
;
162 u64 nsec
, cycle_last
;
164 vdso_wtm_clock_nsec_t wtm_nsec
;
168 seq
= vdso_read_begin(vd
);
/* Consistent snapshot of clocksource state, base time, and offset. */
173 cycle_last
= vd
->cs_cycle_last
;
175 mult
= vd
->cs_mono_mult
;
176 shift
= vd
->cs_shift
;
179 sec
= vd
->xtime_clock_sec
;
180 nsec
= vd
->xtime_clock_snsec
;
/* Fold the wall-to-monotonic offset into the snapshot. */
182 sec
+= vd
->wtm_clock_sec
;
183 wtm_nsec
= vd
->wtm_clock_nsec
;
185 } while (unlikely(vdso_read_retry(vd
, seq
)));
/* Extrapolate from the last clocksource cycle to "now". */
187 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
190 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
191 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
/*
 * CLOCK_MONOTONIC_RAW fast path: same structure as do_realtime(), but
 * uses the raw (NTP-unadjusted) multiplier cs_raw_mult and the
 * raw_time_sec/nsec base from the data page.
 * NOTE(review): elided in this excerpt — the `struct timespec *ts`
 * parameter tail, the `do {` opener, the `mask` assignment (`mask` is
 * used below but never set in the visible lines), the `nsec >>= shift`
 * step, the final `ts->tv_nsec = nsec;` store, and `return 0;`.
 */
197 static notrace
int do_monotonic_raw(const struct vdso_data
*vd
,
200 u32 seq
, mult
, shift
;
201 u64 nsec
, cycle_last
;
203 vdso_raw_time_sec_t sec
;
206 seq
= vdso_read_begin(vd
);
/* Consistent snapshot of raw clocksource state and raw time base. */
211 cycle_last
= vd
->cs_cycle_last
;
213 mult
= vd
->cs_raw_mult
;
214 shift
= vd
->cs_shift
;
217 sec
= vd
->raw_time_sec
;
218 nsec
= vd
->raw_time_nsec
;
220 } while (unlikely(vdso_read_retry(vd
, seq
)));
/* Extrapolate from the last clocksource cycle to "now". */
222 nsec
+= get_clock_shifted_nsec(cycle_last
, mult
, mask
);
224 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
225 ts
->tv_sec
= sec
+ __iter_div_u64_rem(nsec
, NSEC_PER_SEC
, &nsec
);
231 #else /* CONFIG_ARM_ARCH_TIMER */
/*
 * No architected timer: the fast path cannot work, so this stub exists
 * only so __vdso_clock_gettime() can fall through to the syscall.
 * NOTE(review): the body is elided in this excerpt — presumably just
 * `return -1;` to force the fallback.
 */
233 static notrace
int do_realtime(const struct vdso_data
*vd
, struct timespec
*ts
)
/*
 * No architected timer: stub for CLOCK_MONOTONIC; body elided in this
 * excerpt — presumably `return -1;` to force the syscall fallback.
 */
238 static notrace
int do_monotonic(const struct vdso_data
*vd
, struct timespec
*ts
)
/*
 * No architected timer: stub for CLOCK_MONOTONIC_RAW; parameter tail
 * and body elided in this excerpt — presumably `return -1;`.
 */
243 static notrace
int do_monotonic_raw(const struct vdso_data
*vd
,
249 #endif /* CONFIG_ARM_ARCH_TIMER */
/*
 * vDSO entry point for clock_gettime(2): dispatch on @clock to the
 * matching fast-path helper; anything unhandled (or a helper returning
 * nonzero) falls through to the real syscall via clock_gettime_fallback().
 * NOTE(review): elided in this excerpt — the `switch (clock) {` opener,
 * the CLOCK_REALTIME case label, the `break`/`return 0` statements after
 * each case, and the `goto fallback`-style plumbing between the failing
 * fast paths and the fallback below.
 */
251 notrace
int __vdso_clock_gettime(clockid_t clock
, struct timespec
*ts
)
/* Kernel-updated vDSO data page shared with userspace. */
253 const struct vdso_data
*vd
= __get_datapage();
256 case CLOCK_REALTIME_COARSE
:
257 do_realtime_coarse(vd
, ts
);
259 case CLOCK_MONOTONIC_COARSE
:
260 do_monotonic_coarse(vd
, ts
);
/* Precise clocks can fail (no arch timer) and then need the syscall. */
263 if (do_realtime(vd
, ts
))
266 case CLOCK_MONOTONIC
:
267 if (do_monotonic(vd
, ts
))
270 case CLOCK_MONOTONIC_RAW
:
271 if (do_monotonic_raw(vd
, ts
))
/* Unhandled clock id or failed fast path: issue the real syscall. */
280 return clock_gettime_fallback(clock
, ts
);
283 notrace
int __vdso_gettimeofday(struct timeval
*tv
, struct timezone
*tz
)
285 const struct vdso_data
*vd
= __get_datapage();
287 if (likely(tv
!= NULL
)) {
290 if (do_realtime(vd
, &ts
))
291 return gettimeofday_fallback(tv
, tz
);
293 tv
->tv_sec
= ts
.tv_sec
;
294 tv
->tv_usec
= ts
.tv_nsec
/ 1000;
297 if (unlikely(tz
!= NULL
)) {
298 tz
->tz_minuteswest
= vd
->tz_minuteswest
;
299 tz
->tz_dsttime
= vd
->tz_dsttime
;