FROMLIST: [PATCH v5 11/12] lib: vdso: Add support for CLOCK_BOOTTIME
lib/vdso/vgettimeofday.c (exynos8895/android_kernel_samsung_universal8895)
/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2017 Cavium, Inc.
 * Copyright (C) 2015 Mentor Graphics Corporation
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 * Rewritten from the arm64 version into C by: Andrew Pinski <apinski@cavium.com>
 * Reworked and rebased over the arm version by: Mark Salyzyn <salyzyn@android.com>
 */

#include <asm/barrier.h>
#include <linux/compiler.h> /* for notrace */
#include <linux/math64.h> /* for __iter_div_u64_rem() */
#include <uapi/linux/time.h> /* for struct timespec */

#include "compiler.h"
#include "datapage.h"

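/*
 * Syscall fallbacks, used whenever the vDSO path cannot service a
 * request (see the use_syscall checks below). DEFINE_FALLBACK is
 * assumed to be provided by the per-architecture "compiler.h" included
 * above.
 */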
DEFINE_FALLBACK(gettimeofday, struct timeval *, tv, struct timezone *, tz)
DEFINE_FALLBACK(clock_gettime, clockid_t, clock, struct timespec *, ts)
DEFINE_FALLBACK(clock_getres, clockid_t, clock, struct timespec *, ts)

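/*
 * Seqcount read protocol against the kernel's update_vsyscall(): wait
 * out any in-flight update (odd tb_seq_count), read the data, then
 * retry if the count moved. A minimal reader loop, sketched here for
 * illustration only (sec is a hypothetical local):
 *
 *        do {
 *                seq = vdso_read_begin(vd);
 *                sec = vd->xtime_coarse_sec;
 *        } while (vdso_read_retry(vd, seq));
 */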
static notrace u32 vdso_read_begin(const struct vdso_data *vd)
{
        u32 seq;

        do {
                seq = READ_ONCE(vd->tb_seq_count);

                if ((seq & 1) == 0)
                        break;

                cpu_relax();
        } while (true);

        smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
        return seq;
}

static notrace int vdso_read_retry(const struct vdso_data *vd, u32 start)
{
        u32 seq;

        smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
        seq = READ_ONCE(vd->tb_seq_count);
        return seq != start;
}

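/*
 * The coarse clocks need no counter read: they return the
 * tick-granularity timestamp published in the datapage, and therefore
 * never need the syscall fallback.
 */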
static notrace int do_realtime_coarse(const struct vdso_data *vd,
                                      struct timespec *ts)
{
        u32 seq;

        do {
                seq = vdso_read_begin(vd);

                ts->tv_sec = vd->xtime_coarse_sec;
                ts->tv_nsec = vd->xtime_coarse_nsec;

        } while (vdso_read_retry(vd, seq));

        return 0;
}

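/*
 * CLOCK_MONOTONIC_COARSE is the coarse wall clock plus the
 * wall-to-monotonic offset (wtm_clock_*), with the nanoseconds
 * renormalized into [0, NSEC_PER_SEC) via __iter_div_u64_rem().
 */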
static notrace int do_monotonic_coarse(const struct vdso_data *vd,
                                       struct timespec *ts)
{
        struct timespec tomono;
        u32 seq;
        u64 nsec;

        do {
                seq = vdso_read_begin(vd);

                ts->tv_sec = vd->xtime_coarse_sec;
                ts->tv_nsec = vd->xtime_coarse_nsec;

                tomono.tv_sec = vd->wtm_clock_sec;
                tomono.tv_nsec = vd->wtm_clock_nsec;

        } while (vdso_read_retry(vd, seq));

        ts->tv_sec += tomono.tv_sec;
        /* open coding timespec_add_ns */
        ts->tv_sec += __iter_div_u64_rem(ts->tv_nsec + tomono.tv_nsec,
                                         NSEC_PER_SEC, &nsec);
        ts->tv_nsec = nsec;

        return 0;
}

#ifdef ARCH_PROVIDES_TIMER

/*
 * Returns the clock delta, in nanoseconds left-shifted by the clock
 * shift.
 */
static notrace u64 get_clock_shifted_nsec(const u64 cycle_last,
                                          const u32 mult,
                                          const u64 mask)
{
        u64 res;

        /* Read the virtual counter. */
        res = arch_vdso_read_counter();

        res = res - cycle_last;

        res &= mask;
        return res * mult;
}

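/*
 * CLOCK_REALTIME:
 *   ns = (xtime_clock_snsec + ((counter - cycle_last) & mask) * mult) >> shift
 * i.e. the kernel-published shifted-nanosecond remainder plus the
 * scaled counter delta since the last update_vsyscall().
 */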
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
        u32 seq, mult, shift;
        u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
        static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
        u64 mask;
#endif
        vdso_xtime_clock_sec_t sec;

        do {
                seq = vdso_read_begin(vd);

                if (vd->use_syscall)
                        return -1;

                cycle_last = vd->cs_cycle_last;

                mult = vd->cs_mono_mult;
                shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
                mask = vd->cs_mask;
#endif

                sec = vd->xtime_clock_sec;
                nsec = vd->xtime_clock_snsec;

        } while (unlikely(vdso_read_retry(vd, seq)));

        nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
        nsec >>= shift;
        /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
        ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
        ts->tv_nsec = nsec;

        return 0;
}

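/*
 * CLOCK_MONOTONIC is CLOCK_REALTIME plus the wall-to-monotonic offset:
 * the offset's seconds are folded in under the seqcount, its
 * nanoseconds only after the shift, before renormalizing.
 */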
static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
        u32 seq, mult, shift;
        u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
        static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
        u64 mask;
#endif
        vdso_wtm_clock_nsec_t wtm_nsec;
        __kernel_time_t sec;

        do {
                seq = vdso_read_begin(vd);

                if (vd->use_syscall)
                        return -1;

                cycle_last = vd->cs_cycle_last;

                mult = vd->cs_mono_mult;
                shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
                mask = vd->cs_mask;
#endif

                sec = vd->xtime_clock_sec;
                nsec = vd->xtime_clock_snsec;

                sec += vd->wtm_clock_sec;
                wtm_nsec = vd->wtm_clock_nsec;

        } while (unlikely(vdso_read_retry(vd, seq)));

        nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
        nsec >>= shift;
        nsec += wtm_nsec;
        /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
        ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
        ts->tv_nsec = nsec;

        return 0;
}

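/*
 * CLOCK_MONOTONIC_RAW uses the raw multiplier (cs_raw_mult) and the
 * raw_time_* base rather than the NTP-adjusted cs_mono_mult, so it is
 * immune to frequency steering.
 */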
static notrace int do_monotonic_raw(const struct vdso_data *vd,
                                    struct timespec *ts)
{
        u32 seq, mult, shift;
        u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
        static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
        u64 mask;
#endif
        vdso_raw_time_sec_t sec;

        do {
                seq = vdso_read_begin(vd);

                if (vd->use_syscall)
                        return -1;

                cycle_last = vd->cs_cycle_last;

                mult = vd->cs_raw_mult;
                shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
                mask = vd->cs_mask;
#endif

                sec = vd->raw_time_sec;
                nsec = vd->raw_time_nsec;

        } while (unlikely(vdso_read_retry(vd, seq)));

        nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
        nsec >>= shift;
        /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
        ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
        ts->tv_nsec = nsec;

        return 0;
}

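/*
 * CLOCK_BOOTTIME, the clock this patch adds: CLOCK_MONOTONIC plus time
 * spent in suspend. btm_nsec is assumed to be the boot-time offset
 * published by update_vsyscall() for this series, so it is simply
 * folded into the wall-to-monotonic nanoseconds before renormalizing.
 */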
static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
{
        u32 seq, mult, shift;
        u64 nsec, cycle_last, wtm_nsec;
#ifdef ARCH_CLOCK_FIXED_MASK
        static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
        u64 mask;
#endif
        __kernel_time_t sec;

        do {
                seq = vdso_read_begin(vd);

                if (vd->use_syscall)
                        return -1;

                cycle_last = vd->cs_cycle_last;

                mult = vd->cs_mono_mult;
                shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
                mask = vd->cs_mask;
#endif

                sec = vd->xtime_clock_sec;
                nsec = vd->xtime_clock_snsec;

                sec += vd->wtm_clock_sec;
                wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec;

        } while (unlikely(vdso_read_retry(vd, seq)));

        nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
        nsec >>= shift;
        nsec += wtm_nsec;

        /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
        ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
        ts->tv_nsec = nsec;

        return 0;
}

#else /* ARCH_PROVIDES_TIMER */

static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
        return -1;
}

static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
        return -1;
}

static notrace int do_monotonic_raw(const struct vdso_data *vd,
                                    struct timespec *ts)
{
        return -1;
}

static notrace int do_boottime(const struct vdso_data *vd,
                               struct timespec *ts)
{
        return -1;
}

#endif /* ARCH_PROVIDES_TIMER */

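/*
 * vDSO entry point for clock_gettime(): dispatch on the clock ID.
 * The coarse clocks cannot fail; the counter-based ones fall back to
 * the real syscall when the datapage demands it (use_syscall) or the
 * clock is not handled here.
 */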
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        const struct vdso_data *vd = __get_datapage();

        switch (clock) {
        case CLOCK_REALTIME_COARSE:
                do_realtime_coarse(vd, ts);
                break;
        case CLOCK_MONOTONIC_COARSE:
                do_monotonic_coarse(vd, ts);
                break;
        case CLOCK_REALTIME:
                if (do_realtime(vd, ts))
                        goto fallback;
                break;
        case CLOCK_MONOTONIC:
                if (do_monotonic(vd, ts))
                        goto fallback;
                break;
        case CLOCK_MONOTONIC_RAW:
                if (do_monotonic_raw(vd, ts))
                        goto fallback;
                break;
        case CLOCK_BOOTTIME:
                if (do_boottime(vd, ts))
                        goto fallback;
                break;
        default:
                goto fallback;
        }

        return 0;
fallback:
        return clock_gettime_fallback(clock, ts);
}

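/*
 * gettimeofday() is CLOCK_REALTIME truncated to microseconds; the
 * legacy timezone fields are served straight from the datapage.
 */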
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        const struct vdso_data *vd = __get_datapage();

        if (likely(tv != NULL)) {
                struct timespec ts;

                if (do_realtime(vd, &ts))
                        return gettimeofday_fallback(tv, tz);

                tv->tv_sec = ts.tv_sec;
                tv->tv_usec = ts.tv_nsec / 1000;
        }

        if (unlikely(tz != NULL)) {
                tz->tz_minuteswest = vd->tz_minuteswest;
                tz->tz_dsttime = vd->tz_dsttime;
        }

        return 0;
}

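/*
 * clock_getres(): counter-backed clocks report the high resolution
 * (MONOTONIC_RES_NSEC), coarse clocks report tick resolution
 * (LOW_RES_NSEC), and anything else falls back to the syscall.
 */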
int __vdso_clock_getres(clockid_t clock, struct timespec *res)
{
        long nsec;

        if (clock == CLOCK_REALTIME ||
            clock == CLOCK_BOOTTIME ||
            clock == CLOCK_MONOTONIC ||
            clock == CLOCK_MONOTONIC_RAW)
                nsec = MONOTONIC_RES_NSEC;
        else if (clock == CLOCK_REALTIME_COARSE ||
                 clock == CLOCK_MONOTONIC_COARSE)
                nsec = LOW_RES_NSEC;
        else
                return clock_getres_fallback(clock, res);

        if (likely(res != NULL)) {
                res->tv_sec = 0;
                res->tv_nsec = nsec;
        }

        return 0;
}