FROMLIST: [PATCH v5 12/12] lib: vdso: do not expose gettimeofday, if no arch supporte...
lib/vdso/vgettimeofday.c (LineageOS android_kernel_amlogic_linux-4.9, G12 branch)
/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2017 Cavium, Inc.
 * Copyright (C) 2015 Mentor Graphics Corporation
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 * Rewritten from the arm64 version into C by: Andrew Pinski <apinski@cavium.com>
 * Reworked and rebased over the arm version by: Mark Salyzyn <salyzyn@android.com>
 */

#include <asm/barrier.h>
#include <linux/compiler.h>	/* for notrace */
#include <linux/math64.h>	/* for __iter_div_u64_rem() */
#include <uapi/linux/time.h>	/* for struct timespec */

#include "compiler.h"
#include "datapage.h"

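/*
 * The DEFINE_FALLBACK() wrappers below are assumed to come in via the
 * local "compiler.h"; each one is expected to emit a real syscall of
 * the same name, used whenever the data page cannot service a request.
 */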
#ifdef ARCH_PROVIDES_TIMER
DEFINE_FALLBACK(gettimeofday, struct timeval *, tv, struct timezone *, tz)
#endif
DEFINE_FALLBACK(clock_gettime, clockid_t, clock, struct timespec *, ts)
DEFINE_FALLBACK(clock_getres, clockid_t, clock, struct timespec *, ts)

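/*
 * Open a seqcount read-side section: spin until the sequence count in
 * the data page is even (no update in progress), then order all later
 * data-page reads after the count read.
 */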
static notrace u32 vdso_read_begin(const struct vdso_data *vd)
{
	u32 seq;

	do {
		seq = READ_ONCE(vd->tb_seq_count);

		if ((seq & 1) == 0)
			break;

		cpu_relax();
	} while (true);

	smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
	return seq;
}

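/*
 * Close the read-side section: a non-zero return means the sequence
 * count moved while the data page was being read and the caller must
 * retry.
 */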
static notrace int vdso_read_retry(const struct vdso_data *vd, u32 start)
{
	u32 seq;

	smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
	seq = READ_ONCE(vd->tb_seq_count);
	return seq != start;
}

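/* CLOCK_REALTIME_COARSE: copy out the kernel's cached coarse wall time. */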
static notrace int do_realtime_coarse(const struct vdso_data *vd,
				      struct timespec *ts)
{
	u32 seq;

	do {
		seq = vdso_read_begin(vd);

		ts->tv_sec = vd->xtime_coarse_sec;
		ts->tv_nsec = vd->xtime_coarse_nsec;

	} while (vdso_read_retry(vd, seq));

	return 0;
}

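/*
 * CLOCK_MONOTONIC_COARSE: coarse wall time plus the wall-to-monotonic
 * offset, renormalised so tv_nsec ends up in [0, NSEC_PER_SEC).
 */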
static notrace int do_monotonic_coarse(const struct vdso_data *vd,
				       struct timespec *ts)
{
	struct timespec tomono;
	u32 seq;
	u64 nsec;

	do {
		seq = vdso_read_begin(vd);

		ts->tv_sec = vd->xtime_coarse_sec;
		ts->tv_nsec = vd->xtime_coarse_nsec;

		tomono.tv_sec = vd->wtm_clock_sec;
		tomono.tv_nsec = vd->wtm_clock_nsec;

	} while (vdso_read_retry(vd, seq));

	ts->tv_sec += tomono.tv_sec;
	/* open coding timespec_add_ns */
	ts->tv_sec += __iter_div_u64_rem(ts->tv_nsec + tomono.tv_nsec,
					 NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}

#ifdef ARCH_PROVIDES_TIMER

/*
 * Returns the clock delta, in nanoseconds left-shifted by the clock
 * shift.
 */
static notrace u64 get_clock_shifted_nsec(const u64 cycle_last,
					  const u32 mult,
					  const u64 mask)
{
	u64 res;

	/* Read the virtual counter. */
	res = arch_vdso_read_counter();

	res = res - cycle_last;

	res &= mask;
	return res * mult;
}

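/*
 * CLOCK_REALTIME: the last wall time published by the kernel, advanced
 * by a delta read from the architected counter. Returns -1 so the
 * caller can fall back to the syscall when the clocksource cannot be
 * read from userspace.
 */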
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_xtime_clock_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif

		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}

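/*
 * CLOCK_MONOTONIC: same as the realtime path, with the
 * wall-to-monotonic offset folded in before the final division.
 */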
static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_wtm_clock_nsec_t wtm_nsec;
	__kernel_time_t sec;

	do {
		seq = vdso_read_begin(vd);

		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif

		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec;

		sec += vd->wtm_clock_sec;
		wtm_nsec = vd->wtm_clock_nsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	nsec += wtm_nsec;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}

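/*
 * CLOCK_MONOTONIC_RAW: like the monotonic path but scaled by
 * cs_raw_mult, the multiplier that is not steered by NTP.
 */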
static notrace int do_monotonic_raw(const struct vdso_data *vd,
				    struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_raw_time_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_raw_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif

		sec = vd->raw_time_sec;
		nsec = vd->raw_time_nsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}

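/*
 * CLOCK_BOOTTIME: monotonic time including time spent in suspend,
 * accumulated by the kernel in btm_nsec and folded into the
 * wall-to-monotonic nanoseconds here.
 */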
static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last, wtm_nsec;
#ifdef ARCH_CLOCK_FIXED_MASK
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	__kernel_time_t sec;

	do {
		seq = vdso_read_begin(vd);

		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif

		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec;

		sec += vd->wtm_clock_sec;
		wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	nsec += wtm_nsec;

	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}

#endif /* ARCH_PROVIDES_TIMER */

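/*
 * vDSO entry point: dispatch on the clock id. The coarse clocks can
 * always be served from the data page; the precise clocks exist only
 * when the architecture provides a userspace-readable counter, and
 * every other case (or a do_*() failure) takes the real syscall.
 */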
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	const struct vdso_data *vd = __get_datapage();

	switch (clock) {
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(vd, ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(vd, ts);
		break;
#ifdef ARCH_PROVIDES_TIMER
	case CLOCK_REALTIME:
		if (do_realtime(vd, ts))
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(vd, ts))
			goto fallback;
		break;
	case CLOCK_MONOTONIC_RAW:
		if (do_monotonic_raw(vd, ts))
			goto fallback;
		break;
	case CLOCK_BOOTTIME:
		if (do_boottime(vd, ts))
			goto fallback;
		break;
#endif
	default:
		goto fallback;
	}

	return 0;
fallback:
	return clock_gettime_fallback(clock, ts);
}

#ifdef ARCH_PROVIDES_TIMER
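/*
 * gettimeofday() is the realtime clock truncated to microseconds, with
 * the timezone fields copied straight from the data page. Per the
 * patch subject, this entry point is only built when the architecture
 * provides a timer.
 */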
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	const struct vdso_data *vd = __get_datapage();

	if (likely(tv != NULL)) {
		struct timespec ts;

		if (do_realtime(vd, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = ts.tv_nsec / 1000;
	}

	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = vd->tz_minuteswest;
		tz->tz_dsttime = vd->tz_dsttime;
	}

	return 0;
}
#endif

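/*
 * clock_getres() needs no seqcount loop: the resolutions are
 * compile-time constants, LOW_RES_NSEC for the coarse clocks and
 * MONOTONIC_RES_NSEC for the precise ones.
 */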
notrace int __vdso_clock_getres(clockid_t clock, struct timespec *res)
{
	long nsec;

	switch (clock) {
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
		nsec = LOW_RES_NSEC;
		break;
#ifdef ARCH_PROVIDES_TIMER
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_BOOTTIME:
		nsec = MONOTONIC_RES_NSEC;
		break;
#endif
	default:
		return clock_getres_fallback(clock, res);
	}

	if (likely(res != NULL)) {
		res->tv_sec = 0;
		res->tv_nsec = nsec;
	}

	return 0;
}
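
/*
 * Usage sketch (illustrative only, not part of this file): userspace
 * normally reaches these entry points through the C library, which
 * binds clock_gettime() and friends to the vDSO symbols at startup,
 * so a call like the one below never enters the kernel when the vDSO
 * can serve it:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
 *			printf("%lld.%09ld\n", (long long)ts.tv_sec,
 *			       ts.tv_nsec);
 *		return 0;
 *	}
 */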