FROMLIST: [PATCH v5 09/12] arm: vdso: move vgettimeofday.c to lib/vdso/
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / lib / vdso / vgettimeofday.c
1 /*
2 * Userspace implementations of gettimeofday() and friends.
3 *
4 * Copyright (C) 2017 Cavium, Inc.
5 * Copyright (C) 2015 Mentor Graphics Corporation
6 * Copyright (C) 2012 ARM Limited
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Author: Will Deacon <will.deacon@arm.com>
 21  * Rewritten from the arm64 version into C by: Andrew Pinski <apinski@cavium.com>
22 * Reworked and rebased over arm version by: Mark Salyzyn <salyzyn@android.com>
23 */
24
25 #include <asm/barrier.h>
26 #include <linux/compiler.h> /* for notrace */
27 #include <linux/math64.h> /* for __iter_div_u64_rem() */
28 #include <uapi/linux/time.h> /* for struct timespec */
29
30 #include "compiler.h"
31 #include "datapage.h"
32
33 DEFINE_FALLBACK(gettimeofday, struct timeval *, tv, struct timezone *, tz)
34 DEFINE_FALLBACK(clock_gettime, clockid_t, clock, struct timespec *, ts)
35 DEFINE_FALLBACK(clock_getres, clockid_t, clock, struct timespec *, ts)
36
37 static notrace u32 vdso_read_begin(const struct vdso_data *vd)
38 {
39 u32 seq;
40
41 do {
42 seq = READ_ONCE(vd->tb_seq_count);
43
44 if ((seq & 1) == 0)
45 break;
46
47 cpu_relax();
48 } while (true);
49
50 smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
51 return seq;
52 }
53
54 static notrace int vdso_read_retry(const struct vdso_data *vd, u32 start)
55 {
56 u32 seq;
57
58 smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
59 seq = READ_ONCE(vd->tb_seq_count);
60 return seq != start;
61 }
62
63 static notrace int do_realtime_coarse(const struct vdso_data *vd,
64 struct timespec *ts)
65 {
66 u32 seq;
67
68 do {
69 seq = vdso_read_begin(vd);
70
71 ts->tv_sec = vd->xtime_coarse_sec;
72 ts->tv_nsec = vd->xtime_coarse_nsec;
73
74 } while (vdso_read_retry(vd, seq));
75
76 return 0;
77 }
78
79 static notrace int do_monotonic_coarse(const struct vdso_data *vd,
80 struct timespec *ts)
81 {
82 struct timespec tomono;
83 u32 seq;
84 u64 nsec;
85
86 do {
87 seq = vdso_read_begin(vd);
88
89 ts->tv_sec = vd->xtime_coarse_sec;
90 ts->tv_nsec = vd->xtime_coarse_nsec;
91
92 tomono.tv_sec = vd->wtm_clock_sec;
93 tomono.tv_nsec = vd->wtm_clock_nsec;
94
95 } while (vdso_read_retry(vd, seq));
96
97 ts->tv_sec += tomono.tv_sec;
98 /* open coding timespec_add_ns */
99 ts->tv_sec += __iter_div_u64_rem(ts->tv_nsec + tomono.tv_nsec,
100 NSEC_PER_SEC, &nsec);
101 ts->tv_nsec = nsec;
102
103 return 0;
104 }
105
106 #ifdef ARCH_PROVIDES_TIMER
107
108 /*
109 * Returns the clock delta, in nanoseconds left-shifted by the clock
110 * shift.
111 */
112 static notrace u64 get_clock_shifted_nsec(const u64 cycle_last,
113 const u32 mult,
114 const u64 mask)
115 {
116 u64 res;
117
118 /* Read the virtual counter. */
119 res = arch_vdso_read_counter();
120
121 res = res - cycle_last;
122
123 res &= mask;
124 return res * mult;
125 }
126
/*
 * CLOCK_REALTIME: snapshot the timekeeper state under the vDSO seqlock
 * and add the counter ticks elapsed since the kernel's last update.
 * Returns 0 on success, -1 if the kernel requires the syscall fallback.
 */
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	/* Counter width is known at build time; no per-read mask load. */
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_xtime_clock_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Kernel disabled the fast path (e.g. unsuitable counter). */
		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif

		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	/* nsec is in shifted units; add the scaled delta, then shift down. */
	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}
165
/*
 * CLOCK_MONOTONIC: realtime timebase plus the wall-to-monotonic offset,
 * all sampled inside one seqlock critical section for consistency.
 * Returns 0 on success, -1 if the kernel requires the syscall fallback.
 */
static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	/* Counter width is known at build time; no per-read mask load. */
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_wtm_clock_nsec_t wtm_nsec;
	__kernel_time_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Kernel disabled the fast path (e.g. unsuitable counter). */
		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif

		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec;

		/* Wall-to-monotonic offset, read under the same seqlock. */
		sec += vd->wtm_clock_sec;
		wtm_nsec = vd->wtm_clock_nsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	/* nsec is in shifted units; add the scaled delta, then shift down. */
	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	nsec += wtm_nsec;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}
209
/*
 * CLOCK_MONOTONIC_RAW: like do_realtime() but scaled with the raw
 * (NTP-unadjusted) multiplier and the raw timebase.
 * Returns 0 on success, -1 if the kernel requires the syscall fallback.
 */
static notrace int do_monotonic_raw(const struct vdso_data *vd,
				    struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
#ifdef ARCH_CLOCK_FIXED_MASK
	/* Counter width is known at build time; no per-read mask load. */
	static const u64 mask = ARCH_CLOCK_FIXED_MASK;
#else
	u64 mask;
#endif
	vdso_raw_time_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Kernel disabled the fast path (e.g. unsuitable counter). */
		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		/* Raw multiplier: frequency-corrected but not NTP-slewed. */
		mult = vd->cs_raw_mult;
		shift = vd->cs_shift;
#ifndef ARCH_CLOCK_FIXED_MASK
		mask = vd->cs_mask;
#endif

		sec = vd->raw_time_sec;
		nsec = vd->raw_time_nsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	/* nsec is in shifted units; add the scaled delta, then shift down. */
	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}
249
250 #else /* ARCH_PROVIDES_TIMER */
251
/*
 * No userspace-readable architected counter: every high-resolution
 * clock must go through the syscall fallback. Returning -1 makes the
 * __vdso_* entry points below take that path.
 */
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
	return -1;
}

static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
	return -1;
}

static notrace int do_monotonic_raw(const struct vdso_data *vd,
				    struct timespec *ts)
{
	return -1;
}
267
268 #endif /* ARCH_PROVIDES_TIMER */
269
270 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
271 {
272 const struct vdso_data *vd = __get_datapage();
273
274 switch (clock) {
275 case CLOCK_REALTIME_COARSE:
276 do_realtime_coarse(vd, ts);
277 break;
278 case CLOCK_MONOTONIC_COARSE:
279 do_monotonic_coarse(vd, ts);
280 break;
281 case CLOCK_REALTIME:
282 if (do_realtime(vd, ts))
283 goto fallback;
284 break;
285 case CLOCK_MONOTONIC:
286 if (do_monotonic(vd, ts))
287 goto fallback;
288 break;
289 case CLOCK_MONOTONIC_RAW:
290 if (do_monotonic_raw(vd, ts))
291 goto fallback;
292 break;
293 default:
294 goto fallback;
295 }
296
297 return 0;
298 fallback:
299 return clock_gettime_fallback(clock, ts);
300 }
301
302 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
303 {
304 const struct vdso_data *vd = __get_datapage();
305
306 if (likely(tv != NULL)) {
307 struct timespec ts;
308
309 if (do_realtime(vd, &ts))
310 return gettimeofday_fallback(tv, tz);
311
312 tv->tv_sec = ts.tv_sec;
313 tv->tv_usec = ts.tv_nsec / 1000;
314 }
315
316 if (unlikely(tz != NULL)) {
317 tz->tz_minuteswest = vd->tz_minuteswest;
318 tz->tz_dsttime = vd->tz_dsttime;
319 }
320
321 return 0;
322 }
323
324 int __vdso_clock_getres(clockid_t clock, struct timespec *res)
325 {
326 long nsec;
327
328 if (clock == CLOCK_REALTIME ||
329 clock == CLOCK_MONOTONIC ||
330 clock == CLOCK_MONOTONIC_RAW)
331 nsec = MONOTONIC_RES_NSEC;
332 else if (clock == CLOCK_REALTIME_COARSE ||
333 clock == CLOCK_MONOTONIC_COARSE)
334 nsec = LOW_RES_NSEC;
335 else
336 return clock_getres_fallback(clock, res);
337
338 if (likely(res != NULL)) {
339 res->tv_sec = 0;
340 res->tv_nsec = nsec;
341 }
342
343 return 0;
344 }