a354586f8a659936357ac2886dcd1bd719594b32
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / arch / arm / vdso / vgettimeofday.c
1 /*
2 * Userspace implementations of gettimeofday() and friends.
3 *
4 * Copyright (C) 2017 Cavium, Inc.
5 * Copyright (C) 2015 Mentor Graphics Corporation
6 * Copyright (C) 2012 ARM Limited
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Author: Will Deacon <will.deacon@arm.com>
21 * Rewritten from arm64 version into C by: Andrew Pinski <apinski@cavium.com>
22 * Reworked and rebased over arm version by: Mark Salyzyn <salyzyn@android.com>
23 */
24
25 #include <asm/barrier.h>
26 #include <linux/compiler.h> /* for notrace */
27 #include <linux/math64.h> /* for __iter_div_u64_rem() */
28 #include <uapi/linux/time.h> /* for struct timespec */
29
30 #include "compiler.h"
31 #include "datapage.h"
32
33 DEFINE_FALLBACK(gettimeofday, struct timeval *, tv, struct timezone *, tz)
34 DEFINE_FALLBACK(clock_gettime, clockid_t, clock, struct timespec *, ts)
35 DEFINE_FALLBACK(clock_getres, clockid_t, clock, struct timespec *, ts)
36
37 static notrace u32 vdso_read_begin(const struct vdso_data *vd)
38 {
39 u32 seq;
40
41 do {
42 seq = READ_ONCE(vd->tb_seq_count);
43
44 if ((seq & 1) == 0)
45 break;
46
47 cpu_relax();
48 } while (true);
49
50 smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
51 return seq;
52 }
53
54 static notrace int vdso_read_retry(const struct vdso_data *vd, u32 start)
55 {
56 u32 seq;
57
58 smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
59 seq = READ_ONCE(vd->tb_seq_count);
60 return seq != start;
61 }
62
63 static notrace int do_realtime_coarse(const struct vdso_data *vd,
64 struct timespec *ts)
65 {
66 u32 seq;
67
68 do {
69 seq = vdso_read_begin(vd);
70
71 ts->tv_sec = vd->xtime_coarse_sec;
72 ts->tv_nsec = vd->xtime_coarse_nsec;
73
74 } while (vdso_read_retry(vd, seq));
75
76 return 0;
77 }
78
79 static notrace int do_monotonic_coarse(const struct vdso_data *vd,
80 struct timespec *ts)
81 {
82 struct timespec tomono;
83 u32 seq;
84 u64 nsec;
85
86 do {
87 seq = vdso_read_begin(vd);
88
89 ts->tv_sec = vd->xtime_coarse_sec;
90 ts->tv_nsec = vd->xtime_coarse_nsec;
91
92 tomono.tv_sec = vd->wtm_clock_sec;
93 tomono.tv_nsec = vd->wtm_clock_nsec;
94
95 } while (vdso_read_retry(vd, seq));
96
97 ts->tv_sec += tomono.tv_sec;
98 /* open coding timespec_add_ns */
99 ts->tv_sec += __iter_div_u64_rem(ts->tv_nsec + tomono.tv_nsec,
100 NSEC_PER_SEC, &nsec);
101 ts->tv_nsec = nsec;
102
103 return 0;
104 }
105
106 #ifdef CONFIG_ARM_ARCH_TIMER
107
108 /*
109 * Returns the clock delta, in nanoseconds left-shifted by the clock
110 * shift.
111 */
112 static notrace u64 get_clock_shifted_nsec(const u64 cycle_last,
113 const u32 mult,
114 const u64 mask)
115 {
116 u64 res;
117
118 /* Read the virtual counter. */
119 res = arch_vdso_read_counter();
120
121 res = res - cycle_last;
122
123 res &= mask;
124 return res * mult;
125 }
126
127 static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
128 {
129 u32 seq, mult, shift;
130 u64 nsec, cycle_last;
131 u64 mask;
132 vdso_xtime_clock_sec_t sec;
133
134 do {
135 seq = vdso_read_begin(vd);
136
137 if (vd->use_syscall)
138 return -1;
139
140 cycle_last = vd->cs_cycle_last;
141
142 mult = vd->cs_mono_mult;
143 shift = vd->cs_shift;
144 mask = vd->cs_mask;
145
146 sec = vd->xtime_clock_sec;
147 nsec = vd->xtime_clock_snsec;
148
149 } while (unlikely(vdso_read_retry(vd, seq)));
150
151 nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
152 nsec >>= shift;
153 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
154 ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
155 ts->tv_nsec = nsec;
156
157 return 0;
158 }
159
160 static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
161 {
162 u32 seq, mult, shift;
163 u64 nsec, cycle_last;
164 u64 mask;
165 vdso_wtm_clock_nsec_t wtm_nsec;
166 __kernel_time_t sec;
167
168 do {
169 seq = vdso_read_begin(vd);
170
171 if (vd->use_syscall)
172 return -1;
173
174 cycle_last = vd->cs_cycle_last;
175
176 mult = vd->cs_mono_mult;
177 shift = vd->cs_shift;
178 mask = vd->cs_mask;
179
180 sec = vd->xtime_clock_sec;
181 nsec = vd->xtime_clock_snsec;
182
183 sec += vd->wtm_clock_sec;
184 wtm_nsec = vd->wtm_clock_nsec;
185
186 } while (unlikely(vdso_read_retry(vd, seq)));
187
188 nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
189 nsec >>= shift;
190 nsec += wtm_nsec;
191 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
192 ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
193 ts->tv_nsec = nsec;
194
195 return 0;
196 }
197
198 static notrace int do_monotonic_raw(const struct vdso_data *vd,
199 struct timespec *ts)
200 {
201 u32 seq, mult, shift;
202 u64 nsec, cycle_last;
203 u64 mask;
204 vdso_raw_time_sec_t sec;
205
206 do {
207 seq = vdso_read_begin(vd);
208
209 if (vd->use_syscall)
210 return -1;
211
212 cycle_last = vd->cs_cycle_last;
213
214 mult = vd->cs_raw_mult;
215 shift = vd->cs_shift;
216 mask = vd->cs_mask;
217
218 sec = vd->raw_time_sec;
219 nsec = vd->raw_time_nsec;
220
221 } while (unlikely(vdso_read_retry(vd, seq)));
222
223 nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
224 nsec >>= shift;
225 /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
226 ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
227 ts->tv_nsec = nsec;
228
229 return 0;
230 }
231
232 #else /* CONFIG_ARM_ARCH_TIMER */
233
234 static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
235 {
236 return -1;
237 }
238
239 static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
240 {
241 return -1;
242 }
243
244 static notrace int do_monotonic_raw(const struct vdso_data *vd,
245 struct timespec *ts)
246 {
247 return -1;
248 }
249
250 #endif /* CONFIG_ARM_ARCH_TIMER */
251
252 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
253 {
254 const struct vdso_data *vd = __get_datapage();
255
256 switch (clock) {
257 case CLOCK_REALTIME_COARSE:
258 do_realtime_coarse(vd, ts);
259 break;
260 case CLOCK_MONOTONIC_COARSE:
261 do_monotonic_coarse(vd, ts);
262 break;
263 case CLOCK_REALTIME:
264 if (do_realtime(vd, ts))
265 goto fallback;
266 break;
267 case CLOCK_MONOTONIC:
268 if (do_monotonic(vd, ts))
269 goto fallback;
270 break;
271 case CLOCK_MONOTONIC_RAW:
272 if (do_monotonic_raw(vd, ts))
273 goto fallback;
274 break;
275 default:
276 goto fallback;
277 }
278
279 return 0;
280 fallback:
281 return clock_gettime_fallback(clock, ts);
282 }
283
284 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
285 {
286 const struct vdso_data *vd = __get_datapage();
287
288 if (likely(tv != NULL)) {
289 struct timespec ts;
290
291 if (do_realtime(vd, &ts))
292 return gettimeofday_fallback(tv, tz);
293
294 tv->tv_sec = ts.tv_sec;
295 tv->tv_usec = ts.tv_nsec / 1000;
296 }
297
298 if (unlikely(tz != NULL)) {
299 tz->tz_minuteswest = vd->tz_minuteswest;
300 tz->tz_dsttime = vd->tz_dsttime;
301 }
302
303 return 0;
304 }
305
306 int __vdso_clock_getres(clockid_t clock, struct timespec *res)
307 {
308 long nsec;
309
310 if (clock == CLOCK_REALTIME ||
311 clock == CLOCK_MONOTONIC ||
312 clock == CLOCK_MONOTONIC_RAW)
313 nsec = MONOTONIC_RES_NSEC;
314 else if (clock == CLOCK_REALTIME_COARSE ||
315 clock == CLOCK_MONOTONIC_COARSE)
316 nsec = LOW_RES_NSEC;
317 else
318 return clock_getres_fallback(clock, res);
319
320 if (likely(res != NULL)) {
321 res->tv_sec = 0;
322 res->tv_nsec = nsec;
323 }
324
325 return 0;
326 }