/* arch/arm/vdso/vgettimeofday.c */
1 /*
2 * Userspace implementations of gettimeofday() and friends.
3 *
4 * Copyright (C) 2017 Cavium, Inc.
5 * Copyright (C) 2015 Mentor Graphics Corporation
6 * Copyright (C) 2012 ARM Limited
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Author: Will Deacon <will.deacon@arm.com>
 * Rewritten from arm64 version into C by: Andrew Pinski <apinski@cavium.com>
22 * Reworked and rebased over arm version by: Mark Salyzyn <salyzyn@android.com>
23 */
24
25 #include <asm/barrier.h>
26 #include <linux/compiler.h> /* for notrace */
27 #include <linux/math64.h> /* for __iter_div_u64_rem() */
28 #include <uapi/linux/time.h> /* for struct timespec */
29
30 #include "compiler.h"
31 #include "datapage.h"
32
/*
 * Syscall fallbacks, used when a request cannot be serviced from the
 * vDSO data page (see the fallback paths in __vdso_clock_gettime and
 * __vdso_gettimeofday below).
 */
DEFINE_FALLBACK(gettimeofday, struct timeval *, tv, struct timezone *, tz)
DEFINE_FALLBACK(clock_gettime, clockid_t, clock, struct timespec *, ts)
35
36 static notrace u32 vdso_read_begin(const struct vdso_data *vd)
37 {
38 u32 seq;
39
40 do {
41 seq = READ_ONCE(vd->tb_seq_count);
42
43 if ((seq & 1) == 0)
44 break;
45
46 cpu_relax();
47 } while (true);
48
49 smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */
50 return seq;
51 }
52
53 static notrace int vdso_read_retry(const struct vdso_data *vd, u32 start)
54 {
55 u32 seq;
56
57 smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */
58 seq = READ_ONCE(vd->tb_seq_count);
59 return seq != start;
60 }
61
62 static notrace int do_realtime_coarse(const struct vdso_data *vd,
63 struct timespec *ts)
64 {
65 u32 seq;
66
67 do {
68 seq = vdso_read_begin(vd);
69
70 ts->tv_sec = vd->xtime_coarse_sec;
71 ts->tv_nsec = vd->xtime_coarse_nsec;
72
73 } while (vdso_read_retry(vd, seq));
74
75 return 0;
76 }
77
78 static notrace int do_monotonic_coarse(const struct vdso_data *vd,
79 struct timespec *ts)
80 {
81 struct timespec tomono;
82 u32 seq;
83 u64 nsec;
84
85 do {
86 seq = vdso_read_begin(vd);
87
88 ts->tv_sec = vd->xtime_coarse_sec;
89 ts->tv_nsec = vd->xtime_coarse_nsec;
90
91 tomono.tv_sec = vd->wtm_clock_sec;
92 tomono.tv_nsec = vd->wtm_clock_nsec;
93
94 } while (vdso_read_retry(vd, seq));
95
96 ts->tv_sec += tomono.tv_sec;
97 /* open coding timespec_add_ns */
98 ts->tv_sec += __iter_div_u64_rem(ts->tv_nsec + tomono.tv_nsec,
99 NSEC_PER_SEC, &nsec);
100 ts->tv_nsec = nsec;
101
102 return 0;
103 }
104
105 #ifdef CONFIG_ARM_ARCH_TIMER
106
107 /*
108 * Returns the clock delta, in nanoseconds left-shifted by the clock
109 * shift.
110 */
111 static notrace u64 get_clock_shifted_nsec(const u64 cycle_last,
112 const u32 mult,
113 const u64 mask)
114 {
115 u64 res;
116
117 /* Read the virtual counter. */
118 res = arch_vdso_read_counter();
119
120 res = res - cycle_last;
121
122 res &= mask;
123 return res * mult;
124 }
125
/*
 * CLOCK_REALTIME: wall-clock time from the architected counter.
 *
 * Takes a consistent snapshot of the clocksource parameters and base
 * time under the tb_seq_count seqlock, then adds the counter delta.
 * Returns 0 on success, or -1 when the kernel requires the syscall path
 * (vd->use_syscall), in which case *ts is not written.
 */
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
	u64 mask;
	vdso_xtime_clock_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Clocksource cannot be read from userspace. */
		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
		mask = vd->cs_mask;

		/* Base time; the nanosecond part is kept shifted by cs_shift. */
		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the shifted counter delta, then scale down to nanoseconds. */
	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}
158
/*
 * CLOCK_MONOTONIC: wall-clock time plus the wall-to-monotonic offset,
 * all fields snapshotted together under the tb_seq_count seqlock.
 * Returns 0 on success, or -1 when the kernel requires the syscall path
 * (vd->use_syscall), in which case *ts is not written.
 */
static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
	u64 mask;
	vdso_wtm_clock_nsec_t wtm_nsec;
	__kernel_time_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Clocksource cannot be read from userspace. */
		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		mult = vd->cs_mono_mult;
		shift = vd->cs_shift;
		mask = vd->cs_mask;

		/* Base time; the nanosecond part is kept shifted by cs_shift. */
		sec = vd->xtime_clock_sec;
		nsec = vd->xtime_clock_snsec;

		/* Wall-to-monotonic offset, folded into the same snapshot. */
		sec += vd->wtm_clock_sec;
		wtm_nsec = vd->wtm_clock_nsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the shifted counter delta, scale down, then add the offset. */
	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	nsec += wtm_nsec;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}
196
/*
 * CLOCK_MONOTONIC_RAW: like CLOCK_MONOTONIC but uses the raw (NTP/adj-free)
 * multiplier cs_raw_mult and the raw base time fields.
 * Returns 0 on success, or -1 when the kernel requires the syscall path
 * (vd->use_syscall), in which case *ts is not written.
 */
static notrace int do_monotonic_raw(const struct vdso_data *vd,
				    struct timespec *ts)
{
	u32 seq, mult, shift;
	u64 nsec, cycle_last;
	u64 mask;
	vdso_raw_time_sec_t sec;

	do {
		seq = vdso_read_begin(vd);

		/* Clocksource cannot be read from userspace. */
		if (vd->use_syscall)
			return -1;

		cycle_last = vd->cs_cycle_last;

		/* Raw multiplier, unlike cs_mono_mult used by do_realtime. */
		mult = vd->cs_raw_mult;
		shift = vd->cs_shift;
		mask = vd->cs_mask;

		sec = vd->raw_time_sec;
		nsec = vd->raw_time_nsec;

	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the shifted counter delta, then scale down to nanoseconds. */
	nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
	nsec >>= shift;
	/* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;

	return 0;
}
230
231 #else /* CONFIG_ARM_ARCH_TIMER */
232
/* No architected timer: always divert CLOCK_REALTIME to the syscall. */
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
{
	return -1;
}
237
/* No architected timer: always divert CLOCK_MONOTONIC to the syscall. */
static notrace int do_monotonic(const struct vdso_data *vd, struct timespec *ts)
{
	return -1;
}
242
/* No architected timer: always divert CLOCK_MONOTONIC_RAW to the syscall. */
static notrace int do_monotonic_raw(const struct vdso_data *vd,
				    struct timespec *ts)
{
	return -1;
}
248
249 #endif /* CONFIG_ARM_ARCH_TIMER */
250
251 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
252 {
253 const struct vdso_data *vd = __get_datapage();
254
255 switch (clock) {
256 case CLOCK_REALTIME_COARSE:
257 do_realtime_coarse(vd, ts);
258 break;
259 case CLOCK_MONOTONIC_COARSE:
260 do_monotonic_coarse(vd, ts);
261 break;
262 case CLOCK_REALTIME:
263 if (do_realtime(vd, ts))
264 goto fallback;
265 break;
266 case CLOCK_MONOTONIC:
267 if (do_monotonic(vd, ts))
268 goto fallback;
269 break;
270 case CLOCK_MONOTONIC_RAW:
271 if (do_monotonic_raw(vd, ts))
272 goto fallback;
273 break;
274 default:
275 goto fallback;
276 }
277
278 return 0;
279 fallback:
280 return clock_gettime_fallback(clock, ts);
281 }
282
283 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
284 {
285 const struct vdso_data *vd = __get_datapage();
286
287 if (likely(tv != NULL)) {
288 struct timespec ts;
289
290 if (do_realtime(vd, &ts))
291 return gettimeofday_fallback(tv, tz);
292
293 tv->tv_sec = ts.tv_sec;
294 tv->tv_usec = ts.tv_nsec / 1000;
295 }
296
297 if (unlikely(tz != NULL)) {
298 tz->tz_minuteswest = vd->tz_minuteswest;
299 tz->tz_dsttime = vd->tz_dsttime;
300 }
301
302 return 0;
303 }