/*
 * linux/kernel/time.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file contains the interface functions for the various
 * time related system calls: time, stime, gettimeofday, settimeofday,
 * adjtime
 */
/*
 * Modification history kernel/time.c
 *
 * 1993-09-02    Philip Gladstone
 *      Created file with time related functions from sched.c and adjtimex()
 * 1993-10-08    Torsten Duwe
 *      adjtime interface update and CMOS clock write code
 * 1995-08-13    Torsten Duwe
 *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16    Ulrich Windl
 *      Introduced error checking for many cases in adjtimex().
 *      Updated NTP code according to technical memorandum Jan '96
 *      "A Kernel Model for Precision Timekeeping" by Dave Mills
 *      Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *      (Even though the technical memorandum forbids it)
 * 2004-07-14    Christoph Lameter
 *      Added getnstimeofday to allow the posix timer functions to return
 *      with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "timeconst.h"

/*
 * The timezone where the local system is located. Used as a default by some
 * programs that obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday(). Is this for backwards compatibility? If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
        time_t i = get_seconds();

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday(). Is this for backwards compatibility? If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */

SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        if (likely(tv != NULL)) {
                struct timeval ktv;
                do_gettimeofday(&ktv);
                if (copy_to_user(tv, &ktv, sizeof(ktv)))
                        return -EFAULT;
        }
        if (unlikely(tz != NULL)) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }
        return 0;
}

/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives. Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours) or
 * compile the timezone information into the kernel. Bad, bad....
 *
 *                                              - TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
static inline void warp_clock(void)
{
        struct timespec adjust;

        adjust = current_kernel_time();
        if (sys_tz.tz_minuteswest != 0)
                persistent_clock_is_local = 1;
        adjust.tv_sec += sys_tz.tz_minuteswest * 60;
        do_settimeofday(&adjust);
}
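
/*
 * Worked example (illustrative, not part of the original source): if the
 * CMOS clock was running in US Eastern standard time, userspace passes a
 * timezone with tz_minuteswest = 300 (UTC-5). warp_clock() then advances
 * the system time by 300 * 60 = 18000 seconds, so the kernel clock ends up
 * ticking UTC even though the hardware clock held local time.
 */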

/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */

int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
{
        static int firsttime = 1;
        int error = 0;

        if (tv && !timespec_valid(tv))
                return -EINVAL;

        error = security_settime(tv, tz);
        if (error)
                return error;

        if (tz) {
                sys_tz = *tz;
                update_vsyscall_tz();
                if (firsttime) {
                        firsttime = 0;
                        if (!tv)
                                warp_clock();
                }
        }
        if (tv)
                return do_settimeofday(tv);
        return 0;
}

SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        struct timeval user_tv;
        struct timespec new_ts;
        struct timezone new_tz;

        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
        struct timex txc;       /* Local copy of parameter */
        int ret;

        /* Copy the user data space into the kernel copy
         * structure. But bear in mind that the structures
         * may change
         */
        if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
                return -EFAULT;
        ret = do_adjtimex(&txc);
        return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

/**
 * current_fs_time - Return FS time
 * @sb: Superblock.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 */
struct timespec current_fs_time(struct super_block *sb)
{
        struct timespec now = current_kernel_time();
        return timespec_trunc(now, sb->s_time_gran);
}
EXPORT_SYMBOL(current_fs_time);

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
        return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
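
/*
 * Worked example (illustrative, not part of the original source): with
 * HZ = 1000 the first branch applies and one jiffy is exactly 1 ms, so
 * jiffies_to_msecs(j) == j. With HZ = 250 the same branch yields
 * (1000 / 250) * j == 4 * j. Only an unusual HZ such as 1024 falls
 * through to the precomputed constants from timeconst.h.
 */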

unsigned int jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
        return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
        return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
        return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. gran must be smaller than a second.
 * Always rounds down.
 *
 * This function should only be used for timestamps returned by
 * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
 * it doesn't handle the better resolution of the latter.
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
        /*
         * Division is pretty slow so avoid it for common cases.
         * Currently current_kernel_time() never returns better than
         * jiffies resolution. Exploit that.
         */
        if (gran <= jiffies_to_usecs(1) * 1000) {
                /* nothing */
        } else if (gran == 1000000000) {
                t.tv_nsec = 0;
        } else {
                t.tv_nsec -= t.tv_nsec % gran;
        }
        return t;
}
EXPORT_SYMBOL(timespec_trunc);
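
/*
 * Worked example (illustrative, not part of the original source): a
 * filesystem with one-second timestamps passes gran = 1000000000 and
 * tv_nsec is simply zeroed, while a granularity at or below the jiffies
 * resolution (e.g. 1 ns) takes the first branch and leaves the value
 * untouched, since current_kernel_time() cannot do better anyway.
 */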

/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08)
 */
unsigned long
mktime(const unsigned int year0, const unsigned int mon0,
       const unsigned int day, const unsigned int hour,
       const unsigned int min, const unsigned int sec)
{
        unsigned int mon = mon0, year = year0;

        /* 1..12 -> 11,12,1..10 */
        if (0 >= (int) (mon -= 2)) {
                mon += 12;      /* Puts Feb last since it has leap day */
                year -= 1;
        }

        return ((((unsigned long)
                  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                  year*365 - 719499
            )*24 + hour /* now have hours */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}

EXPORT_SYMBOL(mktime);
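
/*
 * Worked example (illustrative, not part of the original source): for the
 * date quoted above, mktime(1980, 12, 31, 23, 59, 59) returns 347155199.
 * A typical caller converts a hardware-clock reading (assumed to already
 * be in UTC) into a timespec; a minimal sketch, with an arbitrary date:
 */
#if 0
        struct timespec ts = {
                .tv_sec  = mktime(2013, 3, 9, 12, 34, 56),
                .tv_nsec = 0,
        };

        do_settimeofday(&ts);
#endif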

/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts: pointer to timespec variable to be set
 * @sec: seconds to set
 * @nsec: nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
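
/*
 * Worked example (illustrative, not part of the original source):
 * set_normalized_timespec(&ts, 5, 1700000000) stores {6, 700000000},
 * while set_normalized_timespec(&ts, 2, -300000000) stores {1, 700000000}:
 * only tv_sec carries the sign, as described above.
 */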

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec: the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;
        s32 rem;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec);

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec: the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
        struct timespec ts = ns_to_timespec(nsec);
        struct timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_timeval);
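
/*
 * Worked example (illustrative, not part of the original source):
 * ns_to_timespec(1234567890) returns {1, 234567890} and
 * ns_to_timeval(1234567890) returns {1, 234567}; a negative input such
 * as -500000000 becomes {-1, 500000000} after the remainder fix-up.
 */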

/*
 * When we convert to jiffies then we interpret incoming values
 * the following way:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor
 *
 * We must also be careful about 32-bit overflows.
 */
unsigned long msecs_to_jiffies(const unsigned int m)
{
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;

#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        /*
         * HZ is equal to or smaller than 1000, and 1000 is a nice
         * round multiple of HZ, divide with the factor between them,
         * but round upwards:
         */
        return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        /*
         * HZ is larger than 1000, and HZ is a nice round multiple of
         * 1000 - simply multiply with the factor between them.
         *
         * But first make sure the multiplication result cannot
         * overflow:
         */
        if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;

        return m * (HZ / MSEC_PER_SEC);
#else
        /*
         * Generic case - multiply, round and divide. But first
         * check that, if we are doing a net multiplication,
         * we wouldn't overflow:
         */
        if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;

        return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
                >> MSEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(msecs_to_jiffies);
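
/*
 * Illustrative sketch (not part of the original source): the usual pattern
 * for turning a millisecond timeout into jiffies and polling against it.
 * The 100 ms value and the done() helper are only examples; note that the
 * conversion rounds up, so with HZ = 100 msecs_to_jiffies(1) is one full
 * 10 ms jiffy.
 */
#if 0
        unsigned long timeout = jiffies + msecs_to_jiffies(100);

        while (!done(dev)) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cpu_relax();
        }
#endif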

unsigned long usecs_to_jiffies(const unsigned int u)
{
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
        return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
        return u * (HZ / USEC_PER_SEC);
#else
        return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
                >> USEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries. I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
unsigned long
timespec_to_jiffies(const struct timespec *value)
{
        unsigned long sec = value->tv_sec;
        long nsec = value->tv_nsec + TICK_NSEC - 1;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                nsec = 0;
        }
        return (((u64)sec * SEC_CONVERSION) +
                (((u64)nsec * NSEC_CONVERSION) >>
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timespec_to_jiffies);
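
/*
 * Worked example (illustrative, not part of the original source): because
 * of the TICK_NSEC - 1 term the conversion rounds up, so with HZ = 100
 * (TICK_NSEC = 10000000) a timespec of {0, 1} already yields one jiffy,
 * while {1, 0} yields exactly 100 jiffies.
 */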

void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;
        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec);

/* Same for "timeval"
 *
 * Well, almost. The problem here is that the real system resolution is
 * in nanoseconds and the value being converted is in microseconds.
 * Also for some machines (those that use HZ = 1024, in particular),
 * there is a LARGE error in the tick size in microseconds.
 *
 * The solution we use is to do the rounding AFTER we convert the
 * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
 * Instruction wise, this should cost only an additional add with carry
 * instruction above the way it was done above.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
        unsigned long sec = value->tv_sec;
        long usec = value->tv_usec;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                usec = 0;
        }
        return (((u64)sec * SEC_CONVERSION) +
                (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
                 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        return x * (USER_HZ / HZ);
# else
        return x / (HZ / USER_HZ);
# endif
#else
        return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
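
/*
 * Worked example (illustrative, not part of the original source): USER_HZ
 * is the tick rate exposed to userspace (typically 100). With HZ = 1000
 * one USER_HZ tick corresponds to ten kernel jiffies, so
 * jiffies_to_clock_t(x) == x / 10; with HZ = 100 the two rates match and
 * the value passes through unchanged.
 */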

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
        if (x >= ~0UL / (HZ / USER_HZ))
                return ~0UL;
        return x * (HZ / USER_HZ);
#else
        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;

        /* .. but do try to contain it here */
        return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
        x = div_u64(x, HZ / USER_HZ);
# else
        /* Nothing to do */
# endif
#else
        /*
         * There are better ways that don't overflow early,
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
        x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
        return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
        return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
        return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
         * overflow after 64.99 years.
         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
         */
        return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
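
/*
 * Worked example (illustrative, not part of the original source): with the
 * common USER_HZ = 100, NSEC_PER_SEC % USER_HZ == 0, so nsec_to_clock_t()
 * reduces to div_u64(x, 10000000); e.g. 250000000 ns of CPU time becomes
 * 25 clock ticks in the figures exported through /proc.
 */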

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n: nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, the type of the input is not unsigned int
 * but u64. It also does not return MAX_JIFFY_OFFSET, since this function
 * is designed for the scheduler, not for device drivers calculating a
 * timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
        return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
        /* overflow after 292 years if HZ = 1024 */
        return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * Generic case - optimized for cases where HZ is a multiple of 3.
         * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
         */
        return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n: nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, the type of the input is not unsigned int
 * but u64. It also does not return MAX_JIFFY_OFFSET, since this function
 * is designed for the scheduler, not for device drivers calculating a
 * timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
        return (unsigned long)nsecs_to_jiffies64(n);
}

/*
 * Add two timespec values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0)
 */
struct timespec timespec_add_safe(const struct timespec lhs,
                                  const struct timespec rhs)
{
        struct timespec res;

        set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
                                lhs.tv_nsec + rhs.tv_nsec);

        if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
                res.tv_sec = TIME_T_MAX;

        return res;
}
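
/*
 * Worked example (illustrative, not part of the original source): adding
 * {TIME_T_MAX - 1, 900000000} and {5, 200000000} wraps tv_sec, so the
 * normalized sum comes out smaller than one of the operands; the check
 * above catches this and clamps the result to TIME_T_MAX instead of
 * returning a wrapped value.
 */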