/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock ids.
 */

#define MAX_HRTIMER_BASES 2

static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
{
	{
		.index = CLOCK_REALTIME,
		.get_time = &ktime_get_real,
		.resolution = KTIME_REALTIME_RES,
	},
	{
		.index = CLOCK_MONOTONIC,
		.get_time = &ktime_get,
		.resolution = KTIME_MONOTONIC_RES,
	},
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 *
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_base *base)
{
	ktime_t xtim, tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xtim = timespec_to_ktime(xtime);
		tomono = timespec_to_ktime(wall_to_monotonic);

	} while (read_seqretry(&xtime_lock, seq));

	base[CLOCK_REALTIME].softirq_time = xtim;
	base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

#define set_curr_timer(b, t)	do { (b)->curr_timer = (t); } while (0)

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer is removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
					      unsigned long *flags)
{
	struct hrtimer_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
{
	struct hrtimer_base *new_base;

	new_base = &__get_cpu_var(hrtimer_bases[base->index]);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change the timer's base while it is
		 * running, so we keep it on the same CPU. No hassle vs.
		 * reprogramming the event source in the high resolution
		 * case. The softirq code will take care of this when the
		 * timer function has completed. There is no conflict as
		 * we hold the lock until the timer is enqueued.
		 */
		if (unlikely(base->curr_timer == timer))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = NULL;
		spin_unlock(&base->lock);
		spin_lock(&new_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

#define set_curr_timer(b, t)	do { } while (0)

static inline struct hrtimer_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_base *base = timer->base;

	spin_lock_irqsave(&base->lock, *flags);

	return base;
}

#define switch_hrtimer_base(t, b)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 *
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

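/*
 * Worked example (editor's sketch, not part of the original file):
 * ktime_add_ns(kt, 2500000000ULL). Since 2500000000 >= NSEC_PER_SEC,
 * do_div() splits the addend into nsec = 2 (seconds) with rem =
 * 500000000, and ktime_set(2, 500000000) builds the normalized
 * addend before the final ktime_add().
 */
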
#else /* CONFIG_KTIME_SCALAR */

# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
static unsigned long ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc, inc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	inc = div;
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return (unsigned long) dclc;
}

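/*
 * Worked example (editor's sketch, not part of the original file):
 * ktime_divns(10 s, 5 s), i.e. kt = 10000000000 ns, div = 5000000000.
 * The divisor needs 33 bits, so the loop shifts once (sft = 1,
 * div = 2500000000, which now fits in 32 bits). The dividend is
 * shifted by the same amount (dclc = 5000000000), and the 32-bit
 * do_div() yields 5000000000 / 2500000000 = 2, the exact quotient.
 */
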
#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)	(unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */

/*
 * Counterpart to lock_hrtimer_base() above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 *
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);

	return orun;
}

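/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a periodic timer callback built on hrtimer_forward(). The
 * name example_tick() and the 10 ms period are hypothetical. When
 * several periods were missed, hrtimer_forward() skips them in one
 * call instead of firing once per missed interval.
 */
static int example_tick(struct hrtimer *timer)
{
	/* Push the expiry past 'now' in whole 10 ms steps: */
	hrtimer_forward(timer, timer->base->get_time(),
			ktime_set(0, 10000000));

	return HRTIMER_RESTART;	/* re-enqueue at the new expiry */
}
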
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer into the rbtree and check whether it
	 * replaces the first pending timer
	 */
	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);

	if (!base->first || timer->expires.tv64 <
	    rb_entry(base->first, struct hrtimer, node)->expires.tv64)
		base->first = &timer->node;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	/*
	 * Remove the timer from the rbtree and replace the
	 * first entry pointer if necessary.
	 */
	if (base->first == &timer->node)
		base->first = rb_next(&timer->node);
	rb_erase(&timer->node, &base->active);
	timer->node.rb_parent = HRTIMER_INACTIVE;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
	if (hrtimer_active(timer)) {
		__remove_hrtimer(timer, base);
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start - (re)start a timer on the current CPU
 *
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_REL) {
		tim = ktime_add(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add(tim, base->resolution);
#endif
	}
	timer->expires = tim;

	enqueue_hrtimer(timer, new_base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

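/*
 * Usage sketch (editor's addition, not part of the original file):
 * arming a timer one millisecond from now. With HRTIMER_REL the
 * expiry is made absolute by adding base->get_time() above. The name
 * example_arm_oneshot() is hypothetical; the timer is assumed to have
 * been set up with hrtimer_init().
 */
static void example_arm_oneshot(struct hrtimer *timer)
{
	/* Returns 1 if the timer was already queued and got re-armed: */
	hrtimer_start(timer, ktime_set(0, 1000000), HRTIMER_REL);
}
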
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 *
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (base->curr_timer != timer)
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 *
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

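/*
 * Sketch (editor's addition, not part of the original file)
 * contrasting the two cancel interfaces: hrtimer_try_to_cancel()
 * returns -1 instead of waiting when the callback is running, while
 * hrtimer_cancel() spins until the handler has finished. The name
 * example_stop_timer() is hypothetical.
 */
static void example_stop_timer(struct hrtimer *timer)
{
	if (hrtimer_try_to_cancel(timer) < 0) {
		/* Callback is executing; wait for it to complete: */
		hrtimer_cancel(timer);
	}
}
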
/**
 * hrtimer_get_remaining - get remaining time for the timer
 *
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, timer->base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}

#ifdef CONFIG_NO_IDLE_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
		struct hrtimer *timer;

		spin_lock_irqsave(&base->lock, flags);
		if (!base->first) {
			spin_unlock_irqrestore(&base->lock, flags);
			continue;
		}
		timer = rb_entry(base->first, struct hrtimer, node);
		delta.tv64 = timer->expires.tv64;
		spin_unlock_irqrestore(&base->lock, flags);
		delta = ktime_sub(delta, base->get_time());
		if (delta.tv64 < mindelta.tv64)
			mindelta.tv64 = delta.tv64;
	}
	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

/**
 * hrtimer_init - initialize a timer to the given clock
 *
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_base *bases;

	memset(timer, 0, sizeof(struct hrtimer));

	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &bases[clock_id];
	timer->node.rb_parent = HRTIMER_INACTIVE;
}

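/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the usual init-then-start sequence. hrtimer_init() memsets
 * the timer, so ->function must be assigned afterwards. The names
 * example_expired()/example_setup() and the 100 us timeout are
 * hypothetical.
 */
static int example_expired(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;	/* one-shot: do not re-enqueue */
}

static void example_setup(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_REL);
	timer->function = example_expired;
	hrtimer_start(timer, ktime_set(0, 100000), HRTIMER_REL);
}
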
/**
 * hrtimer_get_res - get the timer resolution for a clock
 *
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by which_clock in the
 * variable pointed to by tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_base *bases;

	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
	*tp = ktime_to_timespec(bases[which_clock].resolution);

	return 0;
}

/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
	struct rb_node *node;

	if (!base->first)
		return;

	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock_irq(&base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		int (*fn)(struct hrtimer *);
		int restart;

		timer = rb_entry(node, struct hrtimer, node);
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

		fn = timer->function;
		set_curr_timer(base, timer);
		__remove_hrtimer(timer, base);
		spin_unlock_irq(&base->lock);

		restart = fn(timer);

		spin_lock_irq(&base->lock);

		if (restart != HRTIMER_NORESTART) {
			BUG_ON(hrtimer_active(timer));
			enqueue_hrtimer(timer, base);
		}
	}
	set_curr_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
	int i;

	hrtimer_get_softirq_time(base);

	for (i = 0; i < MAX_HRTIMER_BASES; i++)
		run_hrtimer_queue(&base[i]);
}

/*
 * Sleep related functions:
 */
static int hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);

		schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_ABS;

	} while (t->task && !signal_pending(current));

	return t->task == NULL;
}

static long __sched nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	struct timespec tu;
	ktime_t time;

	restart->fn = do_no_restart_syscall;

	hrtimer_init(&t.timer, restart->arg3, HRTIMER_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;

	if (do_nanosleep(&t, HRTIMER_ABS))
		return 0;

	rmtp = (struct timespec __user *) restart->arg2;
	if (rmtp) {
		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (time.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(time);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart->fn = nanosleep_restart;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	struct timespec tu;
	ktime_t rem;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (rem.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(rem);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = nanosleep_restart;
	restart->arg0 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg1 = t.timer.expires.tv64 >> 32;
	restart->arg2 = (unsigned long) rmtp;
	restart->arg3 = (unsigned long) t.timer.base->index;

	return -ERESTART_RESTARTBLOCK;
}

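/*
 * Editor's note (not part of the original file): the 64-bit absolute
 * expiry is split across two unsigned longs so the restart path works
 * on 32-bit machines as well: arg0 carries the low 32 bits, arg1 the
 * high 32, and nanosleep_restart() above reassembles them with
 * ((u64)restart->arg1 << 32) | (u64)restart->arg0.
 */
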
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++)
		spin_lock_init(&base->lock);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_base *old_base,
				 struct hrtimer_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		__remove_hrtimer(timer, old_base);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(hrtimer_bases, cpu);
	new_base = get_cpu_var(hrtimer_bases);

	local_irq_disable();

	for (i = 0; i < MAX_HRTIMER_BASES; i++) {

		spin_lock(&new_base->lock);
		spin_lock(&old_base->lock);

		BUG_ON(old_base->curr_timer);

		migrate_hrtimer_list(old_base, new_base);

		spin_unlock(&old_base->lock);
		spin_unlock(&new_base->lock);
		old_base++;
		new_base++;
	}

	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int hrtimer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}