1 /*
2 * linux/kernel/timer.c
3 *
4 * Kernel internal timers, basic process system calls
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 *
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 *
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
20 */
21
22 #include <linux/kernel_stat.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
27 #include <linux/mm.h>
28 #include <linux/swap.h>
29 #include <linux/notifier.h>
30 #include <linux/thread_info.h>
31 #include <linux/time.h>
32 #include <linux/jiffies.h>
33 #include <linux/posix-timers.h>
34 #include <linux/cpu.h>
35 #include <linux/syscalls.h>
36 #include <linux/delay.h>
37 #include <linux/tick.h>
38 #include <linux/kallsyms.h>
39
40 #include <asm/uaccess.h>
41 #include <asm/unistd.h>
42 #include <asm/div64.h>
43 #include <asm/timex.h>
44 #include <asm/io.h>
45
46 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
47
48 EXPORT_SYMBOL(jiffies_64);
49
50 /*
51 * per-CPU timer vector definitions:
52 */
53 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
54 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
55 #define TVN_SIZE (1 << TVN_BITS)
56 #define TVR_SIZE (1 << TVR_BITS)
57 #define TVN_MASK (TVN_SIZE - 1)
58 #define TVR_MASK (TVR_SIZE - 1)
59
60 typedef struct tvec_s {
61 struct list_head vec[TVN_SIZE];
62 } tvec_t;
63
64 typedef struct tvec_root_s {
65 struct list_head vec[TVR_SIZE];
66 } tvec_root_t;
67
68 struct tvec_t_base_s {
69 spinlock_t lock;
70 struct timer_list *running_timer;
71 unsigned long timer_jiffies;
72 tvec_root_t tv1;
73 tvec_t tv2;
74 tvec_t tv3;
75 tvec_t tv4;
76 tvec_t tv5;
77 } ____cacheline_aligned;
78
79 typedef struct tvec_t_base_s tvec_base_t;
80
81 tvec_base_t boot_tvec_bases;
82 EXPORT_SYMBOL(boot_tvec_bases);
83 static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
84
85 /*
86 * Note that all tvec_bases are 2 byte aligned and the lower bit of
87 * base in timer_list is guaranteed to be zero. Use the LSB for
88 * the new flag to indicate whether the timer is deferrable.
89 */
90 #define TBASE_DEFERRABLE_FLAG (0x1)
91
92 /* Functions below help us manage 'deferrable' flag */
93 static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
94 {
95 return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
96 }
97
98 static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
99 {
100 return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
101 }
102
103 static inline void timer_set_deferrable(struct timer_list *timer)
104 {
105 timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
106 TBASE_DEFERRABLE_FLAG));
107 }
108
109 static inline void
110 timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
111 {
112 timer->base = (tvec_base_t *)((unsigned long)(new_base) |
113 tbase_get_deferrable(timer->base));
114 }
115
116 /**
117 * __round_jiffies - function to round jiffies to a full second
118 * @j: the time in (absolute) jiffies that should be rounded
119 * @cpu: the processor number on which the timeout will happen
120 *
121 * __round_jiffies() rounds an absolute time in the future (in jiffies)
122 * up or down to (approximately) full seconds. This is useful for timers
123 * for which the exact time they fire does not matter too much, as long as
124 * they fire approximately every X seconds.
125 *
126 * By rounding these timers to whole seconds, all such timers will fire
127 * at the same time, rather than at various times spread out. The goal
128 * of this is to have the CPU wake up less, which saves power.
129 *
130 * The exact rounding is skewed for each processor to avoid all
131 * processors firing at the exact same time, which could lead
132 * to lock contention or spurious cache line bouncing.
133 *
134 * The return value is the rounded version of the @j parameter.
135 */
136 unsigned long __round_jiffies(unsigned long j, int cpu)
137 {
138 int rem;
139 unsigned long original = j;
140
141 /*
142 * We don't want all cpus firing their timers at once hitting the
143 * same lock or cachelines, so we skew each extra cpu with an extra
144 * 3 jiffies. These 3 jiffies came originally from the mm/ code which
145 * already did this.
146 * The skew is done by adding 3*cpunr, then rounding, then subtracting
147 * this extra offset again.
148 */
149 j += cpu * 3;
150
151 rem = j % HZ;
152
153 /*
154 * If the target jiffy is just after a whole second (which can happen
155 * due to delays of the timer irq, long irq off times etc.) then
156 * we should round down to the whole second, not up. Use 1/4th second
157 * as the cutoff for this rounding, as an extreme upper bound.
158 */
159 if (rem < HZ/4) /* round down */
160 j = j - rem;
161 else /* round up */
162 j = j - rem + HZ;
163
164 /* now that we have rounded, subtract the extra skew again */
165 j -= cpu * 3;
166
167 if (j <= jiffies) /* rounding ate our timeout entirely; */
168 return original;
169 return j;
170 }
171 EXPORT_SYMBOL_GPL(__round_jiffies);
172
173 /**
174 * __round_jiffies_relative - function to round jiffies to a full second
175 * @j: the time in (relative) jiffies that should be rounded
176 * @cpu: the processor number on which the timeout will happen
177 *
178 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
179 * up or down to (approximately) full seconds. This is useful for timers
180 * for which the exact time they fire does not matter too much, as long as
181 * they fire approximately every X seconds.
182 *
183 * By rounding these timers to whole seconds, all such timers will fire
184 * at the same time, rather than at various times spread out. The goal
185 * of this is to have the CPU wake up less, which saves power.
186 *
187 * The exact rounding is skewed for each processor to avoid all
188 * processors firing at the exact same time, which could lead
189 * to lock contention or spurious cache line bouncing.
190 *
191 * The return value is the rounded version of the @j parameter.
192 */
193 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
194 {
195 /*
196 * In theory the following code can skip a jiffy in case jiffies
197 * increments right between the addition and the later subtraction.
198 * However since the entire point of this function is to use approximate
199 * timeouts, it's entirely ok to not handle that.
200 */
201 return __round_jiffies(j + jiffies, cpu) - jiffies;
202 }
203 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
204
205 /**
206 * round_jiffies - function to round jiffies to a full second
207 * @j: the time in (absolute) jiffies that should be rounded
208 *
209 * round_jiffies() rounds an absolute time in the future (in jiffies)
210 * up or down to (approximately) full seconds. This is useful for timers
211 * for which the exact time they fire does not matter too much, as long as
212 * they fire approximately every X seconds.
213 *
214 * By rounding these timers to whole seconds, all such timers will fire
215 * at the same time, rather than at various times spread out. The goal
216 * of this is to have the CPU wake up less, which saves power.
217 *
218 * The return value is the rounded version of the @j parameter.
219 */
220 unsigned long round_jiffies(unsigned long j)
221 {
222 return __round_jiffies(j, raw_smp_processor_id());
223 }
224 EXPORT_SYMBOL_GPL(round_jiffies);
225
226 /**
227 * round_jiffies_relative - function to round jiffies to a full second
228 * @j: the time in (relative) jiffies that should be rounded
229 *
230 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
231 * up or down to (approximately) full seconds. This is useful for timers
232 * for which the exact time they fire does not matter too much, as long as
233 * they fire approximately every X seconds.
234 *
235 * By rounding these timers to whole seconds, all such timers will fire
236 * at the same time, rather than at various times spread out. The goal
237 * of this is to have the CPU wake up less, which saves power.
238 *
239 * The return value is the rounded version of the @j parameter.
240 */
241 unsigned long round_jiffies_relative(unsigned long j)
242 {
243 return __round_jiffies_relative(j, raw_smp_processor_id());
244 }
245 EXPORT_SYMBOL_GPL(round_jiffies_relative);
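/*
 * Example (illustrative sketch, not part of the original file): a timer
 * handler that re-arms itself roughly every 5 seconds can round its
 * expiry so that it fires together with other second-aligned timers and
 * the CPU wakes up less often.  The structure and field names below are
 * made up for illustration.
 *
 *	static void my_poll(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		poll_hardware(dev);
 *		mod_timer(&dev->poll_timer,
 *			  round_jiffies(jiffies + 5 * HZ));
 *	}
 */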
246
247
248 static inline void set_running_timer(tvec_base_t *base,
249 struct timer_list *timer)
250 {
251 #ifdef CONFIG_SMP
252 base->running_timer = timer;
253 #endif
254 }
255
256 static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
257 {
258 unsigned long expires = timer->expires;
259 unsigned long idx = expires - base->timer_jiffies;
260 struct list_head *vec;
261
262 if (idx < TVR_SIZE) {
263 int i = expires & TVR_MASK;
264 vec = base->tv1.vec + i;
265 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
266 int i = (expires >> TVR_BITS) & TVN_MASK;
267 vec = base->tv2.vec + i;
268 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
269 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
270 vec = base->tv3.vec + i;
271 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
272 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
273 vec = base->tv4.vec + i;
274 } else if ((signed long) idx < 0) {
275 /*
276 * Can happen if you add a timer with expires == jiffies,
277 * or you set a timer to go off in the past
278 */
279 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
280 } else {
281 int i;
282 /* If the timeout is larger than 0xffffffff on 64-bit
283 * architectures then we use the maximum timeout:
284 */
285 if (idx > 0xffffffffUL) {
286 idx = 0xffffffffUL;
287 expires = idx + base->timer_jiffies;
288 }
289 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
290 vec = base->tv5.vec + i;
291 }
292 /*
293 * Timers are FIFO:
294 */
295 list_add_tail(&timer->entry, vec);
296 }
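/*
 * Worked example (added for illustration, assuming !CONFIG_BASE_SMALL,
 * i.e. TVR_BITS = 8 and TVN_BITS = 6): with base->timer_jiffies = 1000
 * and timer->expires = 1300, idx = 300.  That is >= TVR_SIZE (256) but
 * below 1 << (TVR_BITS + TVN_BITS) = 16384, so the timer goes into tv2
 * at slot (1300 >> 8) & 63 = 5, and only reaches tv1 when that tv2 slot
 * is cascaded down.
 */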
297
298 #ifdef CONFIG_TIMER_STATS
299 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
300 {
301 if (timer->start_site)
302 return;
303
304 timer->start_site = addr;
305 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
306 timer->start_pid = current->pid;
307 }
308
309 static void timer_stats_account_timer(struct timer_list *timer)
310 {
311 unsigned int flag = 0;
312
313 if (unlikely(tbase_get_deferrable(timer->base)))
314 flag |= TIMER_STATS_FLAG_DEFERRABLE;
315
316 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
317 timer->function, timer->start_comm, flag);
318 }
319
320 #else
321 static void timer_stats_account_timer(struct timer_list *timer) {}
322 #endif
323
324 /**
325 * init_timer - initialize a timer.
326 * @timer: the timer to be initialized
327 *
328 * init_timer() must be done to a timer prior to calling *any* of the
329 * other timer functions.
330 */
331 void fastcall init_timer(struct timer_list *timer)
332 {
333 timer->entry.next = NULL;
334 timer->base = __raw_get_cpu_var(tvec_bases);
335 #ifdef CONFIG_TIMER_STATS
336 timer->start_site = NULL;
337 timer->start_pid = -1;
338 memset(timer->start_comm, 0, TASK_COMM_LEN);
339 #endif
340 }
341 EXPORT_SYMBOL(init_timer);
342
343 void fastcall init_timer_deferrable(struct timer_list *timer)
344 {
345 init_timer(timer);
346 timer_set_deferrable(timer);
347 }
348 EXPORT_SYMBOL(init_timer_deferrable);
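/*
 * Example (illustrative sketch, not part of the original file): work that
 * may safely be postponed while the CPU is idle, such as a periodic
 * statistics flush, can use a deferrable timer so it does not wake an
 * otherwise idle CPU on its own.  The names below are made up.
 *
 *	static struct timer_list stats_timer;
 *
 *	init_timer_deferrable(&stats_timer);
 *	stats_timer.function = flush_stats;
 *	stats_timer.data = (unsigned long)my_stats;
 *	mod_timer(&stats_timer, jiffies + 10 * HZ);
 */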
349
350 static inline void detach_timer(struct timer_list *timer,
351 int clear_pending)
352 {
353 struct list_head *entry = &timer->entry;
354
355 __list_del(entry->prev, entry->next);
356 if (clear_pending)
357 entry->next = NULL;
358 entry->prev = LIST_POISON2;
359 }
360
361 /*
362 * We are using hashed locking: holding per_cpu(tvec_bases).lock
363 * means that all timers which are tied to this base via timer->base are
364 * locked, and the base itself is locked too.
365 *
366 * So __run_timers/migrate_timers can safely modify all timers which could
367 * be found on ->tvX lists.
368 *
369 * When the timer's base is locked, and the timer removed from list, it is
370 * possible to set timer->base = NULL and drop the lock: the timer remains
371 * locked.
372 */
373 static tvec_base_t *lock_timer_base(struct timer_list *timer,
374 unsigned long *flags)
375 __acquires(timer->base->lock)
376 {
377 tvec_base_t *base;
378
379 for (;;) {
380 tvec_base_t *prelock_base = timer->base;
381 base = tbase_get_base(prelock_base);
382 if (likely(base != NULL)) {
383 spin_lock_irqsave(&base->lock, *flags);
384 if (likely(prelock_base == timer->base))
385 return base;
386 /* The timer has migrated to another CPU */
387 spin_unlock_irqrestore(&base->lock, *flags);
388 }
389 cpu_relax();
390 }
391 }
392
393 int __mod_timer(struct timer_list *timer, unsigned long expires)
394 {
395 tvec_base_t *base, *new_base;
396 unsigned long flags;
397 int ret = 0;
398
399 timer_stats_timer_set_start_info(timer);
400 BUG_ON(!timer->function);
401
402 base = lock_timer_base(timer, &flags);
403
404 if (timer_pending(timer)) {
405 detach_timer(timer, 0);
406 ret = 1;
407 }
408
409 new_base = __get_cpu_var(tvec_bases);
410
411 if (base != new_base) {
412 /*
413 * We are trying to schedule the timer on the local CPU.
414 * However we can't change the timer's base while it is running,
415 * otherwise del_timer_sync() can't detect that the timer's
416 * handler has not yet finished. This also guarantees that
417 * the timer is serialized wrt itself.
418 */
419 if (likely(base->running_timer != timer)) {
420 /* See the comment in lock_timer_base() */
421 timer_set_base(timer, NULL);
422 spin_unlock(&base->lock);
423 base = new_base;
424 spin_lock(&base->lock);
425 timer_set_base(timer, base);
426 }
427 }
428
429 timer->expires = expires;
430 internal_add_timer(base, timer);
431 spin_unlock_irqrestore(&base->lock, flags);
432
433 return ret;
434 }
435
436 EXPORT_SYMBOL(__mod_timer);
437
438 /**
439 * add_timer_on - start a timer on a particular CPU
440 * @timer: the timer to be added
441 * @cpu: the CPU to start it on
442 *
443 * This is not very scalable on SMP. Double adds are not possible.
444 */
445 void add_timer_on(struct timer_list *timer, int cpu)
446 {
447 tvec_base_t *base = per_cpu(tvec_bases, cpu);
448 unsigned long flags;
449
450 timer_stats_timer_set_start_info(timer);
451 BUG_ON(timer_pending(timer) || !timer->function);
452 spin_lock_irqsave(&base->lock, flags);
453 timer_set_base(timer, base);
454 internal_add_timer(base, timer);
455 spin_unlock_irqrestore(&base->lock, flags);
456 }
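/*
 * Example (illustrative sketch, not part of the original file): arming a
 * timer that is known not to be pending so that it expires on a chosen
 * CPU, e.g. the CPU that owns the per-CPU data the handler touches.
 *
 *	timer->expires = jiffies + HZ;
 *	add_timer_on(timer, cpu);
 */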
457
458
459 /**
460 * mod_timer - modify a timer's timeout
461 * @timer: the timer to be modified
462 * @expires: new timeout in jiffies
463 *
464 * mod_timer() is a more efficient way to update the expire field of an
465 * active timer (if the timer is inactive it will be activated)
466 *
467 * mod_timer(timer, expires) is equivalent to:
468 *
469 * del_timer(timer); timer->expires = expires; add_timer(timer);
470 *
471 * Note that if there are multiple unserialized concurrent users of the
472 * same timer, then mod_timer() is the only safe way to modify the timeout,
473 * since add_timer() cannot modify an already running timer.
474 *
475 * The function returns whether it has modified a pending timer or not.
476 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
477 * active timer returns 1.)
478 */
479 int mod_timer(struct timer_list *timer, unsigned long expires)
480 {
481 BUG_ON(!timer->function);
482
483 timer_stats_timer_set_start_info(timer);
484 /*
485 * This is a common optimization triggered by the
486 * networking code - if the timer is re-modified
487 * to be the same thing then just return:
488 */
489 if (timer->expires == expires && timer_pending(timer))
490 return 1;
491
492 return __mod_timer(timer, expires);
493 }
494
495 EXPORT_SYMBOL(mod_timer);
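/*
 * Example (illustrative sketch, not part of the original file): a
 * watchdog-style timeout that is pushed further into the future every
 * time activity is seen.  The names below are made up.
 *
 *	mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo);
 *
 * If the timer is already pending with exactly that expiry, the check
 * above returns 1 without requeueing it.
 */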
496
497 /**
498 * del_timer - deactivate a timer.
499 * @timer: the timer to be deactivated
500 *
501 * del_timer() deactivates a timer - this works on both active and inactive
502 * timers.
503 *
504 * The function returns whether it has deactivated a pending timer or not.
505 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
506 * active timer returns 1.)
507 */
508 int del_timer(struct timer_list *timer)
509 {
510 tvec_base_t *base;
511 unsigned long flags;
512 int ret = 0;
513
514 timer_stats_timer_clear_start_info(timer);
515 if (timer_pending(timer)) {
516 base = lock_timer_base(timer, &flags);
517 if (timer_pending(timer)) {
518 detach_timer(timer, 1);
519 ret = 1;
520 }
521 spin_unlock_irqrestore(&base->lock, flags);
522 }
523
524 return ret;
525 }
526
527 EXPORT_SYMBOL(del_timer);
528
529 #ifdef CONFIG_SMP
530 /**
531 * try_to_del_timer_sync - Try to deactivate a timer
532 * @timer: the timer to deactivate
533 *
534 * This function tries to deactivate a timer. Upon successful (ret >= 0)
535 * exit the timer is not queued and the handler is not running on any CPU.
536 *
537 * It must not be called from interrupt contexts.
538 */
539 int try_to_del_timer_sync(struct timer_list *timer)
540 {
541 tvec_base_t *base;
542 unsigned long flags;
543 int ret = -1;
544
545 base = lock_timer_base(timer, &flags);
546
547 if (base->running_timer == timer)
548 goto out;
549
550 ret = 0;
551 if (timer_pending(timer)) {
552 detach_timer(timer, 1);
553 ret = 1;
554 }
555 out:
556 spin_unlock_irqrestore(&base->lock, flags);
557
558 return ret;
559 }
560
561 EXPORT_SYMBOL(try_to_del_timer_sync);
562
563 /**
564 * del_timer_sync - deactivate a timer and wait for the handler to finish.
565 * @timer: the timer to be deactivated
566 *
567 * This function only differs from del_timer() on SMP: besides deactivating
568 * the timer it also makes sure the handler has finished executing on other
569 * CPUs.
570 *
571 * Synchronization rules: Callers must prevent restarting of the timer,
572 * otherwise this function is meaningless. It must not be called from
573 * interrupt contexts. The caller must not hold locks which would prevent
574 * completion of the timer's handler. The timer's handler must not call
575 * add_timer_on(). Upon exit the timer is not queued and the handler is
576 * not running on any CPU.
577 *
578 * The function returns whether it has deactivated a pending timer or not.
579 */
580 int del_timer_sync(struct timer_list *timer)
581 {
582 for (;;) {
583 int ret = try_to_del_timer_sync(timer);
584 if (ret >= 0)
585 return ret;
586 cpu_relax();
587 }
588 }
589
590 EXPORT_SYMBOL(del_timer_sync);
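/*
 * Example (illustrative sketch, not part of the original file): typical
 * teardown in a driver's remove path.  del_timer_sync() both removes a
 * pending timer and waits until a handler already running on another
 * CPU has finished, so freeing the containing structure afterwards is
 * safe.  The names below are made up.
 *
 *	del_timer_sync(&dev->poll_timer);
 *	kfree(dev);
 */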
591 #endif
592
593 static int cascade(tvec_base_t *base, tvec_t *tv, int index)
594 {
595 /* cascade all the timers from tv up one level */
596 struct timer_list *timer, *tmp;
597 struct list_head tv_list;
598
599 list_replace_init(tv->vec + index, &tv_list);
600
601 /*
602 * We are removing _all_ timers from the list, so we
603 * don't have to detach them individually.
604 */
605 list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
606 BUG_ON(tbase_get_base(timer->base) != base);
607 internal_add_timer(base, timer);
608 }
609
610 return index;
611 }
612
613 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
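/*
 * Worked example (added for illustration, assuming TVR_BITS = 8 and
 * TVN_BITS = 6): tv1 covers the next 256 jiffies directly, so only when
 * base->timer_jiffies crosses a multiple of 256 does index become 0 and
 * the matching tv2 slot, INDEX(0) = (timer_jiffies >> 8) & 63, get
 * emptied into the lower levels; when INDEX(0) is itself 0 the same is
 * done for tv3, and so on up to tv5.
 */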
614
615 /**
616 * __run_timers - run all expired timers (if any) on this CPU.
617 * @base: the timer vector to be processed.
618 *
619 * This function cascades all vectors and executes all expired timer
620 * vectors.
621 */
622 static inline void __run_timers(tvec_base_t *base)
623 {
624 struct timer_list *timer;
625
626 spin_lock_irq(&base->lock);
627 while (time_after_eq(jiffies, base->timer_jiffies)) {
628 struct list_head work_list;
629 struct list_head *head = &work_list;
630 int index = base->timer_jiffies & TVR_MASK;
631
632 /*
633 * Cascade timers:
634 */
635 if (!index &&
636 (!cascade(base, &base->tv2, INDEX(0))) &&
637 (!cascade(base, &base->tv3, INDEX(1))) &&
638 !cascade(base, &base->tv4, INDEX(2)))
639 cascade(base, &base->tv5, INDEX(3));
640 ++base->timer_jiffies;
641 list_replace_init(base->tv1.vec + index, &work_list);
642 while (!list_empty(head)) {
643 void (*fn)(unsigned long);
644 unsigned long data;
645
646 timer = list_first_entry(head, struct timer_list,entry);
647 fn = timer->function;
648 data = timer->data;
649
650 timer_stats_account_timer(timer);
651
652 set_running_timer(base, timer);
653 detach_timer(timer, 1);
654 spin_unlock_irq(&base->lock);
655 {
656 int preempt_count = preempt_count();
657 fn(data);
658 if (preempt_count != preempt_count()) {
659 printk(KERN_WARNING "huh, entered %p "
660 "with preempt_count %08x, exited"
661 " with %08x?\n",
662 fn, preempt_count,
663 preempt_count());
664 BUG();
665 }
666 }
667 spin_lock_irq(&base->lock);
668 }
669 }
670 set_running_timer(base, NULL);
671 spin_unlock_irq(&base->lock);
672 }
673
674 #if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
675 /*
676 * Find out when the next timer event is due to happen. This
677 * is used on S/390 to stop all activity when a cpu is idle.
678 * This function needs to be called with interrupts disabled.
679 */
680 static unsigned long __next_timer_interrupt(tvec_base_t *base)
681 {
682 unsigned long timer_jiffies = base->timer_jiffies;
683 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
684 int index, slot, array, found = 0;
685 struct timer_list *nte;
686 tvec_t *varray[4];
687
688 /* Look for timer events in tv1. */
689 index = slot = timer_jiffies & TVR_MASK;
690 do {
691 list_for_each_entry(nte, base->tv1.vec + slot, entry) {
692 if (tbase_get_deferrable(nte->base))
693 continue;
694
695 found = 1;
696 expires = nte->expires;
697 /* Look at the cascade bucket(s)? */
698 if (!index || slot < index)
699 goto cascade;
700 return expires;
701 }
702 slot = (slot + 1) & TVR_MASK;
703 } while (slot != index);
704
705 cascade:
706 /* Calculate the next cascade event */
707 if (index)
708 timer_jiffies += TVR_SIZE - index;
709 timer_jiffies >>= TVR_BITS;
710
711 /* Check tv2-tv5. */
712 varray[0] = &base->tv2;
713 varray[1] = &base->tv3;
714 varray[2] = &base->tv4;
715 varray[3] = &base->tv5;
716
717 for (array = 0; array < 4; array++) {
718 tvec_t *varp = varray[array];
719
720 index = slot = timer_jiffies & TVN_MASK;
721 do {
722 list_for_each_entry(nte, varp->vec + slot, entry) {
723 found = 1;
724 if (time_before(nte->expires, expires))
725 expires = nte->expires;
726 }
727 /*
728 * Are we still searching for the first timer or are
729 * we looking up the cascade buckets?
730 */
731 if (found) {
732 /* Look at the cascade bucket(s)? */
733 if (!index || slot < index)
734 break;
735 return expires;
736 }
737 slot = (slot + 1) & TVN_MASK;
738 } while (slot != index);
739
740 if (index)
741 timer_jiffies += TVN_SIZE - index;
742 timer_jiffies >>= TVN_BITS;
743 }
744 return expires;
745 }
746
747 /*
748 * Check if the next hrtimer event is before the next timer wheel
749 * event:
750 */
751 static unsigned long cmp_next_hrtimer_event(unsigned long now,
752 unsigned long expires)
753 {
754 ktime_t hr_delta = hrtimer_get_next_event();
755 struct timespec tsdelta;
756 unsigned long delta;
757
758 if (hr_delta.tv64 == KTIME_MAX)
759 return expires;
760
761 /*
762 * Expired timer available, let it expire in the next tick
763 */
764 if (hr_delta.tv64 <= 0)
765 return now + 1;
766
767 tsdelta = ktime_to_timespec(hr_delta);
768 delta = timespec_to_jiffies(&tsdelta);
769
770 /*
771 * Limit the delta to the max value, which is checked in
772 * tick_nohz_stop_sched_tick():
773 */
774 if (delta > NEXT_TIMER_MAX_DELTA)
775 delta = NEXT_TIMER_MAX_DELTA;
776
777 /*
778 * Take rounding errors into account and make sure that it
779 * expires in the next tick. Otherwise we go into an endless
780 * ping-pong due to tick_nohz_stop_sched_tick() retriggering
781 * the timer softirq.
782 */
783 if (delta < 1)
784 delta = 1;
785 now += delta;
786 if (time_before(now, expires))
787 return now;
788 return expires;
789 }
790
791 /**
792 * get_next_timer_interrupt - return the jiffy of the next pending timer
793 * @now: current time (in jiffies)
794 */
795 unsigned long get_next_timer_interrupt(unsigned long now)
796 {
797 tvec_base_t *base = __get_cpu_var(tvec_bases);
798 unsigned long expires;
799
800 spin_lock(&base->lock);
801 expires = __next_timer_interrupt(base);
802 spin_unlock(&base->lock);
803
804 if (time_before_eq(expires, now))
805 return now;
806
807 return cmp_next_hrtimer_event(now, expires);
808 }
809
810 #ifdef CONFIG_NO_IDLE_HZ
811 unsigned long next_timer_interrupt(void)
812 {
813 return get_next_timer_interrupt(jiffies);
814 }
815 #endif
816
817 #endif
818
819 /*
820 * Called from the timer interrupt handler to charge one tick to the current
821 * process. user_tick is 1 if the tick is user time, 0 for system.
822 */
823 void update_process_times(int user_tick)
824 {
825 struct task_struct *p = current;
826 int cpu = smp_processor_id();
827
828 /* Note: this timer irq context must be accounted for as well. */
829 if (user_tick)
830 account_user_time(p, jiffies_to_cputime(1));
831 else
832 account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
833 run_local_timers();
834 if (rcu_pending(cpu))
835 rcu_check_callbacks(cpu, user_tick);
836 scheduler_tick();
837 run_posix_cpu_timers(p);
838 }
839
840 /*
841 * Nr of active tasks - counted in fixed-point numbers
842 */
843 static unsigned long count_active_tasks(void)
844 {
845 return nr_active() * FIXED_1;
846 }
847
848 /*
849 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
850 * imply that avenrun[] is the standard name for this kind of thing.
851 * Nothing else seems to be standardized: the fractional size etc
852 * all seem to differ on different machines.
853 *
854 * Requires xtime_lock to access.
855 */
856 unsigned long avenrun[3];
857
858 EXPORT_SYMBOL(avenrun);
859
860 /*
861 * calc_load - given tick count, update the avenrun load estimates.
862 * This is called while holding a write_lock on xtime_lock.
863 */
864 static inline void calc_load(unsigned long ticks)
865 {
866 unsigned long active_tasks; /* fixed-point */
867 static int count = LOAD_FREQ;
868
869 count -= ticks;
870 if (unlikely(count < 0)) {
871 active_tasks = count_active_tasks();
872 do {
873 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
874 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
875 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
876 count += LOAD_FREQ;
877 } while (count < 0);
878 }
879 }
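/*
 * Worked example (added for illustration, using the kernel's usual
 * fixed-point constants FSHIFT = 11, FIXED_1 = 2048 and EXP_1 = 1884):
 * starting from avenrun[0] = 0 with two runnable tasks, one LOAD_FREQ
 * interval gives CALC_LOAD(0, 1884, 2 * 2048):
 *
 *	load = (0 * 1884 + 4096 * (2048 - 1884)) >> 11 = 328
 *
 * which userspace sees as 328 / 2048, i.e. a 1-minute load of about 0.16.
 */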
880
881 /*
882 * This function runs timers and the timer-tq in bottom half context.
883 */
884 static void run_timer_softirq(struct softirq_action *h)
885 {
886 tvec_base_t *base = __get_cpu_var(tvec_bases);
887
888 hrtimer_run_queues();
889
890 if (time_after_eq(jiffies, base->timer_jiffies))
891 __run_timers(base);
892 }
893
894 /*
895 * Called by the local, per-CPU timer interrupt on SMP.
896 */
897 void run_local_timers(void)
898 {
899 raise_softirq(TIMER_SOFTIRQ);
900 softlockup_tick();
901 }
902
903 /*
904 * Called by the timer interrupt. xtime_lock must already be taken
905 * by the timer IRQ!
906 */
907 static inline void update_times(unsigned long ticks)
908 {
909 update_wall_time();
910 calc_load(ticks);
911 }
912
913 /*
914 * The 64-bit jiffies value is not atomic - you MUST NOT read it
915 * without sampling the sequence number in xtime_lock.
916 * jiffies is defined in the linker script...
917 */
918
919 void do_timer(unsigned long ticks)
920 {
921 jiffies_64 += ticks;
922 update_times(ticks);
923 }
924
925 #ifdef __ARCH_WANT_SYS_ALARM
926
927 /*
928 * For backwards compatibility? This can be done in libc so Alpha
929 * and all newer ports shouldn't need it.
930 */
931 asmlinkage unsigned long sys_alarm(unsigned int seconds)
932 {
933 return alarm_setitimer(seconds);
934 }
935
936 #endif
937
938 #ifndef __alpha__
939
940 /*
941 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
942 * should be moved into arch/i386 instead?
943 */
944
945 /**
946 * sys_getpid - return the thread group id of the current process
947 *
948 * Note, despite the name, this returns the tgid not the pid. The tgid and
949 * the pid are identical unless CLONE_THREAD was specified on clone() in
950 * which case the tgid is the same in all threads of the same group.
951 *
952 * This is SMP safe as current->tgid does not change.
953 */
954 asmlinkage long sys_getpid(void)
955 {
956 return current->tgid;
957 }
958
959 /*
960 * Accessing ->real_parent is not SMP-safe, it could
961 * change from under us. However, we can use a stale
962 * value of ->real_parent under rcu_read_lock(), see
963 * release_task()->call_rcu(delayed_put_task_struct).
964 */
965 asmlinkage long sys_getppid(void)
966 {
967 int pid;
968
969 rcu_read_lock();
970 pid = rcu_dereference(current->real_parent)->tgid;
971 rcu_read_unlock();
972
973 return pid;
974 }
975
976 asmlinkage long sys_getuid(void)
977 {
978 /* Only we change this so SMP safe */
979 return current->uid;
980 }
981
982 asmlinkage long sys_geteuid(void)
983 {
984 /* Only we change this so SMP safe */
985 return current->euid;
986 }
987
988 asmlinkage long sys_getgid(void)
989 {
990 /* Only we change this so SMP safe */
991 return current->gid;
992 }
993
994 asmlinkage long sys_getegid(void)
995 {
996 /* Only we change this so SMP safe */
997 return current->egid;
998 }
999
1000 #endif
1001
1002 static void process_timeout(unsigned long __data)
1003 {
1004 wake_up_process((struct task_struct *)__data);
1005 }
1006
1007 /**
1008 * schedule_timeout - sleep until timeout
1009 * @timeout: timeout value in jiffies
1010 *
1011 * Make the current task sleep until @timeout jiffies have
1012 * elapsed. The routine will return immediately unless
1013 * the current task state has been set (see set_current_state()).
1014 *
1015 * You can set the task state as follows -
1016 *
1017 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1018 * pass before the routine returns. The routine will return 0.
1019 *
1020 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1021 * delivered to the current task. In this case the remaining time
1022 * in jiffies will be returned, or 0 if the timer expired in time.
1023 *
1024 * The current task state is guaranteed to be TASK_RUNNING when this
1025 * routine returns.
1026 *
1027 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1028 * the CPU away without a bound on the timeout. In this case the return
1029 * value will be %MAX_SCHEDULE_TIMEOUT.
1030 *
1031 * In all cases the return value is guaranteed to be non-negative.
1032 */
1033 fastcall signed long __sched schedule_timeout(signed long timeout)
1034 {
1035 struct timer_list timer;
1036 unsigned long expire;
1037
1038 switch (timeout)
1039 {
1040 case MAX_SCHEDULE_TIMEOUT:
1041 /*
1042 * These two special cases are useful to be comfortable
1043 * in the caller. Nothing more. We could take
1044 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1045 * but I'd like to return a valid offset (>=0) to allow
1046 * the caller to do everything it wants with the retval.
1047 */
1048 schedule();
1049 goto out;
1050 default:
1051 /*
1052 * Another bit of PARANOID. Note that the retval will be
1053 * 0 since no piece of kernel is supposed to do a check
1054 * for a negative retval of schedule_timeout() (since it
1055 * should never happens anyway). You just have the printk()
1056 * that will tell you if something is gone wrong and where.
1057 */
1058 if (timeout < 0) {
1059 printk(KERN_ERR "schedule_timeout: wrong timeout "
1060 "value %lx\n", timeout);
1061 dump_stack();
1062 current->state = TASK_RUNNING;
1063 goto out;
1064 }
1065 }
1066
1067 expire = timeout + jiffies;
1068
1069 setup_timer(&timer, process_timeout, (unsigned long)current);
1070 __mod_timer(&timer, expire);
1071 schedule();
1072 del_singleshot_timer_sync(&timer);
1073
1074 timeout = expire - jiffies;
1075
1076 out:
1077 return timeout < 0 ? 0 : timeout;
1078 }
1079 EXPORT_SYMBOL(schedule_timeout);
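/*
 * Example (illustrative sketch, not part of the original file): the
 * canonical pattern for sleeping up to one second unless woken earlier;
 * the task state must be set before calling schedule_timeout().
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * On return the task is TASK_RUNNING again and 'remaining' holds the
 * jiffies that were left, or 0 if the full second elapsed.
 */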
1080
1081 /*
1082 * We can use __set_current_state() here because schedule_timeout() calls
1083 * schedule() unconditionally.
1084 */
1085 signed long __sched schedule_timeout_interruptible(signed long timeout)
1086 {
1087 __set_current_state(TASK_INTERRUPTIBLE);
1088 return schedule_timeout(timeout);
1089 }
1090 EXPORT_SYMBOL(schedule_timeout_interruptible);
1091
1092 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1093 {
1094 __set_current_state(TASK_UNINTERRUPTIBLE);
1095 return schedule_timeout(timeout);
1096 }
1097 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1098
1099 /* Thread ID - the internal kernel "pid" */
1100 asmlinkage long sys_gettid(void)
1101 {
1102 return current->pid;
1103 }
1104
1105 /**
1106 * do_sysinfo - fill in sysinfo struct
1107 * @info: pointer to buffer to fill
1108 */
1109 int do_sysinfo(struct sysinfo *info)
1110 {
1111 unsigned long mem_total, sav_total;
1112 unsigned int mem_unit, bitcount;
1113 unsigned long seq;
1114
1115 memset(info, 0, sizeof(struct sysinfo));
1116
1117 do {
1118 struct timespec tp;
1119 seq = read_seqbegin(&xtime_lock);
1120
1121 /*
1122 * This is annoying. The below is the same thing that
1123 * posix_get_clock_monotonic() does, but it wants to
1124 * take the lock, which we want to cover the load
1125 * average reads too.
1126 */
1127
1128 getnstimeofday(&tp);
1129 tp.tv_sec += wall_to_monotonic.tv_sec;
1130 tp.tv_nsec += wall_to_monotonic.tv_nsec;
1131 monotonic_to_bootbased(&tp);
1132 if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
1133 tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
1134 tp.tv_sec++;
1135 }
1136 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1137
1138 info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
1139 info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
1140 info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
1141
1142 info->procs = nr_threads;
1143 } while (read_seqretry(&xtime_lock, seq));
1144
1145 si_meminfo(info);
1146 si_swapinfo(info);
1147
1148 /*
1149 * If the sum of all the available memory (i.e. ram + swap)
1150 * is less than can be stored in a 32 bit unsigned long then
1151 * we can be binary compatible with 2.2.x kernels. If not,
1152 * well, in that case 2.2.x was broken anyways...
1153 *
1154 * -Erik Andersen <andersee@debian.org>
1155 */
1156
1157 mem_total = info->totalram + info->totalswap;
1158 if (mem_total < info->totalram || mem_total < info->totalswap)
1159 goto out;
1160 bitcount = 0;
1161 mem_unit = info->mem_unit;
1162 while (mem_unit > 1) {
1163 bitcount++;
1164 mem_unit >>= 1;
1165 sav_total = mem_total;
1166 mem_total <<= 1;
1167 if (mem_total < sav_total)
1168 goto out;
1169 }
1170
1171 /*
1172 * If mem_total did not overflow, multiply all memory values by
1173 * info->mem_unit and set it to 1. This leaves things compatible
1174 * with 2.2.x, and also retains compatibility with earlier 2.4.x
1175 * kernels...
1176 */
1177
1178 info->mem_unit = 1;
1179 info->totalram <<= bitcount;
1180 info->freeram <<= bitcount;
1181 info->sharedram <<= bitcount;
1182 info->bufferram <<= bitcount;
1183 info->totalswap <<= bitcount;
1184 info->freeswap <<= bitcount;
1185 info->totalhigh <<= bitcount;
1186 info->freehigh <<= bitcount;
1187
1188 out:
1189 return 0;
1190 }
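/*
 * Worked example (added for illustration): if info->mem_unit is 4096
 * (one page) and the shifted totals do not overflow, the loop above
 * computes bitcount = 12, so totalram and friends are multiplied by
 * 4096 via the << bitcount below and mem_unit is reported as 1, which
 * is what old 32-bit userspace expects.
 */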
1191
1192 asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1193 {
1194 struct sysinfo val;
1195
1196 do_sysinfo(&val);
1197
1198 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1199 return -EFAULT;
1200
1201 return 0;
1202 }
1203
1204 /*
1205 * lockdep: we want to track each per-CPU base as a separate lock-class,
1206 * but timer-bases are kmalloc()-ed, so we need to attach separate
1207 * keys to them:
1208 */
1209 static struct lock_class_key base_lock_keys[NR_CPUS];
1210
1211 static int __devinit init_timers_cpu(int cpu)
1212 {
1213 int j;
1214 tvec_base_t *base;
1215 static char __devinitdata tvec_base_done[NR_CPUS];
1216
1217 if (!tvec_base_done[cpu]) {
1218 static char boot_done;
1219
1220 if (boot_done) {
1221 /*
1222 * The APs use this path later in boot
1223 */
1224 base = kmalloc_node(sizeof(*base),
1225 GFP_KERNEL | __GFP_ZERO,
1226 cpu_to_node(cpu));
1227 if (!base)
1228 return -ENOMEM;
1229
1230 /* Make sure that tvec_base is 2 byte aligned */
1231 if (tbase_get_deferrable(base)) {
1232 WARN_ON(1);
1233 kfree(base);
1234 return -ENOMEM;
1235 }
1236 per_cpu(tvec_bases, cpu) = base;
1237 } else {
1238 /*
1239 * This is for the boot CPU - we use compile-time
1240 * static initialisation because per-cpu memory isn't
1241 * ready yet and because the memory allocators are not
1242 * initialised either.
1243 */
1244 boot_done = 1;
1245 base = &boot_tvec_bases;
1246 }
1247 tvec_base_done[cpu] = 1;
1248 } else {
1249 base = per_cpu(tvec_bases, cpu);
1250 }
1251
1252 spin_lock_init(&base->lock);
1253 lockdep_set_class(&base->lock, base_lock_keys + cpu);
1254
1255 for (j = 0; j < TVN_SIZE; j++) {
1256 INIT_LIST_HEAD(base->tv5.vec + j);
1257 INIT_LIST_HEAD(base->tv4.vec + j);
1258 INIT_LIST_HEAD(base->tv3.vec + j);
1259 INIT_LIST_HEAD(base->tv2.vec + j);
1260 }
1261 for (j = 0; j < TVR_SIZE; j++)
1262 INIT_LIST_HEAD(base->tv1.vec + j);
1263
1264 base->timer_jiffies = jiffies;
1265 return 0;
1266 }
1267
1268 #ifdef CONFIG_HOTPLUG_CPU
1269 static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
1270 {
1271 struct timer_list *timer;
1272
1273 while (!list_empty(head)) {
1274 timer = list_first_entry(head, struct timer_list, entry);
1275 detach_timer(timer, 0);
1276 timer_set_base(timer, new_base);
1277 internal_add_timer(new_base, timer);
1278 }
1279 }
1280
1281 static void __devinit migrate_timers(int cpu)
1282 {
1283 tvec_base_t *old_base;
1284 tvec_base_t *new_base;
1285 int i;
1286
1287 BUG_ON(cpu_online(cpu));
1288 old_base = per_cpu(tvec_bases, cpu);
1289 new_base = get_cpu_var(tvec_bases);
1290
1291 local_irq_disable();
1292 double_spin_lock(&new_base->lock, &old_base->lock,
1293 smp_processor_id() < cpu);
1294
1295 BUG_ON(old_base->running_timer);
1296
1297 for (i = 0; i < TVR_SIZE; i++)
1298 migrate_timer_list(new_base, old_base->tv1.vec + i);
1299 for (i = 0; i < TVN_SIZE; i++) {
1300 migrate_timer_list(new_base, old_base->tv2.vec + i);
1301 migrate_timer_list(new_base, old_base->tv3.vec + i);
1302 migrate_timer_list(new_base, old_base->tv4.vec + i);
1303 migrate_timer_list(new_base, old_base->tv5.vec + i);
1304 }
1305
1306 double_spin_unlock(&new_base->lock, &old_base->lock,
1307 smp_processor_id() < cpu);
1308 local_irq_enable();
1309 put_cpu_var(tvec_bases);
1310 }
1311 #endif /* CONFIG_HOTPLUG_CPU */
1312
1313 static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1314 unsigned long action, void *hcpu)
1315 {
1316 long cpu = (long)hcpu;
1317 switch(action) {
1318 case CPU_UP_PREPARE:
1319 case CPU_UP_PREPARE_FROZEN:
1320 if (init_timers_cpu(cpu) < 0)
1321 return NOTIFY_BAD;
1322 break;
1323 #ifdef CONFIG_HOTPLUG_CPU
1324 case CPU_DEAD:
1325 case CPU_DEAD_FROZEN:
1326 migrate_timers(cpu);
1327 break;
1328 #endif
1329 default:
1330 break;
1331 }
1332 return NOTIFY_OK;
1333 }
1334
1335 static struct notifier_block __cpuinitdata timers_nb = {
1336 .notifier_call = timer_cpu_notify,
1337 };
1338
1339
1340 void __init init_timers(void)
1341 {
1342 int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1343 (void *)(long)smp_processor_id());
1344
1345 init_timer_stats();
1346
1347 BUG_ON(err == NOTIFY_BAD);
1348 register_cpu_notifier(&timers_nb);
1349 open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
1350 }
1351
1352 /**
1353 * msleep - sleep safely even with waitqueue interruptions
1354 * @msecs: Time in milliseconds to sleep for
1355 */
1356 void msleep(unsigned int msecs)
1357 {
1358 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1359
1360 while (timeout)
1361 timeout = schedule_timeout_uninterruptible(timeout);
1362 }
1363
1364 EXPORT_SYMBOL(msleep);
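/*
 * Example (illustrative sketch, not part of the original file): giving a
 * made-up piece of hardware at least 20ms to settle, from process
 * context.  The extra jiffy added above means the sleep is never shorter
 * than requested, only (possibly) longer.
 *
 *	msleep(20);
 */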
1365
1366 /**
1367 * msleep_interruptible - sleep waiting for signals
1368 * @msecs: Time in milliseconds to sleep for
1369 */
1370 unsigned long msleep_interruptible(unsigned int msecs)
1371 {
1372 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1373
1374 while (timeout && !signal_pending(current))
1375 timeout = schedule_timeout_interruptible(timeout);
1376 return jiffies_to_msecs(timeout);
1377 }
1378
1379 EXPORT_SYMBOL(msleep_interruptible);