/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are 2 byte aligned, so the lower bit of the
 * base pointer in timer_list is guaranteed to be zero. Use that LSB as
 * a flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG (0x1)

/* Functions below help us manage the 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
        timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
                                            TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        timer->base = (struct tvec_base *)((unsigned long)(new_base) |
                                           tbase_get_deferrable(timer->base));
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then rounding, then subtracting
         * this extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffy is just after a whole second (which can happen
         * due to delays of the timer irq, long irq-off times, etc.) then
         * we should round down to the whole second, not up. Use 1/4th of a
         * second as an extreme upper bound for this rounding cutoff.
         */
        if (rem < HZ/4) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely */
                return original;
        return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);
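
/*
 * Worked example (assuming HZ=1000 and cpu=0, so no skew is applied):
 * j=5100 gives rem=100 < HZ/4, so we round down and return 5000;
 * j=5300 gives rem=300 >= HZ/4, so we round up and return 6000. If
 * the rounded value is not in the future, the original j is returned.
 */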

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        /*
         * In theory the following code can skip a jiffy in case jiffies
         * increments right between the addition and the later subtraction.
         * However since the entire point of this function is to use approximate
         * timeouts, it's entirely ok to not handle that.
         */
        return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
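
/*
 * Usage sketch (illustrative only, not used by this file): a driver
 * rearming timing-insensitive housekeeping roughly once per second can
 * round the expiry so that many such timers fire together and the CPU
 * wakes up less often. my_housekeeping_timer and my_housekeeping() are
 * hypothetical names.
 */
#if 0
static struct timer_list my_housekeeping_timer;

static void my_housekeeping(unsigned long data)
{
        /* ... periodic, timing-insensitive work ... */
        mod_timer(&my_housekeeping_timer, round_jiffies(jiffies + HZ));
}
#endif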


static inline void set_running_timer(struct tvec_base *base,
                                     struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
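
/*
 * For reference, with CONFIG_BASE_SMALL=0 (TVR_BITS=8, TVN_BITS=6) the
 * branches above select a wheel level as follows:
 *
 *      tv1: idx < 2^8   (the next 256 jiffies)
 *      tv2: idx < 2^14
 *      tv3: idx < 2^20
 *      tv4: idx < 2^26
 *      tv5: everything else, with idx capped at 0xffffffff
 */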

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL(init_timer);

void fastcall init_timer_deferrable(struct timer_list *timer)
{
        init_timer(timer);
        timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);
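
/*
 * Usage sketch (illustrative only, not used by this file): the classic
 * open-coded setup sequence for a one-shot timer; setup_timer() from
 * linux/timer.h folds the first three steps into one call. my_timer,
 * my_func and my_cookie are hypothetical names.
 */
#if 0
static void my_func(unsigned long data);
static struct timer_list my_timer;

static void my_start(unsigned long my_cookie)
{
        init_timer(&my_timer);
        my_timer.function = my_func;
        my_timer.data = my_cookie;
        my_timer.expires = jiffies + 2 * HZ;
        add_timer(&my_timer);           /* fires in roughly two seconds */
}
#endif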

static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = __get_cpu_var(tvec_bases);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not finished yet. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}


/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        timer_stats_timer_set_start_info(timer);
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
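
/*
 * Usage sketch (illustrative only, not used by this file): mod_timer()
 * is the usual way to "kick" a watchdog-style timeout from a hot path,
 * since it is safe even while the timer is pending or running. The
 * my_kick_watchdog() helper is a hypothetical name.
 */
#if 0
static void my_kick_watchdog(struct timer_list *wd_timer)
{
        /* Push the deadline out; returns 1 if the timer was pending. */
        mod_timer(wd_timer, jiffies + 5 * HZ);
}
#endif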

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}

EXPORT_SYMBOL(del_timer_sync);
#endif

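/*
 * Usage sketch (illustrative only, not used by this file): the typical
 * teardown order when a timer's handler may still be running on another
 * CPU; on UP kernels del_timer_sync() falls back to del_timer(). The
 * my_teardown() helper is a hypothetical name.
 */
#if 0
static void my_teardown(struct timer_list *timer)
{
        /* Must not hold any lock that the handler itself takes. */
        del_timer_sync(timer);
        /* From here on the handler is guaranteed not to be running. */
}
#endif
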
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                internal_add_timer(base, timer);
        }

        return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        timer_stats_account_timer(timer);

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_ERR "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Are we still searching for the first timer, or are
                         * we looking at the cascade buckets?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping-pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq.
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __get_cpu_var(tvec_bases);
        unsigned long expires;

        spin_lock(&base->lock);
        expires = __next_timer_interrupt(base);
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
        return get_next_timer_interrupt(jiffies);
}
#endif

#endif

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
        if (user_tick) {
                account_user_time(p, jiffies_to_cputime(1));
                account_user_time_scaled(p, jiffies_to_cputime(1));
        } else {
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
                account_system_time_scaled(p, jiffies_to_cputime(1));
        }
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (unlikely(count < 0)) {
                active_tasks = count_active_tasks();
                do {
                        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
                        count += LOAD_FREQ;
                } while (count < 0);
        }
}
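
/*
 * For reference, each CALC_LOAD() step above applies the fixed-point
 * recurrence (see the macro in linux/sched.h):
 *
 *      load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * i.e. an exponentially-weighted moving average, where EXP_1, EXP_5 and
 * EXP_15 encode e^(-5sec/1min), e^(-5sec/5min) and e^(-5sec/15min), and
 * a step is taken every LOAD_FREQ (5 second) interval.
 */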

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = __get_cpu_var(tvec_bases);

        hrtimer_run_pending();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
        softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
        update_wall_time();
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
        int pid;

        rcu_read_lock();
        pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
        rcu_read_unlock();

        return pid;
}

asmlinkage long sys_getuid(void)
{
        /* Only we change this, so it is SMP-safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this, so it is SMP-safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this, so it is SMP-safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this, so it is SMP-safe */
        return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful for the convenience
                 * of the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of paranoia. Note that the retval will be
                 * 0, since no piece of the kernel is supposed to check
                 * for a negative retval of schedule_timeout() (it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong, and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire);
        schedule();
        del_singleshot_timer_sync(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
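
/*
 * Usage sketch (illustrative only, not used by this file): the task
 * state must be set before calling schedule_timeout(), otherwise it
 * returns immediately; the wrappers that follow do exactly this. The
 * my_wait_one_second() helper is a hypothetical name.
 */
#if 0
static signed long my_wait_one_second(void)
{
        set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(HZ);    /* 0, or jiffies left if signalled */
}
#endif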

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
        return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        unsigned long seq;

        memset(info, 0, sizeof(struct sysinfo));

        do {
                struct timespec tp;
                seq = read_seqbegin(&xtime_lock);

                /*
                 * This is annoying. The below is the same thing
                 * posix_get_clock_monotonic() does, but that takes
                 * the lock itself, while here the lock must also
                 * cover the load-average reads.
                 */

                getnstimeofday(&tp);
                tp.tv_sec += wall_to_monotonic.tv_sec;
                tp.tv_nsec += wall_to_monotonic.tv_nsec;
                monotonic_to_bootbased(&tp);
                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
                        tp.tv_sec++;
                }
                info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

                info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
                info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
                info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

                info->procs = nr_threads;
        } while (read_seqretry(&xtime_lock, seq));

        si_meminfo(info);
        si_swapinfo(info);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels. If not,
         * well, in that case 2.2.x was broken anyways...
         *
         *  -Erik Andersen <andersee@debian.org>
         */

        mem_total = info->totalram + info->totalswap;
        if (mem_total < info->totalram || mem_total < info->totalswap)
                goto out;
        bitcount = 0;
        mem_unit = info->mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * info->mem_unit and set it to 1. This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        info->mem_unit = 1;
        info->totalram <<= bitcount;
        info->freeram <<= bitcount;
        info->sharedram <<= bitcount;
        info->bufferram <<= bitcount;
        info->totalswap <<= bitcount;
        info->freeswap <<= bitcount;
        info->totalhigh <<= bitcount;
        info->freehigh <<= bitcount;

out:
        return 0;
}

asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
        struct sysinfo val;

        do_sysinfo(&val);

        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}

/*
 * lockdep: we want to track each per-CPU base as a separate lock-class,
 * but timer-bases are kmalloc()-ed, so we need to attach separate
 * keys to them:
 */
static struct lock_class_key base_lock_keys[NR_CPUS];

static int __cpuinit init_timers_cpu(int cpu)
{
        int j;
        struct tvec_base *base;
        static char __cpuinitdata tvec_base_done[NR_CPUS];

        if (!tvec_base_done[cpu]) {
                static char boot_done;

                if (boot_done) {
                        /*
                         * The APs use this path later in boot
                         */
                        base = kmalloc_node(sizeof(*base),
                                            GFP_KERNEL | __GFP_ZERO,
                                            cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;

                        /* Make sure that tvec_base is 2 byte aligned */
                        if (tbase_get_deferrable(base)) {
                                WARN_ON(1);
                                kfree(base);
                                return -ENOMEM;
                        }
                        per_cpu(tvec_bases, cpu) = base;
                } else {
                        /*
                         * This is for the boot CPU - we use compile-time
                         * static initialisation because per-cpu memory isn't
                         * ready yet and because the memory allocators are not
                         * initialised either.
                         */
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }

        spin_lock_init(&base->lock);
        lockdep_set_class(&base->lock, base_lock_keys + cpu);

        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_first_entry(head, struct timer_list, entry);
                detach_timer(timer, 0);
                timer_set_base(timer, new_base);
                internal_add_timer(new_base, timer);
        }
}

static void __cpuinit migrate_timers(int cpu)
{
        struct tvec_base *old_base;
        struct tvec_base *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);

        local_irq_disable();
        double_spin_lock(&new_base->lock, &old_base->lock,
                         smp_processor_id() < cpu);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        double_spin_unlock(&new_base->lock, &old_base->lock,
                           smp_processor_id() < cpu);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
                                      unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (init_timers_cpu(cpu) < 0)
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
        .notifier_call = timer_cpu_notify,
};


void __init init_timers(void)
{
        int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                   (void *)(long)smp_processor_id());

        init_timer_stats();

        BUG_ON(err == NOTIFY_BAD);
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);