/*
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
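
/*
 * Illustrative note (not part of the original file): with CONFIG_BASE_SMALL
 * disabled, tv1 (the root vector) has TVR_SIZE = 256 slots covering the next
 * 256 jiffies, and tv2..tv5 each have TVN_SIZE = 64 slots, every level 64
 * times coarser than the one below.  A timer whose idx = expires -
 * base->timer_jiffies is, say, 1000 satisfies 256 <= idx < 256*64, so
 * internal_add_timer() below files it in tv2.vec[(expires >> 8) & 63]; it is
 * re-hashed into tv1 when base->timer_jiffies catches up with that tv2 slot.
 */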
struct timer_base_s {
        spinlock_t lock;
        struct timer_list *running_timer;
};

typedef struct tvec_s {
        struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
        struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
        struct timer_base_s t_base;
        unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
        tvec_t tv4;
        tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

static DEFINE_PER_CPU(tvec_base_t, tvec_bases);
static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
{
        base->t_base.running_timer = timer;
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past.
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
typedef struct timer_base_s timer_base_t;
/*
 * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
 * at compile time, and we need timer->base to lock the timer.
 */
timer_base_t __init_timer_base
        ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
EXPORT_SYMBOL(__init_timer_base);
/***
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base;
}
EXPORT_SYMBOL(init_timer);
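
/*
 * Illustrative usage sketch (not part of the original file; my_timeout and
 * struct my_dev are made-up names):
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		...			(runs in softirq context, must not sleep)
 *	}
 *
 *	init_timer(&dev->timer);
 *	dev->timer.function = my_timeout;
 *	dev->timer.data = (unsigned long)dev;
 *	dev->timer.expires = jiffies + HZ;	(fire one second from now)
 *	add_timer(&dev->timer);
 */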
static inline void detach_timer(struct timer_list *timer,
                                        int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static timer_base_t *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
{
        timer_base_t *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        timer_base_t *base;
        tvec_base_t *new_base;
        unsigned long flags;
        int ret = 0;

        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = &__get_cpu_var(tvec_bases);

        if (base != &new_base->t_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not finished yet. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (unlikely(base->running_timer == timer)) {
                        /* The timer remains on a former base */
                        new_base = container_of(base, tvec_base_t, t_base);
                } else {
                        /* See the comment in lock_timer_base() */
                        timer->base = NULL;
                        spin_unlock(&base->lock);
                        spin_lock(&new_base->t_base.lock);
                        timer->base = &new_base->t_base;
                }
        }

        timer->expires = expires;
        internal_add_timer(new_base, timer);
        spin_unlock_irqrestore(&new_base->t_base.lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);
/***
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        tvec_base_t *base = &per_cpu(tvec_bases, cpu);
        unsigned long flags;

        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->t_base.lock, flags);
        timer->base = &base->t_base;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->t_base.lock, flags);
}
/***
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
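
/*
 * Illustrative usage sketch (not part of the original file; my_timer is a
 * made-up, already initialized timer):
 *
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(200));
 *
 * This is safe whether or not my_timer is currently pending and avoids the
 * window in which a del_timer()/add_timer() pair would leave the timer
 * unqueued.
 */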
/***
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        timer_base_t *base;
        unsigned long flags;
        int ret = 0;

        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        timer_base_t *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/***
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
        }
}

EXPORT_SYMBOL(del_timer_sync);
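
/*
 * Illustrative teardown sketch (not part of the original file; the
 * shutting_down flag and poll_timer are made-up names).  A handler that
 * re-arms its own timer should be told to stop first, and only then waited
 * for:
 *
 *	dev->shutting_down = 1;		(handler checks this before mod_timer)
 *	smp_wmb();
 *	del_timer_sync(&dev->poll_timer);
 *
 * Afterwards the handler is not running on any CPU and the timer is not
 * queued, so it is safe to free the containing structure.
 */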
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct list_head *head, *curr;

        head = tv->vec + index;
        curr = head->next;
        /*
         * We are removing _all_ timers from the list, so we don't have to
         * detach them individually, just clear the list afterwards.
         */
        while (curr != head) {
                struct timer_list *tmp;

                tmp = list_entry(curr, struct timer_list, entry);
                BUG_ON(tmp->base != &base->t_base);
                curr = curr->next;
                internal_add_timer(base, tmp);
        }
        INIT_LIST_HEAD(head);

        return index;
}
/***
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
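
/*
 * Illustrative note (not part of the original file): INDEX(N) is the slot of
 * wheel level N+2 that base->timer_jiffies currently maps to.  With the
 * default TVR_BITS=8 and TVN_BITS=6, INDEX(0) is (timer_jiffies >> 8) & 63.
 * Whenever the low 8 bits of timer_jiffies wrap to zero, __run_timers()
 * below re-files the timers in tv2.vec[INDEX(0)] into the lower levels via
 * cascade(); if that slot index is itself zero the cascade continues up
 * through tv3, tv4 and tv5.
 */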
static inline void __run_timers(tvec_base_t *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->t_base.lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list = LIST_HEAD_INIT(work_list);
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_splice_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_entry(head->next, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->t_base.lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_WARNING "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->t_base.lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->t_base.lock);
}
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
        tvec_base_t *base;
        struct list_head *list;
        struct timer_list *nte;
        unsigned long expires;
        tvec_t *varray[4];
        int i, j;

        base = &__get_cpu_var(tvec_bases);
        spin_lock(&base->t_base.lock);
        expires = base->timer_jiffies + (LONG_MAX >> 1);
        list = NULL;

        /* Look for timer events in tv1. */
        j = base->timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + j, entry) {
                        expires = nte->expires;
                        if (j < (base->timer_jiffies & TVR_MASK))
                                list = base->tv2.vec + (INDEX(0));
                        goto found;
                }
                j = (j + 1) & TVR_MASK;
        } while (j != (base->timer_jiffies & TVR_MASK));

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;
        for (i = 0; i < 4; i++) {
                j = INDEX(i);
                do {
                        if (list_empty(varray[i]->vec + j)) {
                                j = (j + 1) & TVN_MASK;
                                continue;
                        }
                        list_for_each_entry(nte, varray[i]->vec + j, entry)
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        if (j < (INDEX(i)) && i < 3)
                                list = varray[i + 1]->vec + (INDEX(i + 1));
                        goto found;
                } while (j != (INDEX(i)));
        }
found:
        if (list) {
                /*
                 * The search wrapped. We need to look at the next list
                 * from next tv element that would cascade into tv element
                 * where we found the timer element.
                 */
                list_for_each_entry(nte, list, entry) {
                        if (time_before(nte->expires, expires))
                                expires = nte->expires;
                }
        }
        spin_unlock(&base->t_base.lock);
        return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */

/*
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */
/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
static long time_phase;			/* phase offset (scaled us)	*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/

long time_adjust;
long time_next_adjust;
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
        long ltemp;

        /* Bump the maxerror field */
        time_maxerror += time_tolerance >> SHIFT_USEC;
        if (time_maxerror > NTP_PHASE_LIMIT) {
                time_maxerror = NTP_PHASE_LIMIT;
                time_status |= STA_UNSYNC;
        }

        /*
         * Leap second processing. If in leap-insert state at the end of the
         * day, the system clock is set back one second; if in leap-delete
         * state, the system clock is set ahead one second. The microtime()
         * routine or external clock driver will ensure that reported time is
         * always monotonic. The ugly divides should be replaced.
         */
        switch (time_state) {
        case TIME_OK:
                if (time_status & STA_INS)
                        time_state = TIME_INS;
                else if (time_status & STA_DEL)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--;
                        wall_to_monotonic.tv_sec++;
                        /*
                         * The timer interpolator will make time change
                         * gradually instead of an immediate jump by one second
                         */
                        time_interpolator_update(-NSEC_PER_SEC);
                        time_state = TIME_OOP;
                        printk(KERN_NOTICE "Clock: inserting leap second "
                                        "23:59:60 UTC\n");
                }
                break;
        case TIME_DEL:
                if ((xtime.tv_sec + 1) % 86400 == 0) {
                        xtime.tv_sec++;
                        wall_to_monotonic.tv_sec--;
                        /*
                         * Use of time interpolator for a gradual change of
                         * time
                         */
                        time_interpolator_update(NSEC_PER_SEC);
                        time_state = TIME_WAIT;
                        printk(KERN_NOTICE "Clock: deleting leap second "
                                        "23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
                time_state = TIME_WAIT;
                break;
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
        }

        /*
         * Compute the phase adjustment for the next second. In PLL mode, the
         * offset is reduced by a fixed factor times the time constant. In FLL
         * mode the offset is used directly. In either mode, the maximum phase
         * adjustment for each second is clamped so as to spread the adjustment
         * over not more than the number of seconds between updates.
         */
        ltemp = time_offset;
        if (!(time_status & STA_FLL))
                ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
        ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
        ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
        time_offset -= ltemp;
        time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

        /*
         * Compute the frequency estimate and additional phase adjustment due
         * to frequency error for the next second. When the PPS signal is
         * engaged, gnaw on the watchdog counter and update the frequency
         * computed by the pll and the PPS signal.
         */
        pps_valid++;
        if (pps_valid == PPS_VALID) {	/* PPS signal lost */
                pps_jitter = MAXTIME;
                pps_stabil = MAXFREQ;
                time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                 STA_PPSWANDER | STA_PPSERROR);
        }
        ltemp = time_freq + pps_freq;
        time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
        /*
         * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
         * get 128.125; => only 0.125% error (p. 14)
         */
        time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
        /*
         * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
        /*
         * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
        long time_adjust_step, delta_nsec;

        if ((time_adjust_step = time_adjust) != 0) {
                /*
                 * We are doing an adjtime thing.  Prepare time_adjust_step to
                 * be within bounds.  Note that a positive time_adjust means we
                 * want the clock to run faster.
                 *
                 * Limit the amount of the step to be in the range
                 * -tickadj .. +tickadj
                 */
                time_adjust_step = min(time_adjust_step, (long)tickadj);
                time_adjust_step = max(time_adjust_step, (long)-tickadj);

                /* Reduce by this step the amount of time left */
                time_adjust -= time_adjust_step;
        }
        delta_nsec = tick_nsec + time_adjust_step * 1000;
        /*
         * Advance the phase, once it gets to one microsecond, then
         * advance the tick more.
         */
        time_phase += time_adj;
        if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
                long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
                time_phase -= ltemp << (SHIFT_SCALE - 10);
                delta_nsec += ltemp;
        }
        xtime.tv_nsec += delta_nsec;
        time_interpolator_update(delta_nsec);

        /* Changes by adjtime() do not take effect till next tick. */
        if (time_next_adjust != 0) {
                time_adjust = time_next_adjust;
                time_next_adjust = 0;
        }
}
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
        do {
                ticks--;
                update_wall_time_one_tick();
                if (xtime.tv_nsec >= 1000000000) {
                        xtime.tv_nsec -= 1000000000;
                        xtime.tv_sec++;
                        second_overflow();
                }
        } while (ticks);
}
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        if (user_tick)
                account_user_time(p, jiffies_to_cputime(1));
        else
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (count < 0) {
                count += LOAD_FREQ;
                active_tasks = count_active_tasks();
                CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                CALC_LOAD(avenrun[2], EXP_15, active_tasks);
        }
}
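
/*
 * Illustrative note (not part of the original file): avenrun[] is kept in
 * FSHIFT-bit fixed point, so a steady load of two runnable tasks is
 * represented as 2*FIXED_1.  Every LOAD_FREQ ticks (about five seconds)
 * CALC_LOAD() folds the instantaneous count into an exponentially weighted
 * average, roughly
 *
 *	load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT;
 *
 * with exp = EXP_1, EXP_5 or EXP_15 for the 1-, 5- and 15-minute figures
 * that sys_sysinfo() and /proc/loadavg report.
 */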
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        tvec_base_t *base = &__get_cpu_var(tvec_bases);

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        raise_softirq(TIMER_SOFTIRQ);
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ.
 */
static inline void update_times(void)
{
        unsigned long ticks;

        ticks = jiffies - wall_jiffies;
        if (ticks) {
                wall_jiffies += ticks;
                update_wall_time(ticks);
        }
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(struct pt_regs *regs)
{
        jiffies_64++;
        update_times();
        softlockup_tick(regs);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        struct itimerval it_new, it_old;
        unsigned int oldalarm;

        it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
        it_new.it_value.tv_sec = seconds;
        it_new.it_value.tv_usec = 0;
        do_setitimer(ITIMER_REAL, &it_new, &it_old);
        oldalarm = it_old.it_value.tv_sec;
        /* ehhh.. We can't return 0 if we have an alarm pending.. */
        /* And we'd better return too much than too little anyway */
        if ((!oldalarm && it_old.it_value.tv_usec) ||
             it_old.it_value.tv_usec >= 500000)
                oldalarm++;
        return oldalarm;
}

#endif
/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return current->tgid;
}
/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
        int pid;
        struct task_struct *me = current;
        struct task_struct *parent;

        parent = me->group_leader->real_parent;
        for (;;) {
                pid = parent->tgid;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
{
                struct task_struct *old = parent;

                /*
                 * Make sure we read the pid before re-reading the
                 * parent pointer:
                 */
                smp_rmb();
                parent = me->group_leader->real_parent;
                if (old != parent)
                        continue;
}
#endif
                break;
        }
        return pid;
}
asmlinkage long sys_getuid(void)
{
        /* Only we change this so SMP safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this so SMP safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this so SMP safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this so SMP safe */
        return current->egid;
}
static void process_timeout(unsigned long __data)
{
        wake_up_process((task_t *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0)
                {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx from %p\n", timeout,
                                __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire);
        schedule();
        del_singleshot_timer_sync(&timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}

EXPORT_SYMBOL(schedule_timeout);
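
/*
 * Illustrative usage sketch (not part of the original file): the task state
 * must be set before calling schedule_timeout().  To wait up to half a
 * second unless woken earlier:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ / 2);
 *
 * A nonzero 'remaining' means the task was woken or signalled before the
 * timeout expired.  The two helpers below simply bundle the
 * __set_current_state() call with schedule_timeout().
 */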
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
        return current->pid;
}
static long __sched nanosleep_restart(struct restart_block *restart)
{
        unsigned long expire = restart->arg0, now = jiffies;
        struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
        long ret;

        /* Did it expire while we handled signals? */
        if (!time_after(expire, now))
                return 0;

        expire = schedule_timeout_interruptible(expire - now);

        ret = 0;
        if (expire) {
                struct timespec t;
                jiffies_to_timespec(expire, &t);

                ret = -ERESTART_RESTARTBLOCK;
                if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
                        ret = -EFAULT;
                /* The 'restart' block is already filled in */
        }
        return ret;
}
asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
        struct timespec t;
        unsigned long expire;
        long ret;

        if (copy_from_user(&t, rqtp, sizeof(t)))
                return -EFAULT;

        if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
                return -EINVAL;

        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
        expire = schedule_timeout_interruptible(expire);

        ret = 0;
        if (expire) {
                struct restart_block *restart;
                jiffies_to_timespec(expire, &t);
                if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
                        return -EFAULT;

                restart = &current_thread_info()->restart_block;
                restart->fn = nanosleep_restart;
                restart->arg0 = jiffies + expire;
                restart->arg1 = (unsigned long) rmtp;
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}
/**
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
        struct sysinfo val;
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        unsigned long seq;

        memset((char *)&val, 0, sizeof(struct sysinfo));

        do {
                struct timespec tp;

                seq = read_seqbegin(&xtime_lock);

                /*
                 * This is annoying. The below is the same thing
                 * posix_get_clock_monotonic() does, but it wants to
                 * take the lock which we want to cover the loads stuff
                 * not the lock.
                 */
                getnstimeofday(&tp);
                tp.tv_sec += wall_to_monotonic.tv_sec;
                tp.tv_nsec += wall_to_monotonic.tv_nsec;
                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
                        tp.tv_sec++;
                }
                val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

                val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

                val.procs = nr_threads;
        } while (read_seqretry(&xtime_lock, seq));

        si_meminfo(&val);
        si_swapinfo(&val);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels.  If not,
         * well, in that case 2.2.x was broken anyways...
         *
         *  -Erik Andersen <andersee@debian.org>
         */
        mem_total = val.totalram + val.totalswap;
        if (mem_total < val.totalram || mem_total < val.totalswap)
                goto out;
        bitcount = 0;
        mem_unit = val.mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * val.mem_unit and set it to 1.  This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */
        val.mem_unit = 1;
        val.totalram <<= bitcount;
        val.freeram <<= bitcount;
        val.sharedram <<= bitcount;
        val.bufferram <<= bitcount;
        val.totalswap <<= bitcount;
        val.freeswap <<= bitcount;
        val.totalhigh <<= bitcount;
        val.freehigh <<= bitcount;

 out:
        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}
static void __devinit init_timers_cpu(int cpu)
{
        int j;
        tvec_base_t *base;

        base = &per_cpu(tvec_bases, cpu);
        spin_lock_init(&base->t_base.lock);
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_entry(head->next, struct timer_list, entry);
                detach_timer(timer, 0);
                timer->base = &new_base->t_base;
                internal_add_timer(new_base, timer);
        }
}

static void __devinit migrate_timers(int cpu)
{
        tvec_base_t *old_base;
        tvec_base_t *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = &per_cpu(tvec_bases, cpu);
        new_base = &get_cpu_var(tvec_bases);

        local_irq_disable();
        spin_lock(&new_base->t_base.lock);
        spin_lock(&old_base->t_base.lock);

        if (old_base->t_base.running_timer)
                BUG();
        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->t_base.lock);
        spin_unlock(&new_base->t_base.lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                init_timers_cpu(cpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
        .notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
        timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator;
static struct time_interpolator *time_interpolator_list;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
        unsigned long (*x)(void);

        switch (src)
        {
                case TIME_SOURCE_FUNCTION:
                        x = time_interpolator->addr;
                        return x();

                case TIME_SOURCE_MMIO64:
                        return readq((void __iomem *) time_interpolator->addr);

                case TIME_SOURCE_MMIO32:
                        return readl((void __iomem *) time_interpolator->addr);

                default: return get_cycles();
        }
}
static inline u64 time_interpolator_get_counter(int writelock)
{
        unsigned int src = time_interpolator->source;

        if (time_interpolator->jitter)
        {
                u64 lcycle;
                u64 now;

                do {
                        lcycle = time_interpolator->last_cycle;
                        now = time_interpolator_get_cycles(src);
                        if (lcycle && time_after(lcycle, now))
                                return lcycle;

                        /* When holding the xtime write lock, there's no need
                         * to add the overhead of the cmpxchg.  Readers are
                         * forced to retry until the write lock is released.
                         */
                        if (writelock) {
                                time_interpolator->last_cycle = now;
                                return now;
                        }
                        /* Keep track of the last timer value returned. The use of cmpxchg here
                         * will cause contention in an SMP environment.
                         */
                } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
                return now;
        }
        else
                return time_interpolator_get_cycles(src);
}

void time_interpolator_reset(void)
{
        time_interpolator->offset = 0;
        time_interpolator->last_counter = time_interpolator_get_counter(1);
}
#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
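
/*
 * Illustrative note (not part of the original file): GET_TI_NSECS() converts
 * a raw counter delta into nanoseconds.  nsec_per_cyc is precomputed in
 * register_time_interpolator() as (NSEC_PER_SEC << shift) / frequency, so
 * for an assumed 10 MHz counter with shift = 10 it is about 100 << 10, and a
 * delta of 25 cycles yields (25 * (100 << 10)) >> 10 = 2500 ns.
 */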
unsigned long time_interpolator_get_offset(void)
{
        /* If we do not have a time interpolator set up then just return zero */
        if (!time_interpolator)
                return 0;

        return time_interpolator->offset +
                GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}
#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST

static void time_interpolator_update(long delta_nsec)
{
        u64 counter;
        unsigned long offset;

        /* If there is no time interpolator set up then do nothing */
        if (!time_interpolator)
                return;

        /*
         * The interpolator compensates for late ticks by accumulating the late
         * time in time_interpolator->offset. A tick earlier than expected will
         * lead to a reset of the offset and a corresponding jump of the clock
         * forward. Again this only works if the interpolator clock is running
         * slightly slower than the regular clock and the tuning logic ensures
         * that.
         */
        counter = time_interpolator_get_counter(1);
        offset = time_interpolator->offset +
                        GET_TI_NSECS(counter, time_interpolator);

        if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
                time_interpolator->offset = offset - delta_nsec;
        else {
                time_interpolator->skips++;
                time_interpolator->ns_skipped += delta_nsec - offset;
                time_interpolator->offset = 0;
        }
        time_interpolator->last_counter = counter;

        /*
         * Tuning logic for time interpolator invoked every minute or so.
         * Decrease interpolator clock speed if no skips occurred and an offset is carried.
         * Increase interpolator clock speed if we skip too much time.
         */
        if (jiffies % INTERPOLATOR_ADJUST == 0)
        {
                if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
                        time_interpolator->nsec_per_cyc--;
                if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
                        time_interpolator->nsec_per_cyc++;
                time_interpolator->skips = 0;
                time_interpolator->ns_skipped = 0;
        }
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
        if (!time_interpolator)
                return 1;
        return new->frequency > 2*time_interpolator->frequency ||
            (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
        unsigned long flags;

        if (ti->frequency == 0 || ti->mask == 0)
                BUG();

        ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
        spin_lock(&time_interpolator_lock);
        write_seqlock_irqsave(&xtime_lock, flags);
        if (is_better_time_interpolator(ti)) {
                time_interpolator = ti;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);

        ti->next = time_interpolator_list;
        time_interpolator_list = ti;
        spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
        struct time_interpolator *curr, **prev;
        unsigned long flags;

        spin_lock(&time_interpolator_lock);
        prev = &time_interpolator_list;
        for (curr = *prev; curr; curr = curr->next) {
                if (curr == ti) {
                        *prev = curr->next;
                        break;
                }
                prev = &curr->next;
        }

        write_seqlock_irqsave(&xtime_lock, flags);
        if (ti == time_interpolator) {
                /* we lost the best time-interpolator: */
                time_interpolator = NULL;
                /* find the next-best interpolator */
                for (curr = time_interpolator_list; curr; curr = curr->next)
                        if (is_better_time_interpolator(curr))
                                time_interpolator = curr;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);
        spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);