/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif

/*
 * per-CPU timer vector definitions:
 */

#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct timer_base_s {
        spinlock_t lock;
        struct timer_list *running_timer;
};

typedef struct tvec_s {
        struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
        struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
        struct timer_base_s t_base;
        unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
        tvec_t tv4;
        tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;
static DEFINE_PER_CPU(tvec_base_t, tvec_bases);

static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->t_base.running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}

typedef struct timer_base_s timer_base_t;
/*
 * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
 * at compile time, and we need timer->base to lock the timer.
 */
timer_base_t __init_timer_base
        ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
EXPORT_SYMBOL(__init_timer_base);

/***
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base;
}
EXPORT_SYMBOL(init_timer);

static inline void detach_timer(struct timer_list *timer,
                                        int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer is removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static timer_base_t *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
{
        timer_base_t *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        timer_base_t *base;
        tvec_base_t *new_base;
        unsigned long flags;
        int ret = 0;

        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = &__get_cpu_var(tvec_bases);

        if (base != &new_base->t_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (unlikely(base->running_timer == timer)) {
                        /* The timer remains on a former base */
                        new_base = container_of(base, tvec_base_t, t_base);
                } else {
                        /* See the comment in lock_timer_base() */
                        timer->base = NULL;
                        spin_unlock(&base->lock);
                        spin_lock(&new_base->t_base.lock);
                        timer->base = &new_base->t_base;
                }
        }

        timer->expires = expires;
        internal_add_timer(new_base, timer);
        spin_unlock_irqrestore(&new_base->t_base.lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);

/***
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        tvec_base_t *base = &per_cpu(tvec_bases, cpu);
        unsigned long flags;

        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->t_base.lock, flags);
        timer->base = &base->t_base;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->t_base.lock, flags);
}


/***
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);

/***
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        timer_base_t *base;
        unsigned long flags;
        int ret = 0;

        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        timer_base_t *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/***
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
        }
}

EXPORT_SYMBOL(del_timer_sync);
#endif

static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct list_head *head, *curr;

        head = tv->vec + index;
        curr = head->next;
        /*
         * We are removing _all_ timers from the list, so we don't have to
         * detach them individually, just clear the list afterwards.
         */
        while (curr != head) {
                struct timer_list *tmp;

                tmp = list_entry(curr, struct timer_list, entry);
                BUG_ON(tmp->base != &base->t_base);
                curr = curr->next;
                internal_add_timer(base, tmp);
        }
        INIT_LIST_HEAD(head);

        return index;
}

/***
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->t_base.lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list = LIST_HEAD_INIT(work_list);
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_splice_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_entry(head->next, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->t_base.lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_WARNING "huh, entered %p "
                                                "with preempt_count %08x, exited"
                                                " with %08x?\n",
                                                fn, preempt_count,
                                                preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->t_base.lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->t_base.lock);
}

#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
        tvec_base_t *base;
        struct list_head *list;
        struct timer_list *nte;
        unsigned long expires;
        tvec_t *varray[4];
        int i, j;

        base = &__get_cpu_var(tvec_bases);
        spin_lock(&base->t_base.lock);
        expires = base->timer_jiffies + (LONG_MAX >> 1);
        list = NULL;

        /* Look for timer events in tv1. */
        j = base->timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + j, entry) {
                        expires = nte->expires;
                        if (j < (base->timer_jiffies & TVR_MASK))
                                list = base->tv2.vec + (INDEX(0));
                        goto found;
                }
                j = (j + 1) & TVR_MASK;
        } while (j != (base->timer_jiffies & TVR_MASK));

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;
        for (i = 0; i < 4; i++) {
                j = INDEX(i);
                do {
                        if (list_empty(varray[i]->vec + j)) {
                                j = (j + 1) & TVN_MASK;
                                continue;
                        }
                        list_for_each_entry(nte, varray[i]->vec + j, entry)
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        if (j < (INDEX(i)) && i < 3)
                                list = varray[i + 1]->vec + (INDEX(i + 1));
                        goto found;
                } while (j != (INDEX(i)));
        }
found:
        if (list) {
                /*
                 * The search wrapped. We need to look at the next list
                 * from next tv element that would cascade into tv element
                 * where we found the timer element.
                 */
                list_for_each_entry(nte, list, entry) {
                        if (time_before(nte->expires, expires))
                                expires = nte->expires;
                }
        }
        spin_unlock(&base->t_base.lock);
        return expires;
}
#endif

/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;            /* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;            /* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500. */
int tickadj = 500/HZ ? : 1;             /* microsecs */

568 | ||
569 | /* | |
570 | * phase-lock loop variables | |
571 | */ | |
572 | /* TIME_ERROR prevents overwriting the CMOS clock */ | |
573 | int time_state = TIME_OK; /* clock synchronization status */ | |
574 | int time_status = STA_UNSYNC; /* clock status bits */ | |
575 | long time_offset; /* time adjustment (us) */ | |
576 | long time_constant = 2; /* pll time constant */ | |
577 | long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */ | |
578 | long time_precision = 1; /* clock precision (us) */ | |
579 | long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ | |
580 | long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ | |
581 | static long time_phase; /* phase offset (scaled us) */ | |
582 | long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC; | |
583 | /* frequency offset (scaled ppm)*/ | |
584 | static long time_adj; /* tick adjust (scaled 1 / HZ) */ | |
585 | long time_reftime; /* time at last adjustment (s) */ | |
586 | long time_adjust; | |
587 | long time_next_adjust; | |
588 | ||
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 */
static void second_overflow(void)
{
        long ltemp;

        /* Bump the maxerror field */
        time_maxerror += time_tolerance >> SHIFT_USEC;
        if (time_maxerror > NTP_PHASE_LIMIT) {
                time_maxerror = NTP_PHASE_LIMIT;
                time_status |= STA_UNSYNC;
        }

        /*
         * Leap second processing. If in leap-insert state at the end of the
         * day, the system clock is set back one second; if in leap-delete
         * state, the system clock is set ahead one second. The microtime()
         * routine or external clock driver will ensure that reported time is
         * always monotonic. The ugly divides should be replaced.
         */
        switch (time_state) {
        case TIME_OK:
                if (time_status & STA_INS)
                        time_state = TIME_INS;
                else if (time_status & STA_DEL)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--;
                        wall_to_monotonic.tv_sec++;
                        /*
                         * The timer interpolator will make time change
                         * gradually instead of an immediate jump by one second
                         */
                        time_interpolator_update(-NSEC_PER_SEC);
                        time_state = TIME_OOP;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: inserting leap second "
                                        "23:59:60 UTC\n");
                }
                break;
        case TIME_DEL:
                if ((xtime.tv_sec + 1) % 86400 == 0) {
                        xtime.tv_sec++;
                        wall_to_monotonic.tv_sec--;
                        /*
                         * Use of time interpolator for a gradual change of
                         * time
                         */
                        time_interpolator_update(NSEC_PER_SEC);
                        time_state = TIME_WAIT;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: deleting leap second "
                                        "23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
                time_state = TIME_WAIT;
                break;
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
        }

        /*
         * Compute the phase adjustment for the next second. In PLL mode, the
         * offset is reduced by a fixed factor times the time constant. In FLL
         * mode the offset is used directly. In either mode, the maximum phase
         * adjustment for each second is clamped so as to spread the adjustment
         * over not more than the number of seconds between updates.
         */
        ltemp = time_offset;
        if (!(time_status & STA_FLL))
                ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
        ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
        ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
        time_offset -= ltemp;
        time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

        /*
         * Compute the frequency estimate and additional phase adjustment due
         * to frequency error for the next second. When the PPS signal is
         * engaged, gnaw on the watchdog counter and update the frequency
         * computed by the pll and the PPS signal.
         */
        pps_valid++;
        if (pps_valid == PPS_VALID) {   /* PPS signal lost */
                pps_jitter = MAXTIME;
                pps_stabil = MAXFREQ;
                time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                 STA_PPSWANDER | STA_PPSERROR);
        }
        ltemp = time_freq + pps_freq;
        time_adj += shift_right(ltemp, (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
        /*
         * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
         * get 128.125; => only 0.125% error (p. 14)
         */
        time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
        /*
         * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
        /*
         * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}

/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
        long time_adjust_step, delta_nsec;

        if ((time_adjust_step = time_adjust) != 0) {
                /*
                 * We are doing an adjtime thing.  Prepare time_adjust_step to
                 * be within bounds.  Note that a positive time_adjust means we
                 * want the clock to run faster.
                 *
                 * Limit the amount of the step to be in the range
                 * -tickadj .. +tickadj
                 */
                time_adjust_step = min(time_adjust_step, (long)tickadj);
                time_adjust_step = max(time_adjust_step, (long)-tickadj);

                /* Reduce by this step the amount of time left */
                time_adjust -= time_adjust_step;
        }
        delta_nsec = tick_nsec + time_adjust_step * 1000;
        /*
         * Advance the phase; once it accumulates to one microsecond,
         * advance the tick by that much as well.
         */
        time_phase += time_adj;
        if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
                long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
                time_phase -= ltemp << (SHIFT_SCALE - 10);
                delta_nsec += ltemp;
        }
        xtime.tv_nsec += delta_nsec;
        time_interpolator_update(delta_nsec);

        /* Changes by adjtime() do not take effect until the next tick. */
        if (time_next_adjust != 0) {
                time_adjust = time_next_adjust;
                time_next_adjust = 0;
        }
}

/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
        do {
                ticks--;
                update_wall_time_one_tick();
                if (xtime.tv_nsec >= 1000000000) {
                        xtime.tv_nsec -= 1000000000;
                        xtime.tv_sec++;
                        second_overflow();
                }
        } while (ticks);
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        if (user_tick)
                account_user_time(p, jiffies_to_cputime(1));
        else
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (count < 0) {
                count += LOAD_FREQ;
                active_tasks = count_active_tasks();
                CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                CALC_LOAD(avenrun[2], EXP_15, active_tasks);
        }
}

/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        tvec_base_t *base = &__get_cpu_var(tvec_bases);

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
        unsigned long ticks;

        ticks = jiffies - wall_jiffies;
        if (ticks) {
                wall_jiffies += ticks;
                update_wall_time(ticks);
        }
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
        jiffies_64++;
        update_times();
        softlockup_tick(regs);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        struct itimerval it_new, it_old;
        unsigned int oldalarm;

        it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
        it_new.it_value.tv_sec = seconds;
        it_new.it_value.tv_usec = 0;
        do_setitimer(ITIMER_REAL, &it_new, &it_old);
        oldalarm = it_old.it_value.tv_sec;
        /* ehhh.. We can't return 0 if we have an alarm pending.. */
        /* And we'd better return too much than too little anyway */
        if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
                oldalarm++;
        return oldalarm;
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return current->tgid;
}

/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
        int pid;
        struct task_struct *me = current;
        struct task_struct *parent;

        parent = me->group_leader->real_parent;
        for (;;) {
                pid = parent->tgid;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
                {
                        struct task_struct *old = parent;

                        /*
                         * Make sure we read the pid before re-reading the
                         * parent pointer:
                         */
                        smp_rmb();
                        parent = me->group_leader->real_parent;
                        if (old != parent)
                                continue;
                }
#endif
                break;
        }
        return pid;
}

asmlinkage long sys_getuid(void)
{
        /* Only we change this so SMP safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this so SMP safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this so SMP safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this so SMP safe */
        return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((task_t *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>= 0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0)
                {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx from %p\n", timeout,
                                __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire);
        schedule();
        del_singleshot_timer_sync(&timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
        return current->pid;
}

static long __sched nanosleep_restart(struct restart_block *restart)
{
        unsigned long expire = restart->arg0, now = jiffies;
        struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
        long ret;

        /* Did it expire while we handled signals? */
        if (!time_after(expire, now))
                return 0;

        expire = schedule_timeout_interruptible(expire - now);

        ret = 0;
        if (expire) {
                struct timespec t;
                jiffies_to_timespec(expire, &t);

                ret = -ERESTART_RESTARTBLOCK;
                if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
                        ret = -EFAULT;
                /* The 'restart' block is already filled in */
        }
        return ret;
}

asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
        struct timespec t;
        unsigned long expire;
        long ret;

        if (copy_from_user(&t, rqtp, sizeof(t)))
                return -EFAULT;

        if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
                return -EINVAL;

        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
        expire = schedule_timeout_interruptible(expire);

        ret = 0;
        if (expire) {
                struct restart_block *restart;
                jiffies_to_timespec(expire, &t);
                if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
                        return -EFAULT;

                restart = &current_thread_info()->restart_block;
                restart->fn = nanosleep_restart;
                restart->arg0 = jiffies + expire;
                restart->arg1 = (unsigned long) rmtp;
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}

/*
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
        struct sysinfo val;
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        unsigned long seq;

        memset((char *)&val, 0, sizeof(struct sysinfo));

        do {
                struct timespec tp;
                seq = read_seqbegin(&xtime_lock);

                /*
                 * This is annoying.  The below is the same thing
                 * posix_get_clock_monotonic() does, but it wants to
                 * take the lock which we want to cover the loads stuff
                 * too.
                 */

                getnstimeofday(&tp);
                tp.tv_sec += wall_to_monotonic.tv_sec;
                tp.tv_nsec += wall_to_monotonic.tv_nsec;
                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
                        tp.tv_sec++;
                }
                val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

                val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

                val.procs = nr_threads;
        } while (read_seqretry(&xtime_lock, seq));

        si_meminfo(&val);
        si_swapinfo(&val);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels.  If not,
         * well, in that case 2.2.x was broken anyways...
         *
         *  -Erik Andersen <andersee@debian.org>
         */

        mem_total = val.totalram + val.totalswap;
        if (mem_total < val.totalram || mem_total < val.totalswap)
                goto out;
        bitcount = 0;
        mem_unit = val.mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * val.mem_unit and set it to 1.  This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        val.mem_unit = 1;
        val.totalram <<= bitcount;
        val.freeram <<= bitcount;
        val.sharedram <<= bitcount;
        val.bufferram <<= bitcount;
        val.totalswap <<= bitcount;
        val.freeswap <<= bitcount;
        val.totalhigh <<= bitcount;
        val.freehigh <<= bitcount;

out:
        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}

static void __devinit init_timers_cpu(int cpu)
{
        int j;
        tvec_base_t *base;

        base = &per_cpu(tvec_bases, cpu);
        spin_lock_init(&base->t_base.lock);
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_entry(head->next, struct timer_list, entry);
                detach_timer(timer, 0);
                timer->base = &new_base->t_base;
                internal_add_timer(new_base, timer);
        }
}

static void __devinit migrate_timers(int cpu)
{
        tvec_base_t *old_base;
        tvec_base_t *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = &per_cpu(tvec_bases, cpu);
        new_base = &get_cpu_var(tvec_bases);

        local_irq_disable();
        spin_lock(&new_base->t_base.lock);
        spin_lock(&old_base->t_base.lock);

        if (old_base->t_base.running_timer)
                BUG();
        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->t_base.lock);
        spin_unlock(&new_base->t_base.lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch(action) {
        case CPU_UP_PREPARE:
                init_timers_cpu(cpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
        .notifier_call  = timer_cpu_notify,
};

void __init init_timers(void)
{
        timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}

#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator;
static struct time_interpolator *time_interpolator_list;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
        unsigned long (*x)(void);

        switch (src)
        {
                case TIME_SOURCE_FUNCTION:
                        x = time_interpolator->addr;
                        return x();

                case TIME_SOURCE_MMIO64:
                        return readq((void __iomem *) time_interpolator->addr);

                case TIME_SOURCE_MMIO32:
                        return readl((void __iomem *) time_interpolator->addr);

                default: return get_cycles();
        }
}

static inline u64 time_interpolator_get_counter(int writelock)
{
        unsigned int src = time_interpolator->source;

        if (time_interpolator->jitter)
        {
                u64 lcycle;
                u64 now;

                do {
                        lcycle = time_interpolator->last_cycle;
                        now = time_interpolator_get_cycles(src);
                        if (lcycle && time_after(lcycle, now))
                                return lcycle;

                        /* When holding the xtime write lock, there's no need
                         * to add the overhead of the cmpxchg.  Readers are
                         * forced to retry until the write lock is released.
                         */
                        if (writelock) {
                                time_interpolator->last_cycle = now;
                                return now;
                        }
                        /* Keep track of the last timer value returned.
                         * The use of cmpxchg here will cause contention
                         * in an SMP environment.
                         */
                } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
                return now;
        }
        else
                return time_interpolator_get_cycles(src);
}

void time_interpolator_reset(void)
{
        time_interpolator->offset = 0;
        time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

unsigned long time_interpolator_get_offset(void)
{
        /* If we do not have a time interpolator set up then just return zero */
        if (!time_interpolator)
                return 0;

        return time_interpolator->offset +
                GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}

#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST

static void time_interpolator_update(long delta_nsec)
{
        u64 counter;
        unsigned long offset;

        /* If there is no time interpolator set up then do nothing */
        if (!time_interpolator)
                return;

        /*
         * The interpolator compensates for late ticks by accumulating the late
         * time in time_interpolator->offset. A tick earlier than expected will
         * lead to a reset of the offset and a corresponding jump of the clock
         * forward. Again this only works if the interpolator clock is running
         * slightly slower than the regular clock and the tuning logic ensures
         * that.
         */

        counter = time_interpolator_get_counter(1);
        offset = time_interpolator->offset +
                        GET_TI_NSECS(counter, time_interpolator);

        if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
                time_interpolator->offset = offset - delta_nsec;
        else {
                time_interpolator->skips++;
                time_interpolator->ns_skipped += delta_nsec - offset;
                time_interpolator->offset = 0;
        }
        time_interpolator->last_counter = counter;

        /* Tuning logic for time interpolator invoked every minute or so.
         * Decrease interpolator clock speed if no skips occurred and an
         * offset is carried.
         * Increase interpolator clock speed if we skip too much time.
         */
        if (jiffies % INTERPOLATOR_ADJUST == 0)
        {
                if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
                        time_interpolator->nsec_per_cyc--;
                if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
                        time_interpolator->nsec_per_cyc++;
                time_interpolator->skips = 0;
                time_interpolator->ns_skipped = 0;
        }
}

static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
        if (!time_interpolator)
                return 1;
        return new->frequency > 2*time_interpolator->frequency ||
            (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}

void
register_time_interpolator(struct time_interpolator *ti)
{
        unsigned long flags;

        /* Sanity check */
        if (ti->frequency == 0 || ti->mask == 0)
                BUG();

        ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
        spin_lock(&time_interpolator_lock);
        write_seqlock_irqsave(&xtime_lock, flags);
        if (is_better_time_interpolator(ti)) {
                time_interpolator = ti;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);

        ti->next = time_interpolator_list;
        time_interpolator_list = ti;
        spin_unlock(&time_interpolator_lock);
}

void
unregister_time_interpolator(struct time_interpolator *ti)
{
        struct time_interpolator *curr, **prev;
        unsigned long flags;

        spin_lock(&time_interpolator_lock);
        prev = &time_interpolator_list;
        for (curr = *prev; curr; curr = curr->next) {
                if (curr == ti) {
                        *prev = curr->next;
                        break;
                }
                prev = &curr->next;
        }

        write_seqlock_irqsave(&xtime_lock, flags);
        if (ti == time_interpolator) {
                /* we lost the best time-interpolator: */
                time_interpolator = NULL;
                /* find the next-best interpolator */
                for (curr = time_interpolator_list; curr; curr = curr->next)
                        if (is_better_time_interpolator(curr))
                                time_interpolator = curr;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);
        spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);