sched: Fold updating of the last_update_time_info into update_ts_time_stats()
/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time, when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

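/*
 * Get the per cpu tick_sched structure of a given cpu (used, for
 * instance, by the timer list debug code).
 */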
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}

/*
 * Initialize and retrieve the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
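/* Selectable at boot time, e.g. "nohz=off" on the kernel command line */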
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu, which has the update task assigned, is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;

	cpumask_clear_cpu(cpu, nohz_cpu_mask);
	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

/*
 * Updates the per cpu time idle statistics counters and, if
 * @last_update_time is non-NULL, stores the time of this update
 * (in us) there.
 */
static void
update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	ts->idle_lastupdate = now;
	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	/* Callers not interested in the timestamp pass NULL */
	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

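/*
 * Fold the elapsed idle time into the statistics and mark the cpu as
 * no longer idle.
 */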
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	update_ts_time_stats(ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

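/*
 * Begin an idle period: bring the statistics up to date, stamp the
 * entry time and return the current time.
 */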
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now;

	now = ktime_get();

	update_ts_time_stats(ts, now, NULL);

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds. The idle time returned includes
 * the iowait time (unlike what "top" and co report).
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (!tick_nohz_enabled)
		return -1;

	update_ts_time_stats(ts, ktime_get(), last_update_time);

	return ktime_to_us(ts->idle_sleeptime);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

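/*
 * Example (hypothetical caller; cpufreq governors sample idle time
 * this way):
 *
 *	u64 wall;
 *	u64 idle_us = get_cpu_idle_time_us(cpu, &wall);
 *
 * idle_us then holds the cumulative idle time in us, wall the
 * timestamp (in us) of the statistics update.
 */
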
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * Call to tick_nohz_start_idle stops the last_update_time from being
	 * updated. Thus, it must not be called in the event we are called from
	 * irq_exit() with the prior state different than idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	if (nohz_ratelimit(cpu))
		goto end;

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogram of event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpumask_clear_cpu(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, we simply stop
		 * the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}
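
/*
 * Example (hypothetical caller; the cpuidle menu governor uses this
 * pattern to estimate the expected idle residency):
 *
 *	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
 */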
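
/*
 * Rearm the stopped tick: restore the saved tick time, forward it
 * into the future and reprogram until an expiry in the future is hit.
 */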
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
}

/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();
	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpumask_clear_cpu(cpu, nohz_cpu_mask);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only a 1 tick accounting.
	 * Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif

	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

	local_irq_enable();
}

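/*
 * Forward the tick timer by one period and reprogram the clock event
 * device; returns nonzero when the expiry is already in the past.
 */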
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHZ mode on CPU #%d\n",
	       smp_processor_id());
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different to tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	/*
	 * Do not touch the tick device, when the next expiry is either
	 * already reached or less/equal than the tick period.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

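/*
 * Called from irq_enter() via tick_check_idle(): close the idle
 * accounting period and resynchronize jiffies when an interrupt hits
 * an idle, tick-stopped cpu.
 */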
static inline void tick_check_nohz(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(cpu, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
	tick_check_oneshot_broadcast(cpu);
	tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();
	u64 offset;

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
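	/*
	 * Stagger the expiry across cpus: each cpu gets an offset of
	 * up to half a tick period, so that not all per-cpu tick
	 * timers fire at the same instant.
	 */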
	offset = ktime_to_ns(tick_period) >> 1;
	do_div(offset, num_possible_cpus());
	offset *= smp_processor_id();
	hrtimer_add_expires_ns(&ts->sched_timer, offset);

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either compile
 * time or runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}