Commit | Line | Data |
---|---|---|
79bf2bb3 TG |
1 | /* |
2 | * linux/kernel/time/tick-sched.c | |
3 | * | |
4 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> | |
5 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar | |
6 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner | |
7 | * | |
8 | * No idle tick implementation for low and high resolution timers | |
9 | * | |
10 | * Started by: Thomas Gleixner and Ingo Molnar | |
11 | * | |
b10db7f0 | 12 | * Distribute under GPLv2. |
79bf2bb3 TG |
13 | */ |
14 | #include <linux/cpu.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/hrtimer.h> | |
17 | #include <linux/interrupt.h> | |
18 | #include <linux/kernel_stat.h> | |
19 | #include <linux/percpu.h> | |
20 | #include <linux/profile.h> | |
21 | #include <linux/sched.h> | |
8083e4ad | 22 | #include <linux/module.h> |
6fa3eb70 S |
23 | #ifdef CONFIG_MTK_SCHED_RQAVG_US |
24 | #include <linux/rq_stats.h> | |
25 | #endif | |
00b42959 | 26 | #include <linux/irq_work.h> |
9014c45d FW |
27 | #include <linux/posix-timers.h> |
28 | #include <linux/perf_event.h> | |
79bf2bb3 | 29 | |
9e203bcc DM |
30 | #include <asm/irq_regs.h> |
31 | ||
79bf2bb3 TG |
32 | #include "tick-internal.h" |
33 | ||
cb41a290 FW |
34 | #include <trace/events/timer.h> |
35 | ||
6fa3eb70 S |
36 | #ifdef CONFIG_MTK_SCHED_RQAVG_US |
37 | struct rq_data rq_info; | |
38 | #ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ | |
39 | struct workqueue_struct *rq_wq; | |
40 | #endif | |
41 | spinlock_t rq_lock; | |
42 | #endif | |
43 | ||
44 | #ifdef CONFIG_MT_LOAD_BALANCE_PROFILER | |
45 | #include <mtlbprof/mtlbprof.h> | |
46 | #endif | |
47 | ||
79bf2bb3 TG |
48 | /* |
49 | * Per cpu nohz control structure | |
50 | */ | |
33a5f626 | 51 | DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); |
79bf2bb3 TG |
52 | |
53 | /* | |
d6ad4187 | 54 | * The time, when the last jiffy update happened. Protected by jiffies_lock. |
79bf2bb3 TG |
55 | */ |
56 | static ktime_t last_jiffies_update; | |
57 | ||
289f480a IM |
58 | struct tick_sched *tick_get_tick_sched(int cpu) |
59 | { | |
60 | return &per_cpu(tick_cpu_sched, cpu); | |
61 | } | |
62 | ||
79bf2bb3 TG |
63 | /* |
64 | * Must be called with interrupts disabled ! | |
65 | */ | |
66 | static void tick_do_update_jiffies64(ktime_t now) | |
67 | { | |
68 | unsigned long ticks = 0; | |
69 | ktime_t delta; | |
70 | ||
7a14ce1d | 71 | /* |
d6ad4187 | 72 | * Do a quick check without holding jiffies_lock: |
7a14ce1d IM |
73 | */ |
74 | delta = ktime_sub(now, last_jiffies_update); | |
75 | if (delta.tv64 < tick_period.tv64) | |
76 | return; | |
77 | ||
d6ad4187 JS |
78 | /* Re-evaluate with jiffies_lock held */ |
79 | write_seqlock(&jiffies_lock); | |
79bf2bb3 TG |
80 | |
81 | delta = ktime_sub(now, last_jiffies_update); | |
82 | if (delta.tv64 >= tick_period.tv64) { | |
83 | ||
84 | delta = ktime_sub(delta, tick_period); | |
85 | last_jiffies_update = ktime_add(last_jiffies_update, | |
86 | tick_period); | |
87 | ||
88 | /* Slow path for long timeouts */ | |
89 | if (unlikely(delta.tv64 >= tick_period.tv64)) { | |
90 | s64 incr = ktime_to_ns(tick_period); | |
91 | ||
92 | ticks = ktime_divns(delta, incr); | |
93 | ||
94 | last_jiffies_update = ktime_add_ns(last_jiffies_update, | |
95 | incr * ticks); | |
96 | } | |
97 | do_timer(++ticks); | |
49d670fb TG |
98 | |
99 | /* Keep the tick_next_period variable up to date */ | |
100 | tick_next_period = ktime_add(last_jiffies_update, tick_period); | |
79bf2bb3 | 101 | } |
d6ad4187 | 102 | write_sequnlock(&jiffies_lock); |
79bf2bb3 TG |
103 | } |
104 | ||
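The slow path above avoids looping once per missed tick: after consuming the first period it divides the remaining delta by the tick period and feeds the whole batch into do_timer(). A minimal userspace sketch of that arithmetic, assuming HZ=1000 (the variable names are illustrative, not kernel symbols):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t tick_period_ns = 1000000;  /* one tick at HZ=1000 */
	int64_t delta_ns = 3500000;        /* slept 3.5 ms past the last update */

	/* Mirror of tick_do_update_jiffies64(): consume one period,
	 * then batch the rest with a division (the "slow path"). */
	int64_t ticks = 0;
	delta_ns -= tick_period_ns;
	if (delta_ns >= tick_period_ns)
		ticks = delta_ns / tick_period_ns;
	ticks += 1;                        /* do_timer(++ticks) */

	printf("jiffies advance by %lld\n", (long long)ticks); /* prints 3 */
	return 0;
}
```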
105 | /* | |
106 | * Initialize and return the jiffies update. |
107 | */ | |
108 | static ktime_t tick_init_jiffy_update(void) | |
109 | { | |
110 | ktime_t period; | |
111 | ||
d6ad4187 | 112 | write_seqlock(&jiffies_lock); |
79bf2bb3 TG |
113 | /* Did we start the jiffies update yet ? */ |
114 | if (last_jiffies_update.tv64 == 0) | |
115 | last_jiffies_update = tick_next_period; | |
116 | period = last_jiffies_update; | |
d6ad4187 | 117 | write_sequnlock(&jiffies_lock); |
79bf2bb3 TG |
118 | return period; |
119 | } | |
120 | ||
5bb96226 FW |
121 | |
122 | static void tick_sched_do_timer(ktime_t now) | |
123 | { | |
124 | int cpu = smp_processor_id(); | |
125 | ||
3451d024 | 126 | #ifdef CONFIG_NO_HZ_COMMON |
5bb96226 FW |
127 | /* |
128 | * Check if the do_timer duty was dropped. We don't care about | |
129 | * concurrency: This happens only when the cpu in charge went | |
130 | * into a long sleep. If two cpus happen to assign themselves to |
131 | * this duty, then the jiffies update is still serialized by | |
9c3f9e28 | 132 | * jiffies_lock. |
5bb96226 | 133 | */ |
a382bf93 | 134 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE) |
c5bfece2 | 135 | && !tick_nohz_full_cpu(cpu)) |
5bb96226 FW |
136 | tick_do_timer_cpu = cpu; |
137 | #endif | |
138 | ||
139 | /* Check, if the jiffies need an update */ | |
140 | if (tick_do_timer_cpu == cpu) | |
141 | tick_do_update_jiffies64(now); | |
142 | } | |
143 | ||
9e8f559b FW |
144 | static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) |
145 | { | |
3451d024 | 146 | #ifdef CONFIG_NO_HZ_COMMON |
9e8f559b FW |
147 | /* |
148 | * When we are idle and the tick is stopped, we have to touch | |
149 | * the watchdog as we might not schedule for a really long | |
150 | * time. This happens on completely idle SMP systems while |
151 | * waiting on the login prompt. We also increment the "start of | |
152 | * idle" jiffy stamp so the idle accounting adjustment we do | |
153 | * when we go busy again does not account too many ticks. |
154 | */ | |
155 | if (ts->tick_stopped) { | |
156 | touch_softlockup_watchdog(); | |
157 | if (is_idle_task(current)) | |
158 | ts->idle_jiffies++; | |
159 | } | |
94a57140 | 160 | #endif |
9e8f559b FW |
161 | update_process_times(user_mode(regs)); |
162 | profile_tick(CPU_PROFILING); | |
163 | } | |
164 | ||
c5bfece2 FW |
165 | #ifdef CONFIG_NO_HZ_FULL |
166 | static cpumask_var_t nohz_full_mask; | |
167 | bool have_nohz_full_mask; | |
a831881b | 168 | |
9014c45d FW |
169 | static bool can_stop_full_tick(void) |
170 | { | |
171 | WARN_ON_ONCE(!irqs_disabled()); | |
172 | ||
cb41a290 FW |
173 | if (!sched_can_stop_tick()) { |
174 | trace_tick_stop(0, "more than 1 task in runqueue\n"); | |
9014c45d | 175 | return false; |
cb41a290 | 176 | } |
9014c45d | 177 | |
cb41a290 FW |
178 | if (!posix_cpu_timers_can_stop_tick(current)) { |
179 | trace_tick_stop(0, "posix timers running\n"); | |
9014c45d | 180 | return false; |
cb41a290 | 181 | } |
9014c45d | 182 | |
cb41a290 FW |
183 | if (!perf_event_can_stop_tick()) { |
184 | trace_tick_stop(0, "perf events running\n"); | |
9014c45d | 185 | return false; |
cb41a290 | 186 | } |
9014c45d FW |
187 | |
188 | /* sched_clock_tick() needs us? */ | |
189 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | |
190 | /* | |
191 | * TODO: kick full dynticks CPUs when | |
192 | * sched_clock_stable is set. | |
193 | */ | |
cb41a290 FW |
194 | if (!sched_clock_stable) { |
195 | trace_tick_stop(0, "unstable sched clock\n"); | |
9014c45d | 196 | return false; |
cb41a290 | 197 | } |
9014c45d FW |
198 | #endif |
199 | ||
200 | return true; | |
201 | } | |
202 | ||
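can_stop_full_tick() is a veto chain: the first subsystem that still needs the periodic tick returns false, and the tracepoint records why. A self-contained sketch of the same pattern with stand-in predicates (sched_ok() and friends are illustrative, not kernel APIs):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel predicates above. */
static bool sched_ok(void) { return true;  }
static bool posix_ok(void) { return true;  }
static bool perf_ok(void)  { return false; } /* perf still needs the tick */

/* First veto wins, and the caller learns the reason. */
static bool can_stop_tick(const char **reason)
{
	if (!sched_ok()) { *reason = "more than 1 task in runqueue"; return false; }
	if (!posix_ok()) { *reason = "posix timers running";         return false; }
	if (!perf_ok())  { *reason = "perf events running";          return false; }
	return true;
}

int main(void)
{
	const char *why = NULL;
	if (!can_stop_tick(&why))
		printf("tick kept: %s\n", why);
	return 0;
}
```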
203 | static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now); | |
204 | ||
76c24fb0 FW |
205 | /* |
206 | * Re-evaluate the need for the tick on the current CPU | |
207 | * and restart it if necessary. | |
208 | */ | |
ff442c51 | 209 | void tick_nohz_full_check(void) |
76c24fb0 | 210 | { |
9014c45d FW |
211 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); |
212 | ||
213 | if (tick_nohz_full_cpu(smp_processor_id())) { | |
214 | if (ts->tick_stopped && !is_idle_task(current)) { | |
215 | if (!can_stop_full_tick()) | |
216 | tick_nohz_restart_sched_tick(ts, ktime_get()); | |
217 | } | |
218 | } | |
76c24fb0 FW |
219 | } |
220 | ||
221 | static void nohz_full_kick_work_func(struct irq_work *work) | |
222 | { | |
223 | tick_nohz_full_check(); | |
224 | } | |
225 | ||
226 | static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { | |
227 | .func = nohz_full_kick_work_func, | |
228 | }; | |
229 | ||
230 | /* | |
231 | * Kick the current CPU if it's full dynticks in order to force it to | |
232 | * re-evaluate its dependency on the tick and restart it if necessary. | |
233 | */ | |
234 | void tick_nohz_full_kick(void) | |
235 | { | |
236 | if (tick_nohz_full_cpu(smp_processor_id())) | |
237 | irq_work_queue(&__get_cpu_var(nohz_full_kick_work)); | |
238 | } | |
239 | ||
240 | static void nohz_full_kick_ipi(void *info) | |
241 | { | |
242 | tick_nohz_full_check(); | |
243 | } | |
244 | ||
245 | /* | |
246 | * Kick all full dynticks CPUs in order to force these to re-evaluate | |
247 | * their dependency on the tick and restart it if necessary. | |
248 | */ | |
249 | void tick_nohz_full_kick_all(void) | |
250 | { | |
251 | if (!have_nohz_full_mask) | |
252 | return; | |
253 | ||
254 | preempt_disable(); | |
255 | smp_call_function_many(nohz_full_mask, | |
256 | nohz_full_kick_ipi, NULL, false); | |
257 | preempt_enable(); | |
258 | } | |
259 | ||
99e5ada9 FW |
260 | /* |
261 | * Re-evaluate the need for the tick as we switch the current task. | |
262 | * It might need the tick due to per task/process properties: | |
263 | * perf events, posix cpu timers, ... | |
264 | */ | |
265 | void tick_nohz_task_switch(struct task_struct *tsk) | |
266 | { | |
267 | unsigned long flags; | |
268 | ||
99e5ada9 FW |
269 | local_irq_save(flags); |
270 | ||
6296ace4 LZ |
271 | if (!tick_nohz_full_cpu(smp_processor_id())) |
272 | goto out; | |
273 | ||
99e5ada9 FW |
274 | if (tick_nohz_tick_stopped() && !can_stop_full_tick()) |
275 | tick_nohz_full_kick(); | |
276 | ||
6296ace4 | 277 | out: |
99e5ada9 FW |
278 | local_irq_restore(flags); |
279 | } | |
280 | ||
c5bfece2 | 281 | int tick_nohz_full_cpu(int cpu) |
a831881b | 282 | { |
c5bfece2 | 283 | if (!have_nohz_full_mask) |
a831881b FW |
284 | return 0; |
285 | ||
c5bfece2 | 286 | return cpumask_test_cpu(cpu, nohz_full_mask); |
a831881b FW |
287 | } |
288 | ||
289 | /* Parse the boot-time nohz CPU list from the kernel parameters. */ | |
c5bfece2 | 290 | static int __init tick_nohz_full_setup(char *str) |
a831881b | 291 | { |
0453b435 FW |
292 | int cpu; |
293 | ||
c5bfece2 | 294 | alloc_bootmem_cpumask_var(&nohz_full_mask); |
0453b435 | 295 | if (cpulist_parse(str, nohz_full_mask) < 0) { |
c5bfece2 | 296 | pr_warning("NOHZ: Incorrect nohz_full cpumask\n"); |
0453b435 FW |
297 | return 1; |
298 | } | |
299 | ||
300 | cpu = smp_processor_id(); | |
301 | if (cpumask_test_cpu(cpu, nohz_full_mask)) { | |
302 | pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); | |
303 | cpumask_clear_cpu(cpu, nohz_full_mask); | |
304 | } | |
305 | have_nohz_full_mask = true; | |
306 | ||
a831881b FW |
307 | return 1; |
308 | } | |
c5bfece2 | 309 | __setup("nohz_full=", tick_nohz_full_setup); |
a831881b | 310 | |
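For example, a boot command line entry such as the following (CPU range chosen for illustration) marks CPUs 1-7 as full dynticks; the parser above clears the boot CPU from the mask if it was included, since one CPU must keep the timekeeping duty:

```
nohz_full=1-7
```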
a382bf93 FW |
311 | static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb, |
312 | unsigned long action, | |
313 | void *hcpu) | |
314 | { | |
315 | unsigned int cpu = (unsigned long)hcpu; | |
316 | ||
317 | switch (action & ~CPU_TASKS_FROZEN) { | |
318 | case CPU_DOWN_PREPARE: | |
319 | /* | |
320 | * If we handle the timekeeping duty for full dynticks CPUs, | |
321 | * we can't safely shutdown that CPU. | |
322 | */ | |
c5bfece2 | 323 | if (have_nohz_full_mask && tick_do_timer_cpu == cpu) |
1a7f829f | 324 | return NOTIFY_BAD; |
a382bf93 FW |
325 | break; |
326 | } | |
327 | return NOTIFY_OK; | |
328 | } | |
329 | ||
1034fc2f FW |
330 | /* |
331 | * The worst-case string length happens when the CPU list is |
332 | * split into single-CPU chunks two apart: 0,2,4,6,... |
333 | * That needs NR_CPUS + sizeof('\0') bytes. |
334 | */ | |
c5bfece2 | 335 | static char __initdata nohz_full_buf[NR_CPUS + 1]; |
1034fc2f | 336 | |
f98823ac FW |
337 | static int tick_nohz_init_all(void) |
338 | { | |
339 | int err = -1; | |
340 | ||
341 | #ifdef CONFIG_NO_HZ_FULL_ALL | |
342 | if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) { | |
343 | pr_err("NO_HZ: Can't allocate full dynticks cpumask\n"); | |
344 | return err; | |
345 | } | |
346 | err = 0; | |
347 | cpumask_setall(nohz_full_mask); | |
348 | cpumask_clear_cpu(smp_processor_id(), nohz_full_mask); | |
349 | have_nohz_full_mask = true; | |
350 | #endif | |
351 | return err; | |
352 | } | |
353 | ||
d1e43fa5 | 354 | void __init tick_nohz_init(void) |
a831881b | 355 | { |
d1e43fa5 FW |
356 | int cpu; |
357 | ||
f98823ac FW |
358 | if (!have_nohz_full_mask) { |
359 | if (tick_nohz_init_all() < 0) | |
360 | return; | |
361 | } | |
d1e43fa5 FW |
362 | |
363 | cpu_notifier(tick_nohz_cpu_down_callback, 0); | |
364 | ||
365 | /* Make sure full dynticks CPUs are also RCU nocbs */ |
366 | for_each_cpu(cpu, nohz_full_mask) { | |
367 | if (!rcu_is_nocb_cpu(cpu)) { | |
368 | pr_warning("NO_HZ: CPU %d is not RCU nocb: " | |
369 | "cleared from nohz_full range", cpu); | |
370 | cpumask_clear_cpu(cpu, nohz_full_mask); | |
371 | } | |
372 | } | |
a831881b | 373 | |
c5bfece2 FW |
374 | cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask); |
375 | pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf); | |
a831881b | 376 | } |
a831881b | 377 | #else |
c5bfece2 | 378 | #define have_nohz_full_mask (0) |
a831881b FW |
379 | #endif |
380 | ||
79bf2bb3 TG |
381 | /* |
382 | * NOHZ - aka dynamic tick functionality | |
383 | */ | |
3451d024 | 384 | #ifdef CONFIG_NO_HZ_COMMON |
79bf2bb3 TG |
385 | /* |
386 | * NO HZ enabled ? | |
387 | */ | |
9d2ad243 | 388 | int tick_nohz_enabled __read_mostly = 1; |
79bf2bb3 TG |
389 | |
390 | /* | |
391 | * Enable / Disable tickless mode | |
392 | */ | |
393 | static int __init setup_tick_nohz(char *str) | |
394 | { | |
395 | if (!strcmp(str, "off")) | |
396 | tick_nohz_enabled = 0; | |
397 | else if (!strcmp(str, "on")) | |
398 | tick_nohz_enabled = 1; | |
399 | else | |
400 | return 0; | |
401 | return 1; | |
402 | } | |
403 | ||
404 | __setup("nohz=", setup_tick_nohz); | |
405 | ||
406 | /** | |
407 | * tick_nohz_update_jiffies - update jiffies when idle was interrupted | |
408 | * | |
409 | * Called from interrupt entry when the CPU was idle | |
410 | * | |
411 | * In case the sched_tick was stopped on this CPU, we have to check if jiffies | |
412 | * must be updated. Otherwise an interrupt handler could use a stale jiffy | |
413 | * value. We do this unconditionally on any cpu, as we don't know whether the | |
414 | * cpu which has the update task assigned is in a long sleep. |
415 | */ | |
eed3b9cf | 416 | static void tick_nohz_update_jiffies(ktime_t now) |
79bf2bb3 TG |
417 | { |
418 | int cpu = smp_processor_id(); | |
419 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
420 | unsigned long flags; | |
79bf2bb3 | 421 | |
5df7fa1c | 422 | ts->idle_waketime = now; |
79bf2bb3 TG |
423 | |
424 | local_irq_save(flags); | |
425 | tick_do_update_jiffies64(now); | |
426 | local_irq_restore(flags); | |
02ff3755 IM |
427 | |
428 | touch_softlockup_watchdog(); | |
79bf2bb3 TG |
429 | } |
430 | ||
595aac48 AV |
431 | /* |
432 | * Updates the per cpu time idle statistics counters | |
433 | */ | |
8d63bf94 | 434 | static void |
8c215bd3 | 435 | update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) |
6378ddb5 | 436 | { |
eed3b9cf | 437 | ktime_t delta; |
6378ddb5 | 438 | |
595aac48 AV |
439 | if (ts->idle_active) { |
440 | delta = ktime_sub(now, ts->idle_entrytime); | |
8c215bd3 | 441 | if (nr_iowait_cpu(cpu) > 0) |
0224cf4c | 442 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); |
6beea0cd MH |
443 | else |
444 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | |
8c7b09f4 | 445 | ts->idle_entrytime = now; |
595aac48 | 446 | } |
8d63bf94 | 447 | |
e0e37c20 | 448 | if (last_update_time) |
8d63bf94 AV |
449 | *last_update_time = ktime_to_us(now); |
450 | ||
595aac48 AV |
451 | } |
452 | ||
453 | static void tick_nohz_stop_idle(int cpu, ktime_t now) | |
454 | { | |
455 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
456 | ||
8c215bd3 | 457 | update_ts_time_stats(cpu, ts, now, NULL); |
eed3b9cf | 458 | ts->idle_active = 0; |
6fa3eb70 S |
459 | #ifdef CONFIG_MT_LOAD_BALANCE_PROFILER |
460 | mt_lbprof_update_state(cpu, MT_LBPROF_NO_TASK_STATE); | |
461 | #endif | |
56c7426b | 462 | |
eed3b9cf | 463 | sched_clock_idle_wakeup_event(0); |
6378ddb5 VP |
464 | } |
465 | ||
8c215bd3 | 466 | static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) |
6378ddb5 | 467 | { |
430ee881 | 468 | ktime_t now = ktime_get(); |
595aac48 | 469 | |
6378ddb5 VP |
470 | ts->idle_entrytime = now; |
471 | ts->idle_active = 1; | |
6fa3eb70 S |
472 | #ifdef CONFIG_MT_LOAD_BALANCE_PROFILER |
473 | mt_lbprof_update_state(cpu, MT_LBPROF_NO_TASK_STATE); | |
474 | #endif | |
475 | ||
56c7426b | 476 | sched_clock_idle_sleep_event(); |
6378ddb5 VP |
477 | return now; |
478 | } | |
479 | ||
b1f724c3 AV |
480 | /** |
481 | * get_cpu_idle_time_us - get the total idle time of a cpu | |
482 | * @cpu: CPU number to query | |
09a1d34f MH |
483 | * @last_update_time: variable to store update time in. Do not update |
484 | * counters if NULL. | |
b1f724c3 AV |
485 | * |
486 | * Return the cumulative idle time (since boot) for a given |
6beea0cd | 487 | * CPU, in microseconds. |
b1f724c3 AV |
488 | * |
489 | * This time is measured via accounting rather than sampling, | |
490 | * and is as accurate as ktime_get() is. | |
491 | * | |
492 | * This function returns -1 if NOHZ is not enabled. | |
493 | */ | |
6378ddb5 VP |
494 | u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) |
495 | { | |
496 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
09a1d34f | 497 | ktime_t now, idle; |
6378ddb5 | 498 | |
8083e4ad | 499 | if (!tick_nohz_enabled) |
500 | return -1; | |
501 | ||
09a1d34f MH |
502 | now = ktime_get(); |
503 | if (last_update_time) { | |
504 | update_ts_time_stats(cpu, ts, now, last_update_time); | |
505 | idle = ts->idle_sleeptime; | |
506 | } else { | |
507 | if (ts->idle_active && !nr_iowait_cpu(cpu)) { | |
508 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | |
509 | ||
510 | idle = ktime_add(ts->idle_sleeptime, delta); | |
511 | } else { | |
512 | idle = ts->idle_sleeptime; | |
513 | } | |
514 | } | |
515 | ||
516 | return ktime_to_us(idle); | |
8083e4ad | 517 | |
6378ddb5 | 518 | } |
8083e4ad | 519 | EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); |
6378ddb5 | 520 | |
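Consumers such as cpufreq governors typically sample this counter twice and derive a busy percentage from the deltas. A sketch of that calculation in plain C with made-up sample values (the real calls to get_cpu_idle_time_us(cpu, &wall) need kernel context):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two (idle_us, wall_us) samples, nominally 100 ms apart. */
	uint64_t idle0 = 5000000, wall0 = 20000000;
	uint64_t idle1 = 5080000, wall1 = 20100000;

	uint64_t idle_delta = idle1 - idle0;  /*  80,000 us idle  */
	uint64_t wall_delta = wall1 - wall0;  /* 100,000 us total */

	unsigned int load =
		(unsigned int)(100 * (wall_delta - idle_delta) / wall_delta);
	printf("cpu load: %u%%\n", load);     /* prints 20% */
	return 0;
}
```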
6beea0cd | 521 | /** |
0224cf4c AV |
522 | * get_cpu_iowait_time_us - get the total iowait time of a cpu |
523 | * @cpu: CPU number to query | |
09a1d34f MH |
524 | * @last_update_time: variable to store update time in. Do not update |
525 | * counters if NULL. | |
0224cf4c AV |
526 | * |
527 | * Return the cumulative iowait time (since boot) for a given |
528 | * CPU, in microseconds. | |
529 | * | |
530 | * This time is measured via accounting rather than sampling, | |
531 | * and is as accurate as ktime_get() is. | |
532 | * | |
533 | * This function returns -1 if NOHZ is not enabled. | |
534 | */ | |
535 | u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) | |
536 | { | |
537 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
09a1d34f | 538 | ktime_t now, iowait; |
0224cf4c AV |
539 | |
540 | if (!tick_nohz_enabled) | |
541 | return -1; | |
542 | ||
09a1d34f MH |
543 | now = ktime_get(); |
544 | if (last_update_time) { | |
545 | update_ts_time_stats(cpu, ts, now, last_update_time); | |
546 | iowait = ts->iowait_sleeptime; | |
547 | } else { | |
548 | if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { | |
549 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | |
0224cf4c | 550 | |
09a1d34f MH |
551 | iowait = ktime_add(ts->iowait_sleeptime, delta); |
552 | } else { | |
553 | iowait = ts->iowait_sleeptime; | |
554 | } | |
555 | } | |
0224cf4c | 556 | |
09a1d34f | 557 | return ktime_to_us(iowait); |
0224cf4c AV |
558 | } |
559 | EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); | |
560 | ||
84bf1bcc FW |
561 | static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, |
562 | ktime_t now, int cpu) | |
79bf2bb3 | 563 | { |
280f0677 | 564 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; |
84bf1bcc | 565 | ktime_t last_update, expires, ret = { .tv64 = 0 }; |
aa9b1630 | 566 | unsigned long rcu_delta_jiffies; |
4f86d3a8 | 567 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; |
98962465 | 568 | u64 time_delta; |
79bf2bb3 | 569 | |
79bf2bb3 TG |
570 | /* Read jiffies and the time when jiffies were updated last */ |
571 | do { | |
d6ad4187 | 572 | seq = read_seqbegin(&jiffies_lock); |
79bf2bb3 TG |
573 | last_update = last_jiffies_update; |
574 | last_jiffies = jiffies; | |
27185016 | 575 | time_delta = timekeeping_max_deferment(); |
d6ad4187 | 576 | } while (read_seqretry(&jiffies_lock, seq)); |
79bf2bb3 | 577 | |
74876a98 | 578 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || |
00b42959 | 579 | arch_needs_cpu(cpu) || irq_work_needs_cpu()) { |
3c5d92a0 | 580 | next_jiffies = last_jiffies + 1; |
6ba9b346 | 581 | delta_jiffies = 1; |
3c5d92a0 MS |
582 | } else { |
583 | /* Get the next timer wheel timer */ | |
584 | next_jiffies = get_next_timer_interrupt(last_jiffies); | |
585 | delta_jiffies = next_jiffies - last_jiffies; | |
aa9b1630 PM |
586 | if (rcu_delta_jiffies < delta_jiffies) { |
587 | next_jiffies = last_jiffies + rcu_delta_jiffies; | |
588 | delta_jiffies = rcu_delta_jiffies; | |
589 | } | |
3c5d92a0 | 590 | } |
47aa8b6c | 591 | |
79bf2bb3 | 592 | /* |
47aa8b6c IM |
593 | * Do not stop the tick, if we are only one off (or less) |
594 | * or if the cpu is required for RCU: | |
79bf2bb3 | 595 | */ |
47aa8b6c | 596 | if (!ts->tick_stopped && delta_jiffies <= 1) |
79bf2bb3 TG |
597 | goto out; |
598 | ||
599 | /* Schedule the tick, if we are at least one jiffy off */ |
600 | if ((long)delta_jiffies >= 1) { | |
601 | ||
00147449 WR |
602 | /* |
603 | * If this cpu is the one which updates jiffies, then | |
604 | * give up the assignment and let it be taken by the | |
605 | * cpu which runs the tick timer next, which might be | |
606 | * this cpu as well. If we don't drop this here the | |
607 | * jiffies might be stale and do_timer() never | |
27185016 TG |
608 | * invoked. Keep track of the fact that it was the one |
609 | * which had the do_timer() duty last. If this cpu is | |
610 | * the one which had the do_timer() duty last, we | |
611 | * limit the sleep time to the timekeeping | |
612 | * max_deferment value which we retrieved |
613 | * above. Otherwise we can sleep as long as we want. | |
00147449 | 614 | */ |
27185016 | 615 | if (cpu == tick_do_timer_cpu) { |
00147449 | 616 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
27185016 TG |
617 | ts->do_timer_last = 1; |
618 | } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { | |
619 | time_delta = KTIME_MAX; | |
620 | ts->do_timer_last = 0; | |
621 | } else if (!ts->do_timer_last) { | |
622 | time_delta = KTIME_MAX; | |
623 | } | |
624 | ||
265f22a9 FW |
625 | #ifdef CONFIG_NO_HZ_FULL |
626 | if (!ts->inidle) { | |
627 | time_delta = min(time_delta, | |
628 | scheduler_tick_max_deferment()); | |
629 | } | |
630 | #endif | |
631 | ||
00147449 | 632 | /* |
98962465 JH |
633 | * calculate the expiry time for the next timer wheel |
634 | * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals | |
635 | * that there is no timer pending or at least extremely | |
636 | * far into the future (12 days for HZ=1000). In this | |
637 | * case we set the expiry to the end of time. | |
638 | */ | |
639 | if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) { | |
640 | /* | |
641 | * Calculate the time delta for the next timer event. | |
642 | * If the time delta exceeds the maximum time delta | |
643 | * permitted by the current clocksource then adjust | |
644 | * the time delta accordingly to ensure the | |
645 | * clocksource does not wrap. | |
646 | */ | |
647 | time_delta = min_t(u64, time_delta, | |
648 | tick_period.tv64 * delta_jiffies); | |
98962465 | 649 | } |
00147449 | 650 | |
27185016 TG |
651 | if (time_delta < KTIME_MAX) |
652 | expires = ktime_add_ns(last_update, time_delta); | |
653 | else | |
654 | expires.tv64 = KTIME_MAX; | |
00147449 | 655 | |
00147449 WR |
656 | /* Skip reprogram of event if its not changed */ |
657 | if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) | |
658 | goto out; | |
659 | ||
84bf1bcc FW |
660 | ret = expires; |
661 | ||
79bf2bb3 TG |
662 | /* |
663 | * nohz_stop_sched_tick can be called several times before | |
664 | * the nohz_restart_sched_tick is called. This happens when | |
665 | * interrupts arrive which do not cause a reschedule. In the | |
666 | * first call we save the current tick time, so we can restart | |
667 | * the scheduler tick in nohz_restart_sched_tick. | |
668 | */ | |
669 | if (!ts->tick_stopped) { | |
c1cc017c | 670 | nohz_balance_enter_idle(cpu); |
5167e8d5 | 671 | calc_load_enter_idle(); |
46cb4b7c | 672 | |
f5d411c9 | 673 | ts->last_tick = hrtimer_get_expires(&ts->sched_timer); |
79bf2bb3 | 674 | ts->tick_stopped = 1; |
cb41a290 | 675 | trace_tick_stop(1, " "); |
79bf2bb3 | 676 | } |
d3ed7824 | 677 | |
eaad084b | 678 | /* |
98962465 JH |
679 | * If the expiration time == KTIME_MAX, we simply |
680 | * stop the tick timer. |
eaad084b | 681 | */ |
98962465 | 682 | if (unlikely(expires.tv64 == KTIME_MAX)) { |
eaad084b TG |
683 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) |
684 | hrtimer_cancel(&ts->sched_timer); | |
685 | goto out; | |
686 | } | |
687 | ||
79bf2bb3 TG |
688 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { |
689 | hrtimer_start(&ts->sched_timer, expires, | |
5c333864 | 690 | HRTIMER_MODE_ABS_PINNED); |
79bf2bb3 TG |
691 | /* Check, if the timer was already in the past */ |
692 | if (hrtimer_active(&ts->sched_timer)) | |
693 | goto out; | |
4c9dc641 | 694 | } else if (!tick_program_event(expires, 0)) |
79bf2bb3 TG |
695 | goto out; |
696 | /* | |
697 | * We are past the event already. So we crossed a | |
698 | * jiffy boundary. Update jiffies and raise the |
699 | * softirq. | |
700 | */ | |
701 | tick_do_update_jiffies64(ktime_get()); | |
79bf2bb3 TG |
702 | } |
703 | raise_softirq_irqoff(TIMER_SOFTIRQ); | |
704 | out: | |
705 | ts->next_jiffies = next_jiffies; | |
706 | ts->last_jiffies = last_jiffies; | |
4f86d3a8 | 707 | ts->sleep_length = ktime_sub(dev->next_event, now); |
84bf1bcc FW |
708 | |
709 | return ret; | |
280f0677 FW |
710 | } |
711 | ||
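The expiry above is the shortest of several bounds: the next timer-wheel event (delta_jiffies tick periods away), the clocksource's timekeeping_max_deferment(), and, under CONFIG_NO_HZ_FULL outside idle, the scheduler's deferment limit. A worked sketch of the clamp with illustrative numbers:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t tick_period_ns = 1000000;   /* HZ=1000                     */
	uint64_t delta_jiffies  = 250;       /* next wheel timer: 250 ticks */
	uint64_t max_deferment  = 200000000; /* clocksource bound: 200 ms   */

	/* Mirror of the min_t(u64, ...) clamp in the function above. */
	uint64_t time_delta = min_u64(max_deferment,
				      tick_period_ns * delta_jiffies);

	/* Prints 200000000: the clocksource bound wins here. */
	printf("sleep for %llu ns\n", (unsigned long long)time_delta);
	return 0;
}
```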
5811d996 FW |
712 | static void tick_nohz_full_stop_tick(struct tick_sched *ts) |
713 | { | |
714 | #ifdef CONFIG_NO_HZ_FULL | |
715 | int cpu = smp_processor_id(); | |
716 | ||
717 | if (!tick_nohz_full_cpu(cpu) || is_idle_task(current)) | |
718 | return; | |
719 | ||
720 | if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) | |
721 | return; | |
722 | ||
723 | if (!can_stop_full_tick()) | |
724 | return; | |
725 | ||
726 | tick_nohz_stop_sched_tick(ts, ktime_get(), cpu); | |
727 | #endif | |
728 | } | |
729 | ||
5b39939a FW |
730 | static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) |
731 | { | |
732 | /* | |
733 | * If this cpu is offline and it is the one which updates | |
734 | * jiffies, then give up the assignment and let it be taken by | |
735 | * the cpu which runs the tick timer next. If we don't drop | |
736 | * this here the jiffies might be stale and do_timer() never | |
737 | * invoked. | |
738 | */ | |
739 | if (unlikely(!cpu_online(cpu))) { | |
740 | if (cpu == tick_do_timer_cpu) | |
741 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | |
f7ea0fd6 | 742 | return false; |
5b39939a FW |
743 | } |
744 | ||
ec804bd9 TG |
745 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { |
746 | ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; | |
5b39939a | 747 | return false; |
ec804bd9 | 748 | } |
5b39939a FW |
749 | |
750 | if (need_resched()) | |
751 | return false; | |
752 | ||
753 | if (unlikely(local_softirq_pending() && cpu_online(cpu))) { | |
754 | static int ratelimit; | |
755 | ||
803b0eba PM |
756 | if (ratelimit < 10 && |
757 | (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { | |
cfea7d7e RV |
758 | pr_warn("NOHZ: local_softirq_pending %02x\n", |
759 | (unsigned int) local_softirq_pending()); | |
5b39939a FW |
760 | ratelimit++; |
761 | } | |
762 | return false; | |
763 | } | |
764 | ||
c5bfece2 | 765 | if (have_nohz_full_mask) { |
a382bf93 FW |
766 | /* |
767 | * Keep the tick alive to guarantee timekeeping progression | |
768 | * if there are full dynticks CPUs around | |
769 | */ | |
770 | if (tick_do_timer_cpu == cpu) | |
771 | return false; | |
772 | /* | |
773 | * Boot safety: make sure the timekeeping duty has been | |
774 | * assigned before entering dyntick-idle mode, | |
775 | */ | |
776 | if (tick_do_timer_cpu == TICK_DO_TIMER_NONE) | |
777 | return false; | |
778 | } | |
779 | ||
5b39939a FW |
780 | return true; |
781 | } | |
782 | ||
19f5f736 FW |
783 | static void __tick_nohz_idle_enter(struct tick_sched *ts) |
784 | { | |
84bf1bcc | 785 | ktime_t now, expires; |
5b39939a | 786 | int cpu = smp_processor_id(); |
19f5f736 | 787 | |
5b39939a | 788 | now = tick_nohz_start_idle(cpu, ts); |
2ac0d98f | 789 | |
5b39939a FW |
790 | if (can_stop_idle_tick(cpu, ts)) { |
791 | int was_stopped = ts->tick_stopped; | |
792 | ||
793 | ts->idle_calls++; | |
84bf1bcc FW |
794 | |
795 | expires = tick_nohz_stop_sched_tick(ts, now, cpu); | |
796 | if (expires.tv64 > 0LL) { | |
797 | ts->idle_sleeps++; | |
798 | ts->idle_expires = expires; | |
799 | } | |
5b39939a FW |
800 | |
801 | if (!was_stopped && ts->tick_stopped) | |
802 | ts->idle_jiffies = ts->last_jiffies; | |
803 | } | |
280f0677 FW |
804 | } |
805 | ||
806 | /** | |
807 | * tick_nohz_idle_enter - stop the idle tick from the idle task | |
808 | * | |
809 | * When the next event is more than a tick into the future, stop the idle tick. |
810 | * Called when we start the idle loop. | |
2bbb6817 | 811 | * |
1268fbc7 | 812 | * The arch is responsible of calling: |
2bbb6817 FW |
813 | * |
814 | * - rcu_idle_enter() after its last use of RCU before the CPU is put | |
815 | * to sleep. | |
816 | * - rcu_idle_exit() before the first use of RCU after the CPU is woken up. | |
280f0677 | 817 | */ |
1268fbc7 | 818 | void tick_nohz_idle_enter(void) |
280f0677 FW |
819 | { |
820 | struct tick_sched *ts; | |
821 | ||
1268fbc7 FW |
822 | WARN_ON_ONCE(irqs_disabled()); |
823 | ||
0db49b72 LT |
824 | /* |
825 | * Update the idle state in the scheduler domain hierarchy | |
826 | * when tick_nohz_stop_sched_tick() is called from the idle loop. | |
827 | * State will be updated to busy during the first busy tick after | |
828 | * exiting idle. | |
829 | */ | |
830 | set_cpu_sd_state_idle(); | |
831 | ||
1268fbc7 FW |
832 | local_irq_disable(); |
833 | ||
280f0677 FW |
834 | ts = &__get_cpu_var(tick_cpu_sched); |
835 | /* | |
836 | * Set ts->inidle unconditionally. Even if the system did not |
837 | * switch to nohz mode, the cpu frequency governors rely on the |
838 | * update of the idle time accounting in tick_nohz_start_idle(). | |
839 | */ | |
840 | ts->inidle = 1; | |
19f5f736 | 841 | __tick_nohz_idle_enter(ts); |
1268fbc7 FW |
842 | |
843 | local_irq_enable(); | |
280f0677 | 844 | } |
4dbd2771 | 845 | EXPORT_SYMBOL_GPL(tick_nohz_idle_enter); |
280f0677 FW |
846 | |
847 | /** | |
848 | * tick_nohz_irq_exit - update next tick event from interrupt exit | |
849 | * | |
850 | * When an interrupt fires while we are idle and it doesn't cause | |
851 | * a reschedule, it may still add, modify or delete a timer, enqueue | |
852 | * an RCU callback, etc... | |
853 | * So we need to re-calculate and reprogram the next tick event. | |
854 | */ | |
855 | void tick_nohz_irq_exit(void) | |
856 | { | |
857 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
858 | ||
d201a0b9 | 859 | if (ts->inidle) |
5811d996 | 860 | __tick_nohz_idle_enter(ts); |
d201a0b9 | 861 | else |
5811d996 | 862 | tick_nohz_full_stop_tick(ts); |
79bf2bb3 TG |
863 | } |
864 | ||
4f86d3a8 LB |
865 | /** |
866 | * tick_nohz_get_sleep_length - return the length of the current sleep | |
867 | * | |
868 | * Called from power state control code with interrupts disabled | |
869 | */ | |
870 | ktime_t tick_nohz_get_sleep_length(void) | |
871 | { | |
872 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
873 | ||
874 | return ts->sleep_length; | |
875 | } | |
876 | ||
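cpuidle governors use this value to pick an idle state whose target residency fits inside the expected sleep. A simplified sketch of that selection (the state table is invented; the real menu governor also weighs latency constraints and predicted wakeups):

```c
#include <stdint.h>
#include <stdio.h>

struct idle_state {
	const char *name;
	uint64_t target_residency_ns; /* minimum sleep worth entering */
};

int main(void)
{
	struct idle_state states[] = {   /* illustrative, deepest last */
		{ "C1",    2000 },
		{ "C3",  100000 },
		{ "C6", 1000000 },
	};
	uint64_t sleep_ns = 300000; /* pretend sleep_length said 300 us */

	int pick = 0;
	for (int i = 0; i < 3; i++)
		if (states[i].target_residency_ns <= sleep_ns)
			pick = i;

	printf("chose %s\n", states[pick].name); /* C3 */
	return 0;
}
```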
c34bec5a TG |
877 | static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) |
878 | { | |
879 | hrtimer_cancel(&ts->sched_timer); | |
f5d411c9 | 880 | hrtimer_set_expires(&ts->sched_timer, ts->last_tick); |
c34bec5a TG |
881 | |
882 | while (1) { | |
883 | /* Forward the time to expire in the future */ | |
884 | hrtimer_forward(&ts->sched_timer, now, tick_period); | |
885 | ||
886 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { | |
268a3dcf | 887 | hrtimer_start_expires(&ts->sched_timer, |
5c333864 | 888 | HRTIMER_MODE_ABS_PINNED); |
c34bec5a TG |
889 | /* Check, if the timer was already in the past */ |
890 | if (hrtimer_active(&ts->sched_timer)) | |
891 | break; | |
892 | } else { | |
268a3dcf TG |
893 | if (!tick_program_event( |
894 | hrtimer_get_expires(&ts->sched_timer), 0)) | |
c34bec5a TG |
895 | break; |
896 | } | |
6f103929 | 897 | /* Reread time and update jiffies */ |
c34bec5a | 898 | now = ktime_get(); |
6f103929 | 899 | tick_do_update_jiffies64(now); |
c34bec5a TG |
900 | } |
901 | } | |
902 | ||
19f5f736 | 903 | static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) |
79bf2bb3 | 904 | { |
79bf2bb3 | 905 | /* Update jiffies first */ |
79bf2bb3 | 906 | tick_do_update_jiffies64(now); |
5aaa0b7a | 907 | update_cpu_load_nohz(); |
79bf2bb3 | 908 | |
749c8814 | 909 | calc_load_exit_idle(); |
2ac0d98f FW |
910 | touch_softlockup_watchdog(); |
911 | /* | |
912 | * Cancel the scheduled timer and restore the tick | |
913 | */ | |
914 | ts->tick_stopped = 0; | |
915 | ts->idle_exittime = now; | |
916 | ||
917 | tick_nohz_restart(ts, now); | |
918 | } | |
919 | ||
920 | static void tick_nohz_account_idle_ticks(struct tick_sched *ts) | |
921 | { | |
3f4724ea | 922 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
2ac0d98f | 923 | unsigned long ticks; |
3f4724ea FW |
924 | |
925 | if (vtime_accounting_enabled()) | |
926 | return; | |
79bf2bb3 TG |
927 | /* |
928 | * We stopped the tick in idle. update_process_times() would miss |
929 | * the time we slept, as it only does a single tick worth of |
930 | * accounting. Enforce that this is accounted to idle! |
931 | */ | |
932 | ticks = jiffies - ts->idle_jiffies; | |
933 | /* | |
934 | * We might be one off. Do not randomly account a huge number of ticks! | |
935 | */ | |
79741dd3 MS |
936 | if (ticks && ticks < LONG_MAX) |
937 | account_idle_ticks(ticks); | |
938 | #endif | |
19f5f736 FW |
939 | } |
940 | ||
79bf2bb3 | 941 | /** |
280f0677 | 942 | * tick_nohz_idle_exit - restart the idle tick from the idle task |
79bf2bb3 TG |
943 | * |
944 | * Restart the idle tick when the CPU is woken up from idle | |
280f0677 FW |
945 | * This also exits the RCU extended quiescent state. The CPU |
946 | * can use RCU again after this function is called. | |
79bf2bb3 | 947 | */ |
280f0677 | 948 | void tick_nohz_idle_exit(void) |
79bf2bb3 TG |
949 | { |
950 | int cpu = smp_processor_id(); | |
951 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
6378ddb5 | 952 | ktime_t now; |
79bf2bb3 | 953 | |
6378ddb5 | 954 | local_irq_disable(); |
2bbb6817 | 955 | |
15f827be FW |
956 | WARN_ON_ONCE(!ts->inidle); |
957 | ||
958 | ts->inidle = 0; | |
959 | ||
960 | if (ts->idle_active || ts->tick_stopped) | |
eed3b9cf MS |
961 | now = ktime_get(); |
962 | ||
963 | if (ts->idle_active) | |
964 | tick_nohz_stop_idle(cpu, now); | |
6378ddb5 | 965 | |
2ac0d98f | 966 | if (ts->tick_stopped) { |
19f5f736 | 967 | tick_nohz_restart_sched_tick(ts, now); |
2ac0d98f | 968 | tick_nohz_account_idle_ticks(ts); |
6378ddb5 | 969 | } |
79bf2bb3 | 970 | |
79bf2bb3 TG |
971 | local_irq_enable(); |
972 | } | |
4dbd2771 | 973 | EXPORT_SYMBOL_GPL(tick_nohz_idle_exit); |
79bf2bb3 TG |
974 | |
975 | static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) | |
976 | { | |
977 | hrtimer_forward(&ts->sched_timer, now, tick_period); | |
cc584b21 | 978 | return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0); |
79bf2bb3 TG |
979 | } |
980 | ||
981 | /* | |
982 | * The nohz low res interrupt handler | |
983 | */ | |
984 | static void tick_nohz_handler(struct clock_event_device *dev) | |
985 | { | |
986 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
987 | struct pt_regs *regs = get_irq_regs(); | |
988 | ktime_t now = ktime_get(); | |
989 | ||
990 | dev->next_event.tv64 = KTIME_MAX; | |
991 | ||
5bb96226 | 992 | tick_sched_do_timer(now); |
9e8f559b | 993 | tick_sched_handle(ts, regs); |
79bf2bb3 | 994 | |
79bf2bb3 TG |
995 | while (tick_nohz_reprogram(ts, now)) { |
996 | now = ktime_get(); | |
997 | tick_do_update_jiffies64(now); | |
998 | } | |
999 | } | |
1000 | ||
1001 | /** | |
1002 | * tick_nohz_switch_to_nohz - switch to nohz mode | |
1003 | */ | |
1004 | static void tick_nohz_switch_to_nohz(void) | |
1005 | { | |
1006 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
1007 | ktime_t next; | |
1008 | ||
1009 | if (!tick_nohz_enabled) | |
1010 | return; | |
1011 | ||
1012 | local_irq_disable(); | |
1013 | if (tick_switch_to_oneshot(tick_nohz_handler)) { | |
1014 | local_irq_enable(); | |
1015 | return; | |
1016 | } | |
1017 | ||
1018 | ts->nohz_mode = NOHZ_MODE_LOWRES; | |
1019 | ||
1020 | /* | |
1021 | * Recycle the hrtimer in ts, so we can share the | |
1022 | * hrtimer_forward with the highres code. | |
1023 | */ | |
1024 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
1025 | /* Get the next period */ | |
1026 | next = tick_init_jiffy_update(); | |
1027 | ||
1028 | for (;;) { | |
cc584b21 | 1029 | hrtimer_set_expires(&ts->sched_timer, next); |
79bf2bb3 TG |
1030 | if (!tick_program_event(next, 0)) |
1031 | break; | |
1032 | next = ktime_add(next, tick_period); | |
1033 | } | |
1034 | local_irq_enable(); | |
79bf2bb3 TG |
1035 | } |
1036 | ||
fb02fbc1 TG |
1037 | /* |
1038 | * When NOHZ is enabled and the tick is stopped, we need to kick the | |
1039 | * tick timer from irq_enter() so that the jiffies update is kept | |
1040 | * alive during long running softirqs. That's ugly as hell, but | |
1041 | * correctness is key even if we need to fix the offending softirq in | |
1042 | * the first place. | |
1043 | * | |
1044 | * Note, this is different to tick_nohz_restart. We just kick the | |
1045 | * timer and do not touch the other magic bits which need to be done | |
1046 | * when idle is left. | |
1047 | */ | |
eed3b9cf | 1048 | static void tick_nohz_kick_tick(int cpu, ktime_t now) |
fb02fbc1 | 1049 | { |
ae99286b TG |
1050 | #if 0 |
1051 | /* Switch back to 2.6.27 behaviour */ | |
1052 | ||
fb02fbc1 | 1053 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
eed3b9cf | 1054 | ktime_t delta; |
fb02fbc1 | 1055 | |
c4bd822e TG |
1056 | /* |
1057 | * Do not touch the tick device, when the next expiry is either | |
1058 | * already reached or less/equal than the tick period. | |
1059 | */ | |
268a3dcf | 1060 | delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now); |
c4bd822e TG |
1061 | if (delta.tv64 <= tick_period.tv64) |
1062 | return; | |
1063 | ||
1064 | tick_nohz_restart(ts, now); | |
ae99286b | 1065 | #endif |
fb02fbc1 TG |
1066 | } |
1067 | ||
eed3b9cf MS |
1068 | static inline void tick_check_nohz(int cpu) |
1069 | { | |
1070 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
1071 | ktime_t now; | |
1072 | ||
1073 | if (!ts->idle_active && !ts->tick_stopped) | |
1074 | return; | |
1075 | now = ktime_get(); | |
1076 | if (ts->idle_active) | |
1077 | tick_nohz_stop_idle(cpu, now); | |
1078 | if (ts->tick_stopped) { | |
1079 | tick_nohz_update_jiffies(now); | |
1080 | tick_nohz_kick_tick(cpu, now); | |
1081 | } | |
1082 | } | |
1083 | ||
79bf2bb3 TG |
1084 | #else |
1085 | ||
1086 | static inline void tick_nohz_switch_to_nohz(void) { } | |
eed3b9cf | 1087 | static inline void tick_check_nohz(int cpu) { } |
79bf2bb3 | 1088 | |
3451d024 | 1089 | #endif /* CONFIG_NO_HZ_COMMON */ |
79bf2bb3 | 1090 | |
719254fa TG |
1091 | /* |
1092 | * Called from irq_enter to notify about the possible interruption of idle() | |
1093 | */ | |
1094 | void tick_check_idle(int cpu) | |
1095 | { | |
fb02fbc1 | 1096 | tick_check_oneshot_broadcast(cpu); |
eed3b9cf | 1097 | tick_check_nohz(cpu); |
719254fa TG |
1098 | } |
1099 | ||
79bf2bb3 TG |
1100 | /* |
1101 | * High resolution timer specific code | |
1102 | */ | |
1103 | #ifdef CONFIG_HIGH_RES_TIMERS | |
6fa3eb70 S |
1104 | |
1105 | #ifdef CONFIG_MTK_SCHED_RQAVG_US | |
1106 | static void update_rq_stats(void) | |
1107 | { | |
1108 | unsigned long jiffy_gap = 0; | |
1109 | unsigned int rq_avg = 0; | |
1110 | unsigned long flags = 0; | |
1111 | ||
1112 | spin_lock_irqsave(&rq_lock, flags); | |
1113 | ||
1114 | jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy; | |
1115 | if (jiffy_gap >= rq_info.rq_poll_jiffies) { | |
1116 | if (!rq_info.rq_avg) | |
1117 | rq_info.rq_poll_total_jiffies = 0; | |
1118 | ||
1119 | rq_avg = nr_running() * 10; | |
1120 | ||
1121 | if (rq_info.rq_poll_total_jiffies) { | |
1122 | rq_avg = (rq_avg * jiffy_gap) + | |
1123 | (rq_info.rq_avg * | |
1124 | rq_info.rq_poll_total_jiffies); | |
1125 | do_div(rq_avg, | |
1126 | rq_info.rq_poll_total_jiffies + jiffy_gap); | |
1127 | } | |
1128 | ||
1129 | rq_info.rq_avg = rq_avg; | |
1130 | rq_info.rq_poll_total_jiffies += jiffy_gap; | |
1131 | rq_info.rq_poll_last_jiffy = jiffies; | |
1132 | } | |
1133 | ||
1134 | spin_unlock_irqrestore(&rq_lock, flags); | |
1135 | } | |
1136 | ||
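The MTK hook keeps a time-weighted running average of nr_running (scaled by 10): each poll window contributes in proportion to its length in jiffies. A worked example of the blend with illustrative values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Previous average: 25 (2.5 runnable tasks) over 40 jiffies. */
	uint64_t rq_avg = 25, total_jiffies = 40;
	/* New sample: 4 tasks (scaled to 40) over a 10-jiffy gap. */
	uint64_t sample = 4 * 10, gap = 10;

	/* Same blend as update_rq_stats(): weight by window length. */
	rq_avg = (sample * gap + rq_avg * total_jiffies)
		 / (total_jiffies + gap);
	total_jiffies += gap;

	/* Prints 28, i.e. an average of 2.8 runnable tasks. */
	printf("rq_avg = %llu\n", (unsigned long long)rq_avg);
	return 0;
}
```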
1137 | #ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ | |
1138 | static void wakeup_user(void) | |
1139 | { | |
1140 | unsigned long jiffy_gap; | |
1141 | ||
1142 | jiffy_gap = jiffies - rq_info.def_timer_last_jiffy; | |
1143 | ||
1144 | if (jiffy_gap >= rq_info.def_timer_jiffies) { | |
1145 | rq_info.def_timer_last_jiffy = jiffies; | |
1146 | queue_work(rq_wq, &rq_info.def_timer_work); | |
1147 | } | |
1148 | } | |
1149 | #endif /* CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ */ | |
1150 | ||
1151 | #endif /* CONFIG_MTK_SCHED_RQAVG_US */ | |
79bf2bb3 | 1152 | /* |
4c9dc641 | 1153 | * We rearm the timer until we get disabled by the idle code. |
351f181f | 1154 | * Called with interrupts disabled. |
79bf2bb3 TG |
1155 | */ |
1156 | static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |
1157 | { | |
1158 | struct tick_sched *ts = | |
1159 | container_of(timer, struct tick_sched, sched_timer); | |
79bf2bb3 TG |
1160 | struct pt_regs *regs = get_irq_regs(); |
1161 | ktime_t now = ktime_get(); | |
d3ed7824 | 1162 | |
5bb96226 | 1163 | tick_sched_do_timer(now); |
79bf2bb3 TG |
1164 | |
1165 | /* | |
1166 | * Do not call, when we are not in irq context and have | |
1167 | * no valid regs pointer | |
1168 | */ | |
9e8f559b FW |
1169 | if (regs) |
1170 | tick_sched_handle(ts, regs); | |
79bf2bb3 | 1171 | |
6fa3eb70 S |
1172 | #ifdef CONFIG_MTK_SCHED_RQAVG_US |
1173 | if ((rq_info.init == 1) && (tick_do_timer_cpu == smp_processor_id())) { | |
1174 | ||
1175 | /* | |
1176 | * update run queue statistics | |
1177 | */ | |
1178 | update_rq_stats(); | |
1179 | ||
1180 | #ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ | |
1181 | /* | |
1182 | * wakeup user if needed | |
1183 | */ | |
1184 | wakeup_user(); | |
1185 | #endif /* CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ */ | |
1186 | } | |
1187 | #endif /* CONFIG_MTK_SCHED_RQAVG_US */ | |
1188 | ||
79bf2bb3 TG |
1189 | hrtimer_forward(timer, now, tick_period); |
1190 | ||
1191 | return HRTIMER_RESTART; | |
1192 | } | |
1193 | ||
5307c955 MG |
1194 | static int sched_skew_tick; |
1195 | ||
62cf20b3 TG |
1196 | static int __init skew_tick(char *str) |
1197 | { | |
1198 | get_option(&str, &sched_skew_tick); | |
1199 | ||
1200 | return 0; | |
1201 | } | |
1202 | early_param("skew_tick", skew_tick); | |
1203 | ||
79bf2bb3 TG |
1204 | /** |
1205 | * tick_setup_sched_timer - setup the tick emulation timer | |
1206 | */ | |
1207 | void tick_setup_sched_timer(void) | |
1208 | { | |
1209 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
1210 | ktime_t now = ktime_get(); | |
1211 | ||
1212 | /* | |
1213 | * Emulate tick processing via per-CPU hrtimers: | |
1214 | */ | |
1215 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
1216 | ts->sched_timer.function = tick_sched_timer; | |
79bf2bb3 | 1217 | |
3704540b | 1218 | /* Get the next period (per cpu) */ |
cc584b21 | 1219 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
79bf2bb3 | 1220 | |
9c3f9e28 | 1221 | /* Offset the tick to avert jiffies_lock contention. */ |
5307c955 MG |
1222 | if (sched_skew_tick) { |
1223 | u64 offset = ktime_to_ns(tick_period) >> 1; | |
1224 | do_div(offset, num_possible_cpus()); | |
1225 | offset *= smp_processor_id(); | |
1226 | hrtimer_add_expires_ns(&ts->sched_timer, offset); | |
1227 | } | |
1228 | ||
79bf2bb3 TG |
1229 | for (;;) { |
1230 | hrtimer_forward(&ts->sched_timer, now, tick_period); | |
5c333864 AB |
1231 | hrtimer_start_expires(&ts->sched_timer, |
1232 | HRTIMER_MODE_ABS_PINNED); | |
79bf2bb3 TG |
1233 | /* Check, if the timer was already in the past */ |
1234 | if (hrtimer_active(&ts->sched_timer)) | |
1235 | break; | |
1236 | now = ktime_get(); | |
1237 | } | |
1238 | ||
3451d024 | 1239 | #ifdef CONFIG_NO_HZ_COMMON |
29c158e8 | 1240 | if (tick_nohz_enabled) |
79bf2bb3 TG |
1241 | ts->nohz_mode = NOHZ_MODE_HIGHRES; |
1242 | #endif | |
1243 | } | |
3c4fbe5e | 1244 | #endif /* HIGH_RES_TIMERS */ |
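The skew_tick offset computed in tick_setup_sched_timer() spreads each CPU's tick across half a tick period so they do not all contend on jiffies_lock at the same instant. A worked sketch of the same math, assuming HZ=1000 and 4 possible CPUs:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tick_period_ns = 1000000;             /* HZ=1000      */
	uint64_t ncpus = 4;                            /* illustrative */
	uint64_t step = (tick_period_ns >> 1) / ncpus; /* 125 us       */

	/* offset = (tick_period / 2 / num_possible_cpus()) * cpu */
	for (uint64_t cpu = 0; cpu < ncpus; cpu++)
		printf("cpu%llu tick offset: %llu ns\n",
		       (unsigned long long)cpu,
		       (unsigned long long)(step * cpu));
	return 0;
}
```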
79bf2bb3 | 1245 | |
3451d024 | 1246 | #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
1247 | void tick_cancel_sched_timer(int cpu) |
1248 | { | |
1249 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
1250 | ||
3c4fbe5e | 1251 | # ifdef CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
1252 | if (ts->sched_timer.base) |
1253 | hrtimer_cancel(&ts->sched_timer); | |
3c4fbe5e | 1254 | # endif |
a7901766 | 1255 | |
6fa3eb70 S |
1256 | //memset(ts, 0, sizeof(*ts)); /* avoid clearing the idle time to 0 after CPU hot-unplug */ |
1257 | ts->nohz_mode = NOHZ_MODE_INACTIVE; | |
79bf2bb3 | 1258 | } |
3c4fbe5e | 1259 | #endif |
79bf2bb3 TG |
1260 | |
1261 | /** | |
1262 | * Async notification about clocksource changes | |
1263 | */ | |
1264 | void tick_clock_notify(void) | |
1265 | { | |
1266 | int cpu; | |
1267 | ||
1268 | for_each_possible_cpu(cpu) | |
1269 | set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); | |
1270 | } | |
1271 | ||
1272 | /* | |
1273 | * Async notification about clock event changes | |
1274 | */ | |
1275 | void tick_oneshot_notify(void) | |
1276 | { | |
1277 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
1278 | ||
1279 | set_bit(0, &ts->check_clocks); | |
1280 | } | |
1281 | ||
1282 | /** | |
1283 | * Check, if a change happened, which makes oneshot possible. | |
1284 | * | |
1285 | * Called cyclically from the hrtimer softirq (driven by the timer |
1286 | * softirq). allow_nohz signals that we can switch into low-res nohz |
1287 | * mode, because high resolution timers are disabled (either at |
1288 | * compile time or at runtime). |
1289 | */ | |
1290 | int tick_check_oneshot_change(int allow_nohz) | |
1291 | { | |
1292 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | |
1293 | ||
1294 | if (!test_and_clear_bit(0, &ts->check_clocks)) | |
1295 | return 0; | |
1296 | ||
1297 | if (ts->nohz_mode != NOHZ_MODE_INACTIVE) | |
1298 | return 0; | |
1299 | ||
cf4fc6cb | 1300 | if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) |
79bf2bb3 TG |
1301 | return 0; |
1302 | ||
1303 | if (!allow_nohz) | |
1304 | return 1; | |
1305 | ||
1306 | tick_nohz_switch_to_nohz(); | |
1307 | return 0; | |
1308 | } |