sched, nohz: Introduce nohz_flags in 'struct rq'
/*
 * kernel/sched/core.c
 *
 * Kernel scheduler and related syscalls
 *
 * Copyright (C) 1991-2002  Linus Torvalds
 *
 * 1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *             make semaphores SMP safe
 * 1998-11-19  Implemented schedule_timeout() and related stuff
 *             by Andrea Arcangeli
 * 2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *             hybrid priority-list and round-robin design with
 *             an array-switch method of distributing timeslices
 *             and per-CPU runqueues.  Cleanups and useful suggestions
 *             by Davide Libenzi, preemptible kernel bits by Robert Love.
 * 2003-09-03  Interactivity tuning by Con Kolivas.
 * 2004-04-02  Scheduler domains code by Nick Piggin
 * 2007-04-15  Work begun on replacing all interactivity tuning with a
 *             fair scheduling design by Con Kolivas.
 * 2007-05-05  Load balancing (smp-nice) and other improvements
 *             by Peter Williams
 * 2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 * 2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 * 2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *             Thomas Gleixner, Mike Kravetz
 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
dff06c15 33#include <linux/uaccess.h>
1da177e4 34#include <linux/highmem.h>
1da177e4
LT
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
c59ede7b 37#include <linux/capability.h>
1da177e4
LT
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
9a11b49a 40#include <linux/debug_locks.h>
cdd6c482 41#include <linux/perf_event.h>
1da177e4
LT
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
7dfb7103 45#include <linux/freezer.h>
198e2f18 46#include <linux/vmalloc.h>
1da177e4
LT
47#include <linux/blkdev.h>
48#include <linux/delay.h>
b488893a 49#include <linux/pid_namespace.h>
1da177e4
LT
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
b5aadf7f 57#include <linux/proc_fs.h>
1da177e4 58#include <linux/seq_file.h>
e692ab53 59#include <linux/sysctl.h>
1da177e4
LT
60#include <linux/syscalls.h>
61#include <linux/times.h>
8f0ab514 62#include <linux/tsacct_kern.h>
c6fd91f0 63#include <linux/kprobes.h>
0ff92245 64#include <linux/delayacct.h>
dff06c15 65#include <linux/unistd.h>
f5ff8422 66#include <linux/pagemap.h>
8f4d37ec 67#include <linux/hrtimer.h>
30914a58 68#include <linux/tick.h>
f00b45c1
PZ
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
6cd8a4bb 71#include <linux/ftrace.h>
5a0e3ad6 72#include <linux/slab.h>
f1c6f1a7 73#include <linux/init_task.h>
1da177e4 74
5517d86b 75#include <asm/tlb.h>
838225b4 76#include <asm/irq_regs.h>
e6e6685a
GC
77#ifdef CONFIG_PARAVIRT
78#include <asm/paravirt.h>
79#endif
1da177e4 80
029632fb 81#include "sched.h"
391e43da 82#include "../workqueue_sched.h"
6e0534f2 83
a8d154b0 84#define CREATE_TRACE_POINTS
ad8d75ff 85#include <trace/events/sched.h>
a8d154b0 86
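/*
 * start_bandwidth_timer() below (re)arms an inactive bandwidth period timer
 * one period into the future while preserving its soft/hard expiry slack;
 * if the timer is already active it is left untouched.
 */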
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
        unsigned long delta;
        ktime_t soft, hard, now;

        for (;;) {
                if (hrtimer_active(period_timer))
                        break;

                now = hrtimer_cb_get_time(period_timer);
                hrtimer_forward(period_timer, now, period);

                soft = hrtimer_get_softexpires(period_timer);
                hard = hrtimer_get_expires(period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(period_timer, soft, delta,
                                         HRTIMER_MODE_ABS_PINNED, 0);
        }
}

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
        s64 delta;

        if (rq->skip_clock_update > 0)
                return;

        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
        0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)       \
        #name ,

static __read_mostly char *sched_feat_names[] = {
#include "features.h"
        NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; sched_feat_names[i]; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int neg = 0;
        int i;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; sched_feat_names[i]; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg)
                                sysctl_sched_features &= ~(1UL << i);
                        else
                                sysctl_sched_features |= (1UL << i);
                        break;
                }
        }

        if (!sched_feat_names[i])
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        return 0;
}
late_initcall(sched_init_debug);
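
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): reading
 * the "sched_features" file lists every feature name, prefixed with "NO_"
 * when the bit is clear, and writing a name such as "TTWU_QUEUE" or
 * "NO_TTWU_QUEUE" sets or clears the corresponding bit via
 * sched_feat_write() above.
 */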

#endif

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;


/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

        lockdep_assert_held(&p->pi_lock);

        for (;;) {
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
}

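/*
 * Note on the retry loops above and in task_rq_lock() below: the task can be
 * migrated to a different runqueue between reading task_rq(p) and acquiring
 * that rq's lock, so the rq is re-checked under the lock and the lock is
 * dropped and the lookup retried if it no longer matches.
 */
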
/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(p->pi_lock)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
        }
}

static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
        __releases(rq->lock)
        __releases(p->pi_lock)
{
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        raw_spin_lock(&rq->lock);

        return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        raw_spin_unlock(&rq->lock);

        return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;

        raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

        hrtimer_set_expires(timer, time);

        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                hrtick_clear(cpu_rq(cpu));
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
        hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
        rq->hrtick_csd_pending = 0;

        rq->hrtick_csd.flags = 0;
        rq->hrtick_csd.func = __hrtick_start;
        rq->hrtick_csd.info = rq;
#endif

        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

void resched_task(struct task_struct *p)
{
        int cpu;

        assert_raw_spin_locked(&task_rq(p)->lock);

        if (test_tsk_need_resched(p))
                return;

        set_tsk_need_resched(p);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
        int cpu = smp_processor_id();
        int i;
        struct sched_domain *sd;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
                        if (!idle_cpu(i)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }
unlock:
        rcu_read_unlock();
        return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        /*
         * This is safe, as this function is called with the timer
         * wheel base lock of (cpu) held. When the CPU is on the way
         * to idle and has not yet set rq->curr to idle then it will
         * be serialized on the timer wheel base lock and take the new
         * timer into account automatically.
         */
        if (rq->curr != rq->idle)
                return;

        /*
         * We can set TIF_RESCHED on the idle task of the other CPU
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
        set_tsk_need_resched(rq->idle);

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(rq->idle))
                smp_send_reschedule(cpu);
}

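/*
 * got_nohz_idle_kick() below tests the NOHZ_BALANCE_KICK bit in this CPU's
 * rq->nohz_flags (reached through the nohz_flags() accessor in sched.h).
 * Another CPU sets that bit to ask this idle, tickless CPU to run the nohz
 * idle load balance; scheduler_ipi() reacts to it by raising SCHED_SOFTIRQ.
 */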
static inline bool got_nohz_idle_kick(void)
{
        int cpu = smp_processor_id();
        return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}

#else /* CONFIG_NO_HZ */

static inline bool got_nohz_idle_kick(void)
{
        return false;
}

#endif /* CONFIG_NO_HZ */

void sched_avg_update(struct rq *rq)
{
        s64 period = sched_avg_period();

        while ((s64)(rq->clock - rq->age_stamp) > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (rq->age_stamp));
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
}

#else /* !CONFIG_SMP */
void resched_task(struct task_struct *p)
{
        assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
                        (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
                      tg_visitor down, tg_visitor up, void *data)
{
        struct task_group *parent, *child;
        int ret;

        parent = from;

down:
        ret = (*down)(parent, data);
        if (ret)
                goto out;
        list_for_each_entry_rcu(child, &parent->children, siblings) {
                parent = child;
                goto down;

up:
                continue;
        }
        ret = (*up)(parent, data);
        if (ret || parent == from)
                goto out;

        child = parent;
        parent = parent->parent;
        if (parent)
                goto up;
out:
        return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
        return 0;
}
#endif

void update_cpu_load(struct rq *this_rq);

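/*
 * set_load_weight() below converts a task's static priority into its CFS
 * load weight through the prio_to_weight[] table (nice 0 corresponds to a
 * weight of 1024 and each nice level is roughly a 1.25x step), while
 * SCHED_IDLE tasks get the minimal WEIGHT_IDLEPRIO weight so they hardly
 * contribute to load at all.
 */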
static void set_load_weight(struct task_struct *p)
{
        int prio = p->static_prio - MAX_RT_PRIO;
        struct load_weight *load = &p->se.load;

        /*
         * SCHED_IDLE tasks get minimal weight:
         */
        if (p->policy == SCHED_IDLE) {
                load->weight = scale_load(WEIGHT_IDLEPRIO);
                load->inv_weight = WMULT_IDLEPRIO;
                return;
        }

        load->weight = scale_load(prio_to_weight[prio]);
        load->inv_weight = prio_to_wmult[prio];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
        update_rq_clock(rq);
        sched_info_queued(p);
        p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
        update_rq_clock(rq);
        sched_info_dequeued(p);
        p->sched_class->dequeue_task(rq, p, flags);
}

/*
 * activate_task - move a task to the runqueue.
 */
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;

        enqueue_task(rq, p, flags);
}

/*
 * deactivate_task - remove a task from the runqueue.
 */
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;

        dequeue_task(rq, p, flags);
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in account_system_vtime, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in other CPU reading this CPU's irq time and can
 * race with irq/account_system_vtime on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}

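/*
 * On 32-bit kernels a u64 cannot be loaded atomically, so readers of the two
 * per-cpu counters above use the irq_time_seq seqcount below and retry until
 * they observe a consistent pair; 64-bit kernels can use plain loads.
 */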
#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
        __this_cpu_inc(irq_time_seq.sequence);
        smp_wmb();
}

static inline void irq_time_write_end(void)
{
        smp_wmb();
        __this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
        u64 irq_time;
        unsigned seq;

        do {
                seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
                irq_time = per_cpu(cpu_softirq_time, cpu) +
                           per_cpu(cpu_hardirq_time, cpu);
        } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

        return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
        return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void account_system_vtime(struct task_struct *curr)
{
        unsigned long flags;
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);

        irq_time_write_begin();
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to ksoftirqd thread
         * in that case, so as not to confuse the scheduler with a special
         * task that does not consume any time but still wants to run.
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);

        irq_time_write_end();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
        if (unlikely(steal > NSEC_PER_SEC))
                return div_u64(steal, TICK_NSEC);

        return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

        /*
         * Since irq_time is only updated on {soft,}irq_exit, we might run into
         * this case when a previous update_rq_clock() happened inside a
         * {soft,}irq region.
         *
         * When this happens, we stop ->clock_task and only update the
         * prev_irq_time stamp to account for the part that fit, so that a next
         * update will consume the rest. This ensures ->clock_task is
         * monotonic.
         *
         * It does however cause some slight miss-attribution of {soft,}irq
         * time, a more accurate solution would be to update the irq_time using
         * the current rq->clock timestamp, except that would require using
         * atomic ops.
         */
        if (irq_delta > delta)
                irq_delta = delta;

        rq->prev_irq_time += irq_delta;
        delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_branch((&paravirt_steal_rq_enabled))) {
                u64 st;

                steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;

                if (unlikely(steal > delta))
                        steal = delta;

                st = steal_ticks(steal);
                steal = st * TICK_NSEC;

                rq->prev_steal_time_rq += steal;

                delta -= steal;
        }
#endif

        rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
                sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int irqtime_account_hi_update(void)
{
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
        if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

static int irqtime_account_si_update(void)
{
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
        if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime     (0)

#endif

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct task_struct *old_stop = cpu_rq(cpu)->stop;

        if (stop) {
                /*
                 * Make it appear like a SCHED_FIFO task, it's something
                 * userspace knows about and won't get confused about.
                 *
                 * Also, it will make PI more or less work without too
                 * much confusion -- but then, stop work should not
                 * rely on PI working anyway.
                 */
                sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

                stop->sched_class = &stop_sched_class;
        }

        cpu_rq(cpu)->stop = stop;

        if (old_stop) {
                /*
                 * Reset it back to a normal scheduling class so that
                 * it can die in pieces.
                 */
                old_stop->sched_class = &rt_sched_class;
        }
}

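/*
 * Priority layout assumed by the helpers below: kernel prio values run from
 * 0 to MAX_PRIO-1, the range 0..MAX_RT_PRIO-1 is reserved for realtime tasks
 * (a larger rt_priority maps to a numerically lower, i.e. stronger, prio)
 * and the remaining range covers nice -20..19 for normal tasks.
 */
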
/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
        return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
        int prio;

        if (task_has_rt_policy(p))
                prio = MAX_RT_PRIO-1 - p->rt_priority;
        else
                prio = __normal_prio(p);
        return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
        p->normal_prio = normal_prio(p);
        /*
         * If we are RT tasks or we were boosted to RT priority,
         * keep the priority unchanged. Otherwise, update priority
         * to the normal priority:
         */
        if (!rt_prio(p->prio))
                return p->normal_prio;
        return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}

static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio)
{
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p);
                p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio)
                p->sched_class->prio_changed(rq, p, oldprio);
}

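/*
 * check_preempt_curr() below relies on for_each_class() walking the
 * scheduling classes from highest to lowest priority: reaching rq->curr's
 * class first means the running task is of an equal or higher class and is
 * left alone, while reaching @p's class first means the woken task outranks
 * the current one, which is then marked for rescheduling.
 */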
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
        const struct sched_class *class;

        if (p->sched_class == rq->curr->sched_class) {
                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
        } else {
                for_each_class(class) {
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
                                resched_task(rq->curr);
                                break;
                        }
                }
        }

        /*
         * A queue event has occurred, and we're going to schedule. In
         * this case, we can save a useless back to back clock update.
         */
        if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
                rq->skip_clock_update = 1;
}

#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
        /*
         * We should never call set_task_cpu() on a blocked task,
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
                        !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
        /*
         * The caller should hold either p->pi_lock or rq->lock, when changing
         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
         *
         * sched_move_task() holds both and thus holding either pins the cgroup,
         * see set_task_rq().
         *
         * Furthermore, all task_rq users should acquire both locks, see
         * task_rq_lock().
         */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

        trace_sched_migrate_task(p, new_cpu);

        if (task_cpu(p) != new_cpu) {
                p->se.nr_migrations++;
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
        }

        __set_task_cpu(p, new_cpu);
}

struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
};

static int migration_cpu_stop(void *data);

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
        unsigned long flags;
        int running, on_rq;
        unsigned long ncsw;
        struct rq *rq;

        for (;;) {
                /*
                 * We do the initial early heuristics without holding
                 * any task-queue locks at all. We'll only try to get
                 * the runqueue lock when things look like they will
                 * work out!
                 */
                rq = task_rq(p);

                /*
                 * If the task is actively running on another CPU
                 * still, just relax and busy-wait without holding
                 * any locks.
                 *
                 * NOTE! Since we don't hold any locks, it's not
                 * even sure that "rq" stays as the right runqueue!
                 * But we don't care, since "task_running()" will
                 * return false if the runqueue has changed and p
                 * is actually now running somewhere else!
                 */
                while (task_running(rq, p)) {
                        if (match_state && unlikely(p->state != match_state))
                                return 0;
                        cpu_relax();
                }

                /*
                 * Ok, time to look more closely! We need the rq
                 * lock now, to be *sure*. If we're wrong, we'll
                 * just go back and repeat.
                 */
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
                on_rq = p->on_rq;
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
                task_rq_unlock(rq, p, &flags);

                /*
                 * If it changed from the expected state, bail out now.
                 */
                if (unlikely(!ncsw))
                        break;

                /*
                 * Was it really running after all now that we
                 * checked with the proper locks actually held?
                 *
                 * Oops. Go back and try again..
                 */
                if (unlikely(running)) {
                        cpu_relax();
                        continue;
                }

                /*
                 * It's not enough that it's not actively running,
                 * it must be off the runqueue _entirely_, and not
                 * preempted!
                 *
                 * So if it was still runnable (but just not actively
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
                if (unlikely(on_rq)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_hrtimeout(&to, HRTIMER_MODE_REL);
                        continue;
                }

                /*
                 * Ahh, all good. It wasn't running, and it wasn't
                 * runnable, which means that it will never become
                 * running in the future either. We're all done!
                 */
                break;
        }

        return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
        int cpu;

        preempt_disable();
        cpu = task_cpu(p);
        if ((cpu != smp_processor_id()) && task_curr(p))
                smp_send_reschedule(cpu);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
        int dest_cpu;
        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));

        /* Look for allowed, online CPU in same node. */
        for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
                if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                        return dest_cpu;

        /* Any allowed, online CPU? */
        dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
        if (dest_cpu < nr_cpu_ids)
                return dest_cpu;

        /* No more Mr. Nice Guy. */
        dest_cpu = cpuset_cpus_allowed_fallback(p);
        /*
         * Don't tell them about moving exiting tasks or
         * kernel threads (both mm NULL), since they never
         * leave kernel.
         */
        if (p->mm && printk_ratelimit()) {
                printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
                                task_pid_nr(p), p->comm, cpu);
        }

        return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
        int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

        /*
         * In order not to call set_task_cpu() on a blocking task we need
         * to rely on ttwu() to place the task on a valid ->cpus_allowed
         * cpu.
         *
         * Since this is common to all placement strategies, this lives here.
         *
         * [ this allows ->select_task() to simply return task_cpu(p) and
         *   not worry about this generic constraint ]
         */
        if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
                     !cpu_online(cpu)))
                cpu = select_fallback_rq(task_cpu(p), p);

        return cpu;
}

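/*
 * update_avg() below is a simple exponentially weighted moving average that
 * gives the new sample a 1/8 weight; it is used further down to track
 * rq->avg_idle on wakeups.
 */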
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}
#endif

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
        struct rq *rq = this_rq();

#ifdef CONFIG_SMP
        int this_cpu = smp_processor_id();

        if (cpu == this_cpu) {
                schedstat_inc(rq, ttwu_local);
                schedstat_inc(p, se.statistics.nr_wakeups_local);
        } else {
                struct sched_domain *sd;

                schedstat_inc(p, se.statistics.nr_wakeups_remote);
                rcu_read_lock();
                for_each_domain(this_cpu, sd) {
                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                                schedstat_inc(sd, ttwu_wake_remote);
                                break;
                        }
                }
                rcu_read_unlock();
        }

        if (wake_flags & WF_MIGRATED)
                schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

        schedstat_inc(rq, ttwu_count);
        schedstat_inc(p, se.statistics.nr_wakeups);

        if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
        activate_task(rq, p, en_flags);
        p->on_rq = 1;

        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
                wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
        trace_sched_wakeup(p, true);
        check_preempt_curr(rq, p, wake_flags);

        p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
                p->sched_class->task_woken(rq, p);

        if (rq->idle_stamp) {
                u64 delta = rq->clock - rq->idle_stamp;
                u64 max = 2*sysctl_sched_migration_cost;

                if (delta > max)
                        rq->avg_idle = max;
                else
                        update_avg(&rq->avg_idle, delta);
                rq->idle_stamp = 0;
        }
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
#endif

        ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
        ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
        struct rq *rq;
        int ret = 0;

        rq = __task_rq_lock(p);
        if (p->on_rq) {
                ttwu_do_wakeup(rq, p, wake_flags);
                ret = 1;
        }
        __task_rq_unlock(rq);

        return ret;
}

#ifdef CONFIG_SMP
static void sched_ttwu_pending(void)
{
        struct rq *rq = this_rq();
        struct llist_node *llist = llist_del_all(&rq->wake_list);
        struct task_struct *p;

        raw_spin_lock(&rq->lock);

        while (llist) {
                p = llist_entry(llist, struct task_struct, wake_entry);
                llist = llist_next(llist);
                ttwu_do_activate(rq, p, 0);
        }

        raw_spin_unlock(&rq->lock);
}

void scheduler_ipi(void)
{
        if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
                return;

        /*
         * Not all reschedule IPI handlers call irq_enter/irq_exit, since
         * traditionally all their work was done from the interrupt return
         * path. Now that we actually do some work, we need to make sure
         * we do call them.
         *
         * Some archs already do call them, luckily irq_enter/exit nest
         * properly.
         *
         * Arguably we should visit all archs and update all handlers,
         * however a fair share of IPIs are still resched only so this would
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
        sched_ttwu_pending();

        /*
         * Check if someone kicked us for doing the nohz idle load balance.
         */
        if (unlikely(got_nohz_idle_kick() && !need_resched())) {
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
        irq_exit();
}

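/*
 * ttwu_queue_remote() below pushes @p onto the target CPU's lock-less
 * rq->wake_list. llist_add() returns true only when the list was previously
 * empty, so a single reschedule IPI is sent per batch of queued wakeups and
 * scheduler_ipi() -> sched_ttwu_pending() above drains the whole list under
 * the remote rq->lock.
 */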
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
        if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
                smp_send_reschedule(cpu);
}

#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
{
        struct rq *rq;
        int ret = 0;

        rq = __task_rq_lock(p);
        if (p->on_cpu) {
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
                ttwu_do_wakeup(rq, p, wake_flags);
                ret = 1;
        }
        __task_rq_unlock(rq);

        return ret;

}
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu)
{
        struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
        if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
                sched_clock_cpu(cpu); /* sync clocks x-cpu */
                ttwu_queue_remote(p, cpu);
                return;
        }
#endif

        raw_spin_lock(&rq->lock);
        ttwu_do_activate(rq, p, 0);
        raw_spin_unlock(&rq->lock);
}

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
        unsigned long flags;
        int cpu, success = 0;

        smp_wmb();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;

        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);

        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;

#ifdef CONFIG_SMP
        /*
         * If the owning (remote) cpu is still in the middle of schedule() with
         * this task as prev, wait until it's done referencing the task.
         */
        while (p->on_cpu) {
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
                /*
                 * In case the architecture enables interrupts in
                 * context_switch(), we cannot busy wait, since that
                 * would lead to deadlocks when an interrupt hits and
                 * tries to wake up @prev. So bail and do a complete
                 * remote wakeup.
                 */
                if (ttwu_activate_remote(p, wake_flags))
                        goto stat;
#else
                cpu_relax();
#endif
        }
        /*
         * Pairs with the smp_wmb() in finish_lock_switch().
         */
        smp_rmb();

        p->sched_contributes_to_load = !!task_contributes_to_load(p);
        p->state = TASK_WAKING;

        if (p->sched_class->task_waking)
                p->sched_class->task_waking(p);

        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
        }
#endif /* CONFIG_SMP */

        ttwu_queue(p, cpu);
stat:
        ttwu_stat(p, cpu, wake_flags);
out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

        return success;
}

/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
        struct rq *rq = task_rq(p);

        BUG_ON(rq != this_rq());
        BUG_ON(p == current);
        lockdep_assert_held(&rq->lock);

        if (!raw_spin_trylock(&p->pi_lock)) {
                raw_spin_unlock(&rq->lock);
                raw_spin_lock(&p->pi_lock);
                raw_spin_lock(&rq->lock);
        }

        if (!(p->state & TASK_NORMAL))
                goto out;

        if (!p->on_rq)
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);

        ttwu_do_wakeup(rq, p, 0);
        ttwu_stat(p, smp_processor_id(), 0);
out:
        raw_spin_unlock(&p->pi_lock);
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.  Returns 1 if the process was woken up, 0 if it was already
 * running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
        return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
        return try_to_wake_up(p, state, 0);
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
        p->on_rq = 0;

        p->se.on_rq = 0;
        p->se.exec_start = 0;
        p->se.sum_exec_runtime = 0;
        p->se.prev_sum_exec_runtime = 0;
        p->se.nr_migrations = 0;
        p->se.vruntime = 0;
        INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

        INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
}

/*
 * fork()/clone()-time setup:
 */
void sched_fork(struct task_struct *p)
{
        unsigned long flags;
        int cpu = get_cpu();

        __sched_fork(p);
        /*
         * We mark the process as running here. This guarantees that
         * nobody will actually run it, and a signal or other external
         * event cannot wake it up and insert it on the runqueue either.
         */
        p->state = TASK_RUNNING;

        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
        p->prio = current->normal_prio;

        /*
         * Revert to default priority/policy on fork if requested.
         */
        if (unlikely(p->sched_reset_on_fork)) {
                if (task_has_rt_policy(p)) {
                        p->policy = SCHED_NORMAL;
                        p->static_prio = NICE_TO_PRIO(0);
                        p->rt_priority = 0;
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);

                p->prio = p->normal_prio = __normal_prio(p);
                set_load_weight(p);

                /*
                 * We don't need the reset flag anymore after the fork. It has
                 * fulfilled its duty:
                 */
                p->sched_reset_on_fork = 0;
        }

        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;

        if (p->sched_class->task_fork)
                p->sched_class->task_fork(p);

        /*
         * The child is not yet in the pid-hash so no cgroup attach races,
         * and the cgroup is pinned to this child because cgroup_fork()
         * is run before sched_fork().
         *
         * Silence PROVE_RCU.
         */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        set_task_cpu(p, cpu);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
        p->on_cpu = 0;
#endif
#ifdef CONFIG_PREEMPT_COUNT
        /* Want to start with kernel preemption disabled. */
        task_thread_info(p)->preempt_count = 1;
#endif
#ifdef CONFIG_SMP
        plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif

        put_cpu();
}

1742/*
1743 * wake_up_new_task - wake up a newly created task for the first time.
1744 *
1745 * This function will do some initial scheduler statistics housekeeping
1746 * that must be done for every newly created context, then puts the task
1747 * on the runqueue and wakes it.
1748 */
3e51e3ed 1749void wake_up_new_task(struct task_struct *p)
1da177e4
LT
1750{
1751 unsigned long flags;
dd41f596 1752 struct rq *rq;
fabf318e 1753
ab2515c4 1754 raw_spin_lock_irqsave(&p->pi_lock, flags);
fabf318e
PZ
1755#ifdef CONFIG_SMP
1756 /*
1757 * Fork balancing, do it here and not earlier because:
1758 * - cpus_allowed can change in the fork path
1759 * - any previously selected cpu might disappear through hotplug
fabf318e 1760 */
ab2515c4 1761 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
0017d735
PZ
1762#endif
1763
ab2515c4 1764 rq = __task_rq_lock(p);
cd29fe6f 1765 activate_task(rq, p, 0);
fd2f4419 1766 p->on_rq = 1;
89363381 1767 trace_sched_wakeup_new(p, true);
a7558e01 1768 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 1769#ifdef CONFIG_SMP
efbbd05a
PZ
1770 if (p->sched_class->task_woken)
1771 p->sched_class->task_woken(rq, p);
9a897c5a 1772#endif
0122ec5b 1773 task_rq_unlock(rq, p, &flags);
1da177e4
LT
1774}
1775
e107be36
AK
1776#ifdef CONFIG_PREEMPT_NOTIFIERS
1777
1778/**
80dd99b3 1779 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 1780 * @notifier: notifier struct to register
e107be36
AK
1781 */
1782void preempt_notifier_register(struct preempt_notifier *notifier)
1783{
1784 hlist_add_head(&notifier->link, &current->preempt_notifiers);
1785}
1786EXPORT_SYMBOL_GPL(preempt_notifier_register);
1787
1788/**
1789 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 1790 * @notifier: notifier struct to unregister
e107be36
AK
1791 *
1792 * This is safe to call from within a preemption notifier.
1793 */
1794void preempt_notifier_unregister(struct preempt_notifier *notifier)
1795{
1796 hlist_del(&notifier->link);
1797}
1798EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1799
1800static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1801{
1802 struct preempt_notifier *notifier;
1803 struct hlist_node *node;
1804
1805 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1806 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1807}
1808
1809static void
1810fire_sched_out_preempt_notifiers(struct task_struct *curr,
1811 struct task_struct *next)
1812{
1813 struct preempt_notifier *notifier;
1814 struct hlist_node *node;
1815
1816 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1817 notifier->ops->sched_out(notifier, next);
1818}
1819
6d6bc0ad 1820#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36
AK
1821
1822static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1823{
1824}
1825
1826static void
1827fire_sched_out_preempt_notifiers(struct task_struct *curr,
1828 struct task_struct *next)
1829{
1830}
1831
6d6bc0ad 1832#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 1833
4866cde0
NP
1834/**
1835 * prepare_task_switch - prepare to switch tasks
1836 * @rq: the runqueue preparing to switch
421cee29 1837 * @prev: the current task that is being switched out
4866cde0
NP
1838 * @next: the task we are going to switch to.
1839 *
1840 * This is called with the rq lock held and interrupts off. It must
1841 * be paired with a subsequent finish_task_switch after the context
1842 * switch.
1843 *
1844 * prepare_task_switch sets up locking and calls architecture specific
1845 * hooks.
1846 */
e107be36
AK
1847static inline void
1848prepare_task_switch(struct rq *rq, struct task_struct *prev,
1849 struct task_struct *next)
4866cde0 1850{
fe4b04fa
PZ
1851 sched_info_switch(prev, next);
1852 perf_event_task_sched_out(prev, next);
e107be36 1853 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
1854 prepare_lock_switch(rq, next);
1855 prepare_arch_switch(next);
fe4b04fa 1856 trace_sched_switch(prev, next);
4866cde0
NP
1857}
1858
1da177e4
LT
1859/**
1860 * finish_task_switch - clean up after a task-switch
344babaa 1861 * @rq: runqueue associated with task-switch
1da177e4
LT
1862 * @prev: the thread we just switched away from.
1863 *
4866cde0
NP
1864 * finish_task_switch must be called after the context switch, paired
1865 * with a prepare_task_switch call before the context switch.
1866 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1867 * and do any other architecture-specific cleanup actions.
1da177e4
LT
1868 *
1869 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 1870 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
1871 * with the lock held can cause deadlocks; see schedule() for
1872 * details.)
1873 */
a9957449 1874static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1da177e4
LT
1875 __releases(rq->lock)
1876{
1da177e4 1877 struct mm_struct *mm = rq->prev_mm;
55a101f8 1878 long prev_state;
1da177e4
LT
1879
1880 rq->prev_mm = NULL;
1881
1882 /*
 1883 * A task struct has one reference for its use as "current".
c394cc9f 1884 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
1885 * schedule one last time. The schedule call will never return, and
1886 * the scheduled task must drop that reference.
c394cc9f 1887 * The test for TASK_DEAD must occur while the runqueue locks are
1da177e4
LT
1888 * still held, otherwise prev could be scheduled on another cpu, die
1889 * there before we look at prev->state, and then the reference would
1890 * be dropped twice.
1891 * Manfred Spraul <manfred@colorfullife.com>
1892 */
55a101f8 1893 prev_state = prev->state;
4866cde0 1894 finish_arch_switch(prev);
8381f65d
JI
1895#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1896 local_irq_disable();
1897#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
a8d757ef 1898 perf_event_task_sched_in(prev, current);
8381f65d
JI
1899#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1900 local_irq_enable();
1901#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
4866cde0 1902 finish_lock_switch(rq, prev);
e8fa1362 1903
e107be36 1904 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
1905 if (mm)
1906 mmdrop(mm);
c394cc9f 1907 if (unlikely(prev_state == TASK_DEAD)) {
c6fd91f0 1908 /*
1909 * Remove function-return probe instances associated with this
1910 * task and put them back on the free list.
9761eea8 1911 */
c6fd91f0 1912 kprobe_flush_task(prev);
1da177e4 1913 put_task_struct(prev);
c6fd91f0 1914 }
1da177e4
LT
1915}
1916
3f029d3c
GH
1917#ifdef CONFIG_SMP
1918
1919/* assumes rq->lock is held */
1920static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1921{
1922 if (prev->sched_class->pre_schedule)
1923 prev->sched_class->pre_schedule(rq, prev);
1924}
1925
1926/* rq->lock is NOT held, but preemption is disabled */
1927static inline void post_schedule(struct rq *rq)
1928{
1929 if (rq->post_schedule) {
1930 unsigned long flags;
1931
05fa785c 1932 raw_spin_lock_irqsave(&rq->lock, flags);
3f029d3c
GH
1933 if (rq->curr->sched_class->post_schedule)
1934 rq->curr->sched_class->post_schedule(rq);
05fa785c 1935 raw_spin_unlock_irqrestore(&rq->lock, flags);
3f029d3c
GH
1936
1937 rq->post_schedule = 0;
1938 }
1939}
1940
1941#else
da19ab51 1942
3f029d3c
GH
1943static inline void pre_schedule(struct rq *rq, struct task_struct *p)
1944{
1945}
1946
1947static inline void post_schedule(struct rq *rq)
1948{
1da177e4
LT
1949}
1950
3f029d3c
GH
1951#endif
1952
1da177e4
LT
1953/**
1954 * schedule_tail - first thing a freshly forked thread must call.
1955 * @prev: the thread we just switched away from.
1956 */
36c8b586 1957asmlinkage void schedule_tail(struct task_struct *prev)
1da177e4
LT
1958 __releases(rq->lock)
1959{
70b97a7f
IM
1960 struct rq *rq = this_rq();
1961
4866cde0 1962 finish_task_switch(rq, prev);
da19ab51 1963
3f029d3c
GH
1964 /*
1965 * FIXME: do we need to worry about rq being invalidated by the
1966 * task_switch?
1967 */
1968 post_schedule(rq);
70b97a7f 1969
4866cde0
NP
1970#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1971 /* In this case, finish_task_switch does not reenable preemption */
1972 preempt_enable();
1973#endif
1da177e4 1974 if (current->set_child_tid)
b488893a 1975 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
1976}
1977
1978/*
1979 * context_switch - switch to the new MM and the new
1980 * thread's register state.
1981 */
dd41f596 1982static inline void
70b97a7f 1983context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 1984 struct task_struct *next)
1da177e4 1985{
dd41f596 1986 struct mm_struct *mm, *oldmm;
1da177e4 1987
e107be36 1988 prepare_task_switch(rq, prev, next);
fe4b04fa 1989
dd41f596
IM
1990 mm = next->mm;
1991 oldmm = prev->active_mm;
9226d125
ZA
1992 /*
1993 * For paravirt, this is coupled with an exit in switch_to to
1994 * combine the page table reload and the switch backend into
1995 * one hypercall.
1996 */
224101ed 1997 arch_start_context_switch(prev);
9226d125 1998
31915ab4 1999 if (!mm) {
1da177e4
LT
2000 next->active_mm = oldmm;
2001 atomic_inc(&oldmm->mm_count);
2002 enter_lazy_tlb(oldmm, next);
2003 } else
2004 switch_mm(oldmm, mm, next);
2005
31915ab4 2006 if (!prev->mm) {
1da177e4 2007 prev->active_mm = NULL;
1da177e4
LT
2008 rq->prev_mm = oldmm;
2009 }
3a5f5e48
IM
2010 /*
 2011 * The runqueue lock will be released by the next
 2012 * task (which is an invalid locking op, but in the case
 2013 * of the scheduler it's an obvious special-case), so we
 2014 * do an early lockdep release here:
2015 */
2016#ifndef __ARCH_WANT_UNLOCKED_CTXSW
8a25d5de 2017 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3a5f5e48 2018#endif
1da177e4
LT
2019
2020 /* Here we just switch the register state and the stack. */
2021 switch_to(prev, next, prev);
2022
dd41f596
IM
2023 barrier();
2024 /*
2025 * this_rq must be evaluated again because prev may have moved
2026 * CPUs since it called schedule(), thus the 'rq' on its stack
2027 * frame will be invalid.
2028 */
2029 finish_task_switch(this_rq(), prev);
1da177e4
LT
2030}
2031
2032/*
2033 * nr_running, nr_uninterruptible and nr_context_switches:
2034 *
2035 * externally visible scheduler statistics: current number of runnable
2036 * threads, current number of uninterruptible-sleeping threads, total
2037 * number of context switches performed since bootup.
2038 */
2039unsigned long nr_running(void)
2040{
2041 unsigned long i, sum = 0;
2042
2043 for_each_online_cpu(i)
2044 sum += cpu_rq(i)->nr_running;
2045
2046 return sum;
f711f609 2047}
1da177e4
LT
2048
2049unsigned long nr_uninterruptible(void)
f711f609 2050{
1da177e4 2051 unsigned long i, sum = 0;
f711f609 2052
0a945022 2053 for_each_possible_cpu(i)
1da177e4 2054 sum += cpu_rq(i)->nr_uninterruptible;
f711f609
GS
2055
2056 /*
1da177e4
LT
 2057 * Since we read the counters locklessly, the sum might be slightly
2058 * inaccurate. Do not allow it to go below zero though:
f711f609 2059 */
1da177e4
LT
2060 if (unlikely((long)sum < 0))
2061 sum = 0;
f711f609 2062
1da177e4 2063 return sum;
f711f609 2064}
f711f609 2065
1da177e4 2066unsigned long long nr_context_switches(void)
46cb4b7c 2067{
cc94abfc
SR
2068 int i;
2069 unsigned long long sum = 0;
46cb4b7c 2070
0a945022 2071 for_each_possible_cpu(i)
1da177e4 2072 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2073
1da177e4
LT
2074 return sum;
2075}
483b4ee6 2076
1da177e4
LT
2077unsigned long nr_iowait(void)
2078{
2079 unsigned long i, sum = 0;
483b4ee6 2080
0a945022 2081 for_each_possible_cpu(i)
1da177e4 2082 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2083
1da177e4
LT
2084 return sum;
2085}
483b4ee6 2086
8c215bd3 2087unsigned long nr_iowait_cpu(int cpu)
69d25870 2088{
8c215bd3 2089 struct rq *this = cpu_rq(cpu);
69d25870
AV
2090 return atomic_read(&this->nr_iowait);
2091}
46cb4b7c 2092
69d25870
AV
2093unsigned long this_cpu_load(void)
2094{
2095 struct rq *this = this_rq();
2096 return this->cpu_load[0];
2097}
e790fb0b 2098
46cb4b7c 2099
dce48a84
TG
2100/* Variables and functions for calc_load */
2101static atomic_long_t calc_load_tasks;
2102static unsigned long calc_load_update;
2103unsigned long avenrun[3];
2104EXPORT_SYMBOL(avenrun);
46cb4b7c 2105
74f5187a
PZ
2106static long calc_load_fold_active(struct rq *this_rq)
2107{
2108 long nr_active, delta = 0;
2109
2110 nr_active = this_rq->nr_running;
2111 nr_active += (long) this_rq->nr_uninterruptible;
2112
2113 if (nr_active != this_rq->calc_load_active) {
2114 delta = nr_active - this_rq->calc_load_active;
2115 this_rq->calc_load_active = nr_active;
2116 }
2117
2118 return delta;
2119}
2120
0f004f5a
PZ
2121static unsigned long
2122calc_load(unsigned long load, unsigned long exp, unsigned long active)
2123{
2124 load *= exp;
2125 load += active * (FIXED_1 - exp);
2126 load += 1UL << (FSHIFT - 1);
2127 return load >> FSHIFT;
2128}
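
/*
 * Worked example (using FSHIFT = 11, FIXED_1 = 2048 and EXP_1 = 1884 as
 * defined in <linux/sched.h>): with a 1-minute average of 1.00
 * (load = 2048) and two runnable tasks (active = 2 * FIXED_1 = 4096),
 * one LOAD_FREQ step yields
 *
 *	(2048 * 1884 + 4096 * (2048 - 1884) + 1024) >> 11 = 2212	(~1.08)
 *
 * i.e. the average moves roughly 1 - EXP_1/FIXED_1 ~ 8% of the way towards
 * the instantaneous value every ~5 seconds.
 */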
2129
74f5187a
PZ
2130#ifdef CONFIG_NO_HZ
2131/*
2132 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2133 *
2134 * When making the ILB scale, we should try to pull this in as well.
2135 */
2136static atomic_long_t calc_load_tasks_idle;
2137
029632fb 2138void calc_load_account_idle(struct rq *this_rq)
74f5187a
PZ
2139{
2140 long delta;
2141
2142 delta = calc_load_fold_active(this_rq);
2143 if (delta)
2144 atomic_long_add(delta, &calc_load_tasks_idle);
2145}
2146
2147static long calc_load_fold_idle(void)
2148{
2149 long delta = 0;
2150
2151 /*
 2152 * It's got a race, we don't care...
2153 */
2154 if (atomic_long_read(&calc_load_tasks_idle))
2155 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
2156
2157 return delta;
2158}
0f004f5a
PZ
2159
2160/**
2161 * fixed_power_int - compute: x^n, in O(log n) time
2162 *
2163 * @x: base of the power
2164 * @frac_bits: fractional bits of @x
2165 * @n: power to raise @x to.
2166 *
2167 * By exploiting the relation between the definition of the natural power
2168 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2169 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2170 * (where: n_i \elem {0, 1}, the binary vector representing n),
2171 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2172 * of course trivially computable in O(log_2 n), the length of our binary
2173 * vector.
2174 */
2175static unsigned long
2176fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2177{
2178 unsigned long result = 1UL << frac_bits;
2179
2180 if (n) for (;;) {
2181 if (n & 1) {
2182 result *= x;
2183 result += 1UL << (frac_bits - 1);
2184 result >>= frac_bits;
2185 }
2186 n >>= 1;
2187 if (!n)
2188 break;
2189 x *= x;
2190 x += 1UL << (frac_bits - 1);
2191 x >>= frac_bits;
2192 }
2193
2194 return result;
2195}
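
/*
 * Example: for n = 5 (binary 101) the loop repeatedly squares x
 * (x, x^2, x^4, ...) and folds into the result only the squares whose bit
 * of n is set, so x^5 is assembled as x^4 * x^1 in O(log2 n) iterations
 * rather than n - 1 sequential multiplications; the 1UL << (frac_bits - 1)
 * term added before each shift rounds every intermediate product to nearest.
 */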
2196
2197/*
2198 * a1 = a0 * e + a * (1 - e)
2199 *
2200 * a2 = a1 * e + a * (1 - e)
2201 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2202 * = a0 * e^2 + a * (1 - e) * (1 + e)
2203 *
2204 * a3 = a2 * e + a * (1 - e)
2205 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2206 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2207 *
2208 * ...
2209 *
2210 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
2211 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2212 * = a0 * e^n + a * (1 - e^n)
2213 *
2214 * [1] application of the geometric series:
2215 *
2216 * n 1 - x^(n+1)
2217 * S_n := \Sum x^i = -------------
2218 * i=0 1 - x
2219 */
2220static unsigned long
2221calc_load_n(unsigned long load, unsigned long exp,
2222 unsigned long active, unsigned int n)
2223{
2224
2225 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2226}
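
/*
 * Example: if the machine goes completely idle (active = 0) and stays
 * tickless for n = 3 LOAD_FREQ periods, the closed form above reduces to
 * avenrun[i] * e^3, so a single calc_load_n() call scales the 1-minute
 * figure by roughly (1884/2048)^3 ~ 0.78, the same result that three
 * individual calc_load() steps would have produced.
 */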
2227
2228/*
2229 * NO_HZ can leave us missing all per-cpu ticks calling
2230 * calc_load_account_active(), but since an idle CPU folds its delta into
2231 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
2232 * in the pending idle delta if our idle period crossed a load cycle boundary.
2233 *
2234 * Once we've updated the global active value, we need to apply the exponential
2235 * weights adjusted to the number of cycles missed.
2236 */
2237static void calc_global_nohz(unsigned long ticks)
2238{
2239 long delta, active, n;
2240
2241 if (time_before(jiffies, calc_load_update))
2242 return;
2243
2244 /*
2245 * If we crossed a calc_load_update boundary, make sure to fold
2246 * any pending idle changes, the respective CPUs might have
2247 * missed the tick driven calc_load_account_active() update
2248 * due to NO_HZ.
2249 */
2250 delta = calc_load_fold_idle();
2251 if (delta)
2252 atomic_long_add(delta, &calc_load_tasks);
2253
2254 /*
2255 * If we were idle for multiple load cycles, apply them.
2256 */
2257 if (ticks >= LOAD_FREQ) {
2258 n = ticks / LOAD_FREQ;
2259
2260 active = atomic_long_read(&calc_load_tasks);
2261 active = active > 0 ? active * FIXED_1 : 0;
2262
2263 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2264 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2265 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
2266
2267 calc_load_update += n * LOAD_FREQ;
2268 }
2269
2270 /*
 2271 * It's possible that the remainder of the above division also crosses
 2272 * a LOAD_FREQ period; the regular check in calc_global_load(),
 2273 * which comes after this, will take care of that.
2274 *
2275 * Consider us being 11 ticks before a cycle completion, and us
2276 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
2277 * age us 4 cycles, and the test in calc_global_load() will
2278 * pick up the final one.
2279 */
2280}
74f5187a 2281#else
029632fb 2282void calc_load_account_idle(struct rq *this_rq)
74f5187a
PZ
2283{
2284}
2285
2286static inline long calc_load_fold_idle(void)
2287{
2288 return 0;
2289}
0f004f5a
PZ
2290
2291static void calc_global_nohz(unsigned long ticks)
2292{
2293}
74f5187a
PZ
2294#endif
2295
2d02494f
TG
2296/**
2297 * get_avenrun - get the load average array
2298 * @loads: pointer to dest load array
2299 * @offset: offset to add
2300 * @shift: shift count to shift the result left
2301 *
2302 * These values are estimates at best, so no need for locking.
2303 */
2304void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2305{
2306 loads[0] = (avenrun[0] + offset) << shift;
2307 loads[1] = (avenrun[1] + offset) << shift;
2308 loads[2] = (avenrun[2] + offset) << shift;
46cb4b7c 2309}
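
/*
 * Sketch of a typical consumer (modelled on fs/proc/loadavg.c; details may
 * differ): the familiar "0.08 0.02 0.01" numbers are obtained by reading
 * the fixed-point averages with a small rounding offset and splitting them
 * with the LOAD_INT()/LOAD_FRAC() helpers from <linux/sched.h>.
 */
static void demo_print_loadavg(void)
{
	unsigned long avnrun[3];

	get_avenrun(avnrun, FIXED_1/200, 0);	/* +0.005 rounding offset */

	printk(KERN_DEBUG "%lu.%02lu %lu.%02lu %lu.%02lu\n",
	       LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
	       LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
	       LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
}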
46cb4b7c 2310
46cb4b7c 2311/*
dce48a84
TG
2312 * calc_load - update the avenrun load estimates 10 ticks after the
2313 * CPUs have updated calc_load_tasks.
7835b98b 2314 */
0f004f5a 2315void calc_global_load(unsigned long ticks)
7835b98b 2316{
dce48a84 2317 long active;
1da177e4 2318
0f004f5a
PZ
2319 calc_global_nohz(ticks);
2320
2321 if (time_before(jiffies, calc_load_update + 10))
dce48a84 2322 return;
1da177e4 2323
dce48a84
TG
2324 active = atomic_long_read(&calc_load_tasks);
2325 active = active > 0 ? active * FIXED_1 : 0;
1da177e4 2326
dce48a84
TG
2327 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2328 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2329 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
dd41f596 2330
dce48a84
TG
2331 calc_load_update += LOAD_FREQ;
2332}
1da177e4 2333
dce48a84 2334/*
74f5187a
PZ
2335 * Called from update_cpu_load() to periodically update this CPU's
2336 * active count.
dce48a84
TG
2337 */
2338static void calc_load_account_active(struct rq *this_rq)
2339{
74f5187a 2340 long delta;
08c183f3 2341
74f5187a
PZ
2342 if (time_before(jiffies, this_rq->calc_load_update))
2343 return;
783609c6 2344
74f5187a
PZ
2345 delta = calc_load_fold_active(this_rq);
2346 delta += calc_load_fold_idle();
2347 if (delta)
dce48a84 2348 atomic_long_add(delta, &calc_load_tasks);
74f5187a
PZ
2349
2350 this_rq->calc_load_update += LOAD_FREQ;
46cb4b7c
SS
2351}
2352
fdf3e95d
VP
2353/*
2354 * The exact cpuload at various idx values, calculated at every tick would be
2355 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2356 *
2357 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
 2358 * on the nth tick, when the cpu may be busy, then we have:
2359 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2360 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2361 *
2362 * decay_load_missed() below does efficient calculation of
2363 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2364 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
2365 *
2366 * The calculation is approximated on a 128 point scale.
2367 * degrade_zero_ticks is the number of ticks after which load at any
2368 * particular idx is approximated to be zero.
2369 * degrade_factor is a precomputed table, a row for each load idx.
2370 * Each column corresponds to degradation factor for a power of two ticks,
2371 * based on 128 point scale.
2372 * Example:
2373 * row 2, col 3 (=12) says that the degradation at load idx 2 after
2374 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2375 *
2376 * With this power of 2 load factors, we can degrade the load n times
2377 * by looking at 1 bits in n and doing as many mult/shift instead of
2378 * n mult/shifts needed by the exact degradation.
2379 */
2380#define DEGRADE_SHIFT 7
2381static const unsigned char
2382 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2383static const unsigned char
2384 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2385 {0, 0, 0, 0, 0, 0, 0, 0},
2386 {64, 32, 8, 0, 0, 0, 0, 0},
2387 {96, 72, 40, 12, 1, 0, 0},
2388 {112, 98, 75, 43, 15, 1, 0},
2389 {120, 112, 98, 76, 45, 16, 2} };
2390
2391/*
 2392 * Update cpu_load for any missed ticks due to tickless idle. The backlog
 2393 * arises while the CPU is idle, so we just decay the old load without
 2394 * adding any new load.
2395 */
2396static unsigned long
2397decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2398{
2399 int j = 0;
2400
2401 if (!missed_updates)
2402 return load;
2403
2404 if (missed_updates >= degrade_zero_ticks[idx])
2405 return 0;
2406
2407 if (idx == 1)
2408 return load >> missed_updates;
2409
2410 while (missed_updates) {
2411 if (missed_updates % 2)
2412 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2413
2414 missed_updates >>= 1;
2415 j++;
2416 }
2417 return load;
2418}
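
/*
 * Worked example: idx = 2 (per-tick factor 3/4) and missed_updates = 5
 * (binary 101). The loop multiplies by degrade_factor[2][0] = 96 for the
 * 1-tick bit and by degrade_factor[2][2] = 40 for the 4-tick bit, giving
 * (96/128) * (40/128) ~ 0.234, a close match for the exact (3/4)^5 ~ 0.237,
 * with two multiplications instead of five.
 */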
2419
46cb4b7c 2420/*
dd41f596 2421 * Update rq->cpu_load[] statistics. This function is usually called every
fdf3e95d
VP
2422 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2423 * every tick. We fix it up based on jiffies.
46cb4b7c 2424 */
029632fb 2425void update_cpu_load(struct rq *this_rq)
46cb4b7c 2426{
495eca49 2427 unsigned long this_load = this_rq->load.weight;
fdf3e95d
VP
2428 unsigned long curr_jiffies = jiffies;
2429 unsigned long pending_updates;
dd41f596 2430 int i, scale;
46cb4b7c 2431
dd41f596 2432 this_rq->nr_load_updates++;
46cb4b7c 2433
fdf3e95d
VP
2434 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
2435 if (curr_jiffies == this_rq->last_load_update_tick)
2436 return;
2437
2438 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2439 this_rq->last_load_update_tick = curr_jiffies;
2440
dd41f596 2441 /* Update our load: */
fdf3e95d
VP
2442 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2443 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
dd41f596 2444 unsigned long old_load, new_load;
7d1e6a9b 2445
dd41f596 2446 /* scale is effectively 1 << i now, and >> i divides by scale */
46cb4b7c 2447
dd41f596 2448 old_load = this_rq->cpu_load[i];
fdf3e95d 2449 old_load = decay_load_missed(old_load, pending_updates - 1, i);
dd41f596 2450 new_load = this_load;
a25707f3
IM
2451 /*
2452 * Round up the averaging division if load is increasing. This
2453 * prevents us from getting stuck on 9 if the load is 10, for
2454 * example.
2455 */
2456 if (new_load > old_load)
fdf3e95d
VP
2457 new_load += scale - 1;
2458
2459 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
dd41f596 2460 }
da2b71ed
SS
2461
2462 sched_avg_update(this_rq);
fdf3e95d
VP
2463}
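
/*
 * Worked example for a single index: with i = 2 (scale = 4), old_load = 0
 * and this_load = 100, the load is rising so new_load is bumped by
 * scale - 1 to 103 and
 *
 *	cpu_load[2] = (0 * 3 + 103) >> 2 = 25
 *
 * the next busy tick gives (25 * 3 + 103) >> 2 = 44, and so on towards 100;
 * without the rounding bump the filter could stall just short of the target.
 */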
2464
2465static void update_cpu_load_active(struct rq *this_rq)
2466{
2467 update_cpu_load(this_rq);
46cb4b7c 2468
74f5187a 2469 calc_load_account_active(this_rq);
46cb4b7c
SS
2470}
2471
dd41f596 2472#ifdef CONFIG_SMP
8a0be9ef 2473
46cb4b7c 2474/*
38022906
PZ
2475 * sched_exec - execve() is a valuable balancing opportunity, because at
2476 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2477 */
38022906 2478void sched_exec(void)
46cb4b7c 2479{
38022906 2480 struct task_struct *p = current;
1da177e4 2481 unsigned long flags;
0017d735 2482 int dest_cpu;
46cb4b7c 2483
8f42ced9 2484 raw_spin_lock_irqsave(&p->pi_lock, flags);
7608dec2 2485 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
0017d735
PZ
2486 if (dest_cpu == smp_processor_id())
2487 goto unlock;
38022906 2488
8f42ced9 2489 if (likely(cpu_active(dest_cpu))) {
969c7921 2490 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2491
8f42ced9
PZ
2492 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2493 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2494 return;
2495 }
0017d735 2496unlock:
8f42ced9 2497 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2498}
dd41f596 2499
1da177e4
LT
2500#endif
2501
1da177e4
LT
2502DEFINE_PER_CPU(struct kernel_stat, kstat);
2503
2504EXPORT_PER_CPU_SYMBOL(kstat);
2505
2506/*
c5f8d995 2507 * Return any ns on the sched_clock that have not yet been accounted in
f06febc9 2508 * @p in case that task is currently running.
c5f8d995
HS
2509 *
2510 * Called with task_rq_lock() held on @rq.
1da177e4 2511 */
c5f8d995
HS
2512static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2513{
2514 u64 ns = 0;
2515
2516 if (task_current(rq, p)) {
2517 update_rq_clock(rq);
305e6835 2518 ns = rq->clock_task - p->se.exec_start;
c5f8d995
HS
2519 if ((s64)ns < 0)
2520 ns = 0;
2521 }
2522
2523 return ns;
2524}
2525
bb34d92f 2526unsigned long long task_delta_exec(struct task_struct *p)
1da177e4 2527{
1da177e4 2528 unsigned long flags;
41b86e9c 2529 struct rq *rq;
bb34d92f 2530 u64 ns = 0;
48f24c4d 2531
41b86e9c 2532 rq = task_rq_lock(p, &flags);
c5f8d995 2533 ns = do_task_delta_exec(p, rq);
0122ec5b 2534 task_rq_unlock(rq, p, &flags);
1508487e 2535
c5f8d995
HS
2536 return ns;
2537}
f06febc9 2538
c5f8d995
HS
2539/*
2540 * Return accounted runtime for the task.
2541 * In case the task is currently running, return the runtime plus current's
 2542 * pending runtime that has not been accounted yet.
2543 */
2544unsigned long long task_sched_runtime(struct task_struct *p)
2545{
2546 unsigned long flags;
2547 struct rq *rq;
2548 u64 ns = 0;
2549
2550 rq = task_rq_lock(p, &flags);
2551 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
0122ec5b 2552 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
2553
2554 return ns;
2555}
48f24c4d 2556
1da177e4
LT
2557/*
2558 * Account user cpu time to a process.
2559 * @p: the process that the cpu time gets accounted to
1da177e4 2560 * @cputime: the cpu time spent in user space since the last update
457533a7 2561 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4 2562 */
457533a7
MS
2563void account_user_time(struct task_struct *p, cputime_t cputime,
2564 cputime_t cputime_scaled)
1da177e4
LT
2565{
2566 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2567 cputime64_t tmp;
2568
457533a7 2569 /* Add user time to process. */
1da177e4 2570 p->utime = cputime_add(p->utime, cputime);
457533a7 2571 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 2572 account_group_user_time(p, cputime);
1da177e4
LT
2573
2574 /* Add user time to cpustat. */
2575 tmp = cputime_to_cputime64(cputime);
2576 if (TASK_NICE(p) > 0)
2577 cpustat->nice = cputime64_add(cpustat->nice, tmp);
2578 else
2579 cpustat->user = cputime64_add(cpustat->user, tmp);
ef12fefa
BR
2580
2581 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
49b5cf34
JL
2582 /* Account for user time used */
2583 acct_update_integrals(p);
1da177e4
LT
2584}
2585
94886b84
LV
2586/*
2587 * Account guest cpu time to a process.
2588 * @p: the process that the cpu time gets accounted to
2589 * @cputime: the cpu time spent in virtual machine since the last update
457533a7 2590 * @cputime_scaled: cputime scaled by cpu frequency
94886b84 2591 */
457533a7
MS
2592static void account_guest_time(struct task_struct *p, cputime_t cputime,
2593 cputime_t cputime_scaled)
94886b84
LV
2594{
2595 cputime64_t tmp;
2596 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2597
2598 tmp = cputime_to_cputime64(cputime);
2599
457533a7 2600 /* Add guest time to process. */
94886b84 2601 p->utime = cputime_add(p->utime, cputime);
457533a7 2602 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 2603 account_group_user_time(p, cputime);
94886b84
LV
2604 p->gtime = cputime_add(p->gtime, cputime);
2605
457533a7 2606 /* Add guest time to cpustat. */
ce0e7b28
RO
2607 if (TASK_NICE(p) > 0) {
2608 cpustat->nice = cputime64_add(cpustat->nice, tmp);
2609 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
2610 } else {
2611 cpustat->user = cputime64_add(cpustat->user, tmp);
2612 cpustat->guest = cputime64_add(cpustat->guest, tmp);
2613 }
94886b84
LV
2614}
2615
70a89a66
VP
2616/*
2617 * Account system cpu time to a process and desired cpustat field
2618 * @p: the process that the cpu time gets accounted to
2619 * @cputime: the cpu time spent in kernel space since the last update
2620 * @cputime_scaled: cputime scaled by cpu frequency
2621 * @target_cputime64: pointer to cpustat field that has to be updated
2622 */
2623static inline
2624void __account_system_time(struct task_struct *p, cputime_t cputime,
2625 cputime_t cputime_scaled, cputime64_t *target_cputime64)
2626{
2627 cputime64_t tmp = cputime_to_cputime64(cputime);
2628
2629 /* Add system time to process. */
2630 p->stime = cputime_add(p->stime, cputime);
2631 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
2632 account_group_system_time(p, cputime);
2633
2634 /* Add system time to cpustat. */
2635 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
2636 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
2637
2638 /* Account for system time used */
2639 acct_update_integrals(p);
2640}
2641
1da177e4
LT
2642/*
2643 * Account system cpu time to a process.
2644 * @p: the process that the cpu time gets accounted to
2645 * @hardirq_offset: the offset to subtract from hardirq_count()
2646 * @cputime: the cpu time spent in kernel space since the last update
457533a7 2647 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4
LT
2648 */
2649void account_system_time(struct task_struct *p, int hardirq_offset,
457533a7 2650 cputime_t cputime, cputime_t cputime_scaled)
1da177e4
LT
2651{
2652 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
70a89a66 2653 cputime64_t *target_cputime64;
1da177e4 2654
983ed7a6 2655 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
457533a7 2656 account_guest_time(p, cputime, cputime_scaled);
983ed7a6
HH
2657 return;
2658 }
94886b84 2659
1da177e4 2660 if (hardirq_count() - hardirq_offset)
70a89a66 2661 target_cputime64 = &cpustat->irq;
75e1056f 2662 else if (in_serving_softirq())
70a89a66 2663 target_cputime64 = &cpustat->softirq;
1da177e4 2664 else
70a89a66 2665 target_cputime64 = &cpustat->system;
ef12fefa 2666
70a89a66 2667 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
1da177e4
LT
2668}
2669
c66f08be 2670/*
1da177e4 2671 * Account for involuntary wait time.
544b4a1f 2672 * @cputime: the cpu time spent in involuntary wait
c66f08be 2673 */
79741dd3 2674void account_steal_time(cputime_t cputime)
c66f08be 2675{
79741dd3
MS
2676 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2677 cputime64_t cputime64 = cputime_to_cputime64(cputime);
2678
2679 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
c66f08be
MN
2680}
2681
1da177e4 2682/*
79741dd3
MS
2683 * Account for idle time.
2684 * @cputime: the cpu time spent in idle wait
1da177e4 2685 */
79741dd3 2686void account_idle_time(cputime_t cputime)
1da177e4
LT
2687{
2688 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
79741dd3 2689 cputime64_t cputime64 = cputime_to_cputime64(cputime);
70b97a7f 2690 struct rq *rq = this_rq();
1da177e4 2691
79741dd3
MS
2692 if (atomic_read(&rq->nr_iowait) > 0)
2693 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
2694 else
2695 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
1da177e4
LT
2696}
2697
e6e6685a
GC
2698static __always_inline bool steal_account_process_tick(void)
2699{
2700#ifdef CONFIG_PARAVIRT
2701 if (static_branch(&paravirt_steal_enabled)) {
2702 u64 steal, st = 0;
2703
2704 steal = paravirt_steal_clock(smp_processor_id());
2705 steal -= this_rq()->prev_steal_time;
2706
2707 st = steal_ticks(steal);
2708 this_rq()->prev_steal_time += st * TICK_NSEC;
2709
2710 account_steal_time(st);
2711 return st;
2712 }
2713#endif
2714 return false;
2715}
2716
79741dd3
MS
2717#ifndef CONFIG_VIRT_CPU_ACCOUNTING
2718
abb74cef
VP
2719#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2720/*
2721 * Account a tick to a process and cpustat
2722 * @p: the process that the cpu time gets accounted to
2723 * @user_tick: is the tick from userspace
2724 * @rq: the pointer to rq
2725 *
2726 * Tick demultiplexing follows the order
2727 * - pending hardirq update
2728 * - pending softirq update
2729 * - user_time
2730 * - idle_time
2731 * - system time
2732 * - check for guest_time
2733 * - else account as system_time
2734 *
 2735 * The check for hardirq is done both for system and user time, as there is
 2736 * no timer going off while we are on hardirq and hence we may never get an
 2737 * opportunity to update it solely in system time.
 2738 * p->stime and friends are only updated on system time and not on irq or
 2739 * softirq time, as those no longer count towards task exec_runtime.
2740 */
2741static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2742 struct rq *rq)
2743{
2744 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
2745 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
2746 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2747
e6e6685a
GC
2748 if (steal_account_process_tick())
2749 return;
2750
abb74cef
VP
2751 if (irqtime_account_hi_update()) {
2752 cpustat->irq = cputime64_add(cpustat->irq, tmp);
2753 } else if (irqtime_account_si_update()) {
2754 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
414bee9b
VP
2755 } else if (this_cpu_ksoftirqd() == p) {
2756 /*
 2757 * ksoftirqd time does not get accounted in cpu_softirq_time.
2758 * So, we have to handle it separately here.
2759 * Also, p->stime needs to be updated for ksoftirqd.
2760 */
2761 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
2762 &cpustat->softirq);
abb74cef
VP
2763 } else if (user_tick) {
2764 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
2765 } else if (p == rq->idle) {
2766 account_idle_time(cputime_one_jiffy);
2767 } else if (p->flags & PF_VCPU) { /* System time or guest time */
2768 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
2769 } else {
2770 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
2771 &cpustat->system);
2772 }
2773}
2774
2775static void irqtime_account_idle_ticks(int ticks)
2776{
2777 int i;
2778 struct rq *rq = this_rq();
2779
2780 for (i = 0; i < ticks; i++)
2781 irqtime_account_process_tick(current, 0, rq);
2782}
544b4a1f 2783#else /* CONFIG_IRQ_TIME_ACCOUNTING */
abb74cef
VP
2784static void irqtime_account_idle_ticks(int ticks) {}
2785static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2786 struct rq *rq) {}
544b4a1f 2787#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
79741dd3
MS
2788
2789/*
2790 * Account a single tick of cpu time.
2791 * @p: the process that the cpu time gets accounted to
2792 * @user_tick: indicates if the tick is a user or a system tick
2793 */
2794void account_process_tick(struct task_struct *p, int user_tick)
2795{
a42548a1 2796 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
79741dd3
MS
2797 struct rq *rq = this_rq();
2798
abb74cef
VP
2799 if (sched_clock_irqtime) {
2800 irqtime_account_process_tick(p, user_tick, rq);
2801 return;
2802 }
2803
e6e6685a
GC
2804 if (steal_account_process_tick())
2805 return;
2806
79741dd3 2807 if (user_tick)
a42548a1 2808 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
f5f293a4 2809 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
a42548a1 2810 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
79741dd3
MS
2811 one_jiffy_scaled);
2812 else
a42548a1 2813 account_idle_time(cputime_one_jiffy);
79741dd3
MS
2814}
2815
2816/*
2817 * Account multiple ticks of steal time.
2818 * @p: the process from which the cpu time has been stolen
2819 * @ticks: number of stolen ticks
2820 */
2821void account_steal_ticks(unsigned long ticks)
2822{
2823 account_steal_time(jiffies_to_cputime(ticks));
2824}
2825
2826/*
2827 * Account multiple ticks of idle time.
 2828 * @ticks: number of idle ticks
2829 */
2830void account_idle_ticks(unsigned long ticks)
2831{
abb74cef
VP
2832
2833 if (sched_clock_irqtime) {
2834 irqtime_account_idle_ticks(ticks);
2835 return;
2836 }
2837
79741dd3 2838 account_idle_time(jiffies_to_cputime(ticks));
1da177e4
LT
2839}
2840
79741dd3
MS
2841#endif
2842
49048622
BS
2843/*
2844 * Use precise platform statistics if available:
2845 */
2846#ifdef CONFIG_VIRT_CPU_ACCOUNTING
d180c5bc 2847void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2848{
d99ca3b9
HS
2849 *ut = p->utime;
2850 *st = p->stime;
49048622
BS
2851}
2852
0cf55e1e 2853void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2854{
0cf55e1e
HS
2855 struct task_cputime cputime;
2856
2857 thread_group_cputime(p, &cputime);
2858
2859 *ut = cputime.utime;
2860 *st = cputime.stime;
49048622
BS
2861}
2862#else
761b1d26
HS
2863
2864#ifndef nsecs_to_cputime
b7b20df9 2865# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
761b1d26
HS
2866#endif
2867
d180c5bc 2868void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2869{
d99ca3b9 2870 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
49048622
BS
2871
2872 /*
2873 * Use CFS's precise accounting:
2874 */
d180c5bc 2875 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
49048622
BS
2876
2877 if (total) {
e75e863d 2878 u64 temp = rtime;
d180c5bc 2879
e75e863d 2880 temp *= utime;
49048622 2881 do_div(temp, total);
d180c5bc
HS
2882 utime = (cputime_t)temp;
2883 } else
2884 utime = rtime;
49048622 2885
d180c5bc
HS
2886 /*
2887 * Compare with previous values, to keep monotonicity:
2888 */
761b1d26 2889 p->prev_utime = max(p->prev_utime, utime);
d99ca3b9 2890 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
49048622 2891
d99ca3b9
HS
2892 *ut = p->prev_utime;
2893 *st = p->prev_stime;
49048622
BS
2894}
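
/*
 * Worked example: suppose the tick-based split so far is utime = 6 and
 * stime = 4 units of cputime (total = 10), while CFS has accounted
 * rtime = 12. The user share is rescaled to 12 * 6 / 10 = 7 and the
 * remainder, 12 - 7 = 5, is reported as system time; the max() against
 * the previous values keeps both figures monotonic between calls.
 */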
2895
0cf55e1e
HS
2896/*
2897 * Must be called with siglock held.
2898 */
2899void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 2900{
0cf55e1e
HS
2901 struct signal_struct *sig = p->signal;
2902 struct task_cputime cputime;
2903 cputime_t rtime, utime, total;
49048622 2904
0cf55e1e 2905 thread_group_cputime(p, &cputime);
49048622 2906
0cf55e1e
HS
2907 total = cputime_add(cputime.utime, cputime.stime);
2908 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
49048622 2909
0cf55e1e 2910 if (total) {
e75e863d 2911 u64 temp = rtime;
49048622 2912
e75e863d 2913 temp *= cputime.utime;
0cf55e1e
HS
2914 do_div(temp, total);
2915 utime = (cputime_t)temp;
2916 } else
2917 utime = rtime;
2918
2919 sig->prev_utime = max(sig->prev_utime, utime);
2920 sig->prev_stime = max(sig->prev_stime,
2921 cputime_sub(rtime, sig->prev_utime));
2922
2923 *ut = sig->prev_utime;
2924 *st = sig->prev_stime;
49048622 2925}
49048622 2926#endif
49048622 2927
7835b98b
CL
2928/*
2929 * This function gets called by the timer code, with HZ frequency.
2930 * We call it with interrupts disabled.
7835b98b
CL
2931 */
2932void scheduler_tick(void)
2933{
7835b98b
CL
2934 int cpu = smp_processor_id();
2935 struct rq *rq = cpu_rq(cpu);
dd41f596 2936 struct task_struct *curr = rq->curr;
3e51f33f
PZ
2937
2938 sched_clock_tick();
dd41f596 2939
05fa785c 2940 raw_spin_lock(&rq->lock);
3e51f33f 2941 update_rq_clock(rq);
fdf3e95d 2942 update_cpu_load_active(rq);
fa85ae24 2943 curr->sched_class->task_tick(rq, curr, 0);
05fa785c 2944 raw_spin_unlock(&rq->lock);
7835b98b 2945
e9d2b064 2946 perf_event_task_tick();
e220d2dc 2947
e418e1c2 2948#ifdef CONFIG_SMP
6eb57e0d 2949 rq->idle_balance = idle_cpu(cpu);
dd41f596 2950 trigger_load_balance(rq, cpu);
e418e1c2 2951#endif
1da177e4
LT
2952}
2953
132380a0 2954notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
2955{
2956 if (in_lock_functions(addr)) {
2957 addr = CALLER_ADDR2;
2958 if (in_lock_functions(addr))
2959 addr = CALLER_ADDR3;
2960 }
2961 return addr;
2962}
1da177e4 2963
7e49fcce
SR
2964#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2965 defined(CONFIG_PREEMPT_TRACER))
2966
43627582 2967void __kprobes add_preempt_count(int val)
1da177e4 2968{
6cd8a4bb 2969#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2970 /*
2971 * Underflow?
2972 */
9a11b49a
IM
2973 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2974 return;
6cd8a4bb 2975#endif
1da177e4 2976 preempt_count() += val;
6cd8a4bb 2977#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2978 /*
2979 * Spinlock count overflowing soon?
2980 */
33859f7f
MOS
2981 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2982 PREEMPT_MASK - 10);
6cd8a4bb
SR
2983#endif
2984 if (preempt_count() == val)
2985 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
2986}
2987EXPORT_SYMBOL(add_preempt_count);
2988
43627582 2989void __kprobes sub_preempt_count(int val)
1da177e4 2990{
6cd8a4bb 2991#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2992 /*
2993 * Underflow?
2994 */
01e3eb82 2995 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 2996 return;
1da177e4
LT
2997 /*
2998 * Is the spinlock portion underflowing?
2999 */
9a11b49a
IM
3000 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3001 !(preempt_count() & PREEMPT_MASK)))
3002 return;
6cd8a4bb 3003#endif
9a11b49a 3004
6cd8a4bb
SR
3005 if (preempt_count() == val)
3006 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
3007 preempt_count() -= val;
3008}
3009EXPORT_SYMBOL(sub_preempt_count);
3010
3011#endif
3012
3013/*
dd41f596 3014 * Print scheduling while atomic bug:
1da177e4 3015 */
dd41f596 3016static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 3017{
838225b4
SS
3018 struct pt_regs *regs = get_irq_regs();
3019
3df0fc5b
PZ
3020 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3021 prev->comm, prev->pid, preempt_count());
838225b4 3022
dd41f596 3023 debug_show_held_locks(prev);
e21f5b15 3024 print_modules();
dd41f596
IM
3025 if (irqs_disabled())
3026 print_irqtrace_events(prev);
838225b4
SS
3027
3028 if (regs)
3029 show_regs(regs);
3030 else
3031 dump_stack();
dd41f596 3032}
1da177e4 3033
dd41f596
IM
3034/*
3035 * Various schedule()-time debugging checks and statistics:
3036 */
3037static inline void schedule_debug(struct task_struct *prev)
3038{
1da177e4 3039 /*
41a2d6cf 3040 * Test if we are atomic. Since do_exit() needs to call into
1da177e4
LT
3041 * schedule() atomically, we ignore that path for now.
3042 * Otherwise, whine if we are scheduling when we should not be.
3043 */
3f33a7ce 3044 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
dd41f596 3045 __schedule_bug(prev);
b3fbab05 3046 rcu_sleep_check();
dd41f596 3047
1da177e4
LT
3048 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3049
2d72376b 3050 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
3051}
3052
6cecd084 3053static void put_prev_task(struct rq *rq, struct task_struct *prev)
df1c99d4 3054{
61eadef6 3055 if (prev->on_rq || rq->skip_clock_update < 0)
a64692a3 3056 update_rq_clock(rq);
6cecd084 3057 prev->sched_class->put_prev_task(rq, prev);
df1c99d4
MG
3058}
3059
dd41f596
IM
3060/*
3061 * Pick up the highest-prio task:
3062 */
3063static inline struct task_struct *
b67802ea 3064pick_next_task(struct rq *rq)
dd41f596 3065{
5522d5d5 3066 const struct sched_class *class;
dd41f596 3067 struct task_struct *p;
1da177e4
LT
3068
3069 /*
dd41f596
IM
3070 * Optimization: we know that if all tasks are in
3071 * the fair class we can call that function directly:
1da177e4 3072 */
953bfcd1 3073 if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
fb8d4724 3074 p = fair_sched_class.pick_next_task(rq);
dd41f596
IM
3075 if (likely(p))
3076 return p;
1da177e4
LT
3077 }
3078
34f971f6 3079 for_each_class(class) {
fb8d4724 3080 p = class->pick_next_task(rq);
dd41f596
IM
3081 if (p)
3082 return p;
dd41f596 3083 }
34f971f6
PZ
3084
3085 BUG(); /* the idle class will always have a runnable task */
dd41f596 3086}
1da177e4 3087
dd41f596 3088/*
c259e01a 3089 * __schedule() is the main scheduler function.
dd41f596 3090 */
c259e01a 3091static void __sched __schedule(void)
dd41f596
IM
3092{
3093 struct task_struct *prev, *next;
67ca7bde 3094 unsigned long *switch_count;
dd41f596 3095 struct rq *rq;
31656519 3096 int cpu;
dd41f596 3097
ff743345
PZ
3098need_resched:
3099 preempt_disable();
dd41f596
IM
3100 cpu = smp_processor_id();
3101 rq = cpu_rq(cpu);
25502a6c 3102 rcu_note_context_switch(cpu);
dd41f596 3103 prev = rq->curr;
dd41f596 3104
dd41f596 3105 schedule_debug(prev);
1da177e4 3106
31656519 3107 if (sched_feat(HRTICK))
f333fdc9 3108 hrtick_clear(rq);
8f4d37ec 3109
05fa785c 3110 raw_spin_lock_irq(&rq->lock);
1da177e4 3111
246d86b5 3112 switch_count = &prev->nivcsw;
1da177e4 3113 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
21aa9af0 3114 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3115 prev->state = TASK_RUNNING;
21aa9af0 3116 } else {
2acca55e
PZ
3117 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3118 prev->on_rq = 0;
3119
21aa9af0 3120 /*
2acca55e
PZ
3121 * If a worker went to sleep, notify and ask workqueue
3122 * whether it wants to wake up a task to maintain
3123 * concurrency.
21aa9af0
TH
3124 */
3125 if (prev->flags & PF_WQ_WORKER) {
3126 struct task_struct *to_wakeup;
3127
3128 to_wakeup = wq_worker_sleeping(prev, cpu);
3129 if (to_wakeup)
3130 try_to_wake_up_local(to_wakeup);
3131 }
21aa9af0 3132 }
dd41f596 3133 switch_count = &prev->nvcsw;
1da177e4
LT
3134 }
3135
3f029d3c 3136 pre_schedule(rq, prev);
f65eda4f 3137
dd41f596 3138 if (unlikely(!rq->nr_running))
1da177e4 3139 idle_balance(cpu, rq);
1da177e4 3140
df1c99d4 3141 put_prev_task(rq, prev);
b67802ea 3142 next = pick_next_task(rq);
f26f9aff
MG
3143 clear_tsk_need_resched(prev);
3144 rq->skip_clock_update = 0;
1da177e4 3145
1da177e4 3146 if (likely(prev != next)) {
1da177e4
LT
3147 rq->nr_switches++;
3148 rq->curr = next;
3149 ++*switch_count;
3150
dd41f596 3151 context_switch(rq, prev, next); /* unlocks the rq */
8f4d37ec 3152 /*
246d86b5
ON
 3153 * The context switch has flipped the stack from under us
3154 * and restored the local variables which were saved when
3155 * this task called schedule() in the past. prev == current
3156 * is still correct, but it can be moved to another cpu/rq.
8f4d37ec
PZ
3157 */
3158 cpu = smp_processor_id();
3159 rq = cpu_rq(cpu);
1da177e4 3160 } else
05fa785c 3161 raw_spin_unlock_irq(&rq->lock);
1da177e4 3162
3f029d3c 3163 post_schedule(rq);
1da177e4 3164
1da177e4 3165 preempt_enable_no_resched();
ff743345 3166 if (need_resched())
1da177e4
LT
3167 goto need_resched;
3168}
c259e01a 3169
9c40cef2
TG
3170static inline void sched_submit_work(struct task_struct *tsk)
3171{
3172 if (!tsk->state)
3173 return;
3174 /*
3175 * If we are going to sleep and we have plugged IO queued,
3176 * make sure to submit it to avoid deadlocks.
3177 */
3178 if (blk_needs_flush_plug(tsk))
3179 blk_schedule_flush_plug(tsk);
3180}
3181
6ebbe7a0 3182asmlinkage void __sched schedule(void)
c259e01a 3183{
9c40cef2
TG
3184 struct task_struct *tsk = current;
3185
3186 sched_submit_work(tsk);
c259e01a
TG
3187 __schedule();
3188}
1da177e4
LT
3189EXPORT_SYMBOL(schedule);
3190
c08f7829 3191#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
0d66bf6d 3192
c6eb3dda
PZ
3193static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
3194{
c6eb3dda 3195 if (lock->owner != owner)
307bf980 3196 return false;
0d66bf6d
PZ
3197
3198 /*
c6eb3dda
PZ
3199 * Ensure we emit the owner->on_cpu, dereference _after_ checking
3200 * lock->owner still matches owner, if that fails, owner might
3201 * point to free()d memory, if it still matches, the rcu_read_lock()
3202 * ensures the memory stays valid.
0d66bf6d 3203 */
c6eb3dda 3204 barrier();
0d66bf6d 3205
307bf980 3206 return owner->on_cpu;
c6eb3dda 3207}
0d66bf6d 3208
c6eb3dda
PZ
3209/*
3210 * Look out! "owner" is an entirely speculative pointer
3211 * access and not reliable.
3212 */
3213int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
3214{
3215 if (!sched_feat(OWNER_SPIN))
3216 return 0;
0d66bf6d 3217
307bf980 3218 rcu_read_lock();
c6eb3dda
PZ
3219 while (owner_running(lock, owner)) {
3220 if (need_resched())
307bf980 3221 break;
0d66bf6d 3222
335d7afb 3223 arch_mutex_cpu_relax();
0d66bf6d 3224 }
307bf980 3225 rcu_read_unlock();
4b402210 3226
c6eb3dda 3227 /*
307bf980
TG
 3228 * We break out of the loop above on need_resched() and when the
 3229 * owner changes, which is a sign of heavy contention. Return
3230 * success only when lock->owner is NULL.
c6eb3dda 3231 */
307bf980 3232 return lock->owner == NULL;
0d66bf6d
PZ
3233}
3234#endif
3235
1da177e4
LT
3236#ifdef CONFIG_PREEMPT
3237/*
2ed6e34f 3238 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3239 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
3240 * occur there and call schedule directly.
3241 */
d1f74e20 3242asmlinkage void __sched notrace preempt_schedule(void)
1da177e4
LT
3243{
3244 struct thread_info *ti = current_thread_info();
6478d880 3245
1da177e4
LT
3246 /*
3247 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3248 * we do not want to preempt the current task. Just return..
1da177e4 3249 */
beed33a8 3250 if (likely(ti->preempt_count || irqs_disabled()))
1da177e4
LT
3251 return;
3252
3a5c359a 3253 do {
d1f74e20 3254 add_preempt_count_notrace(PREEMPT_ACTIVE);
c259e01a 3255 __schedule();
d1f74e20 3256 sub_preempt_count_notrace(PREEMPT_ACTIVE);
1da177e4 3257
3a5c359a
AK
3258 /*
3259 * Check again in case we missed a preemption opportunity
3260 * between schedule and now.
3261 */
3262 barrier();
5ed0cec0 3263 } while (need_resched());
1da177e4 3264}
1da177e4
LT
3265EXPORT_SYMBOL(preempt_schedule);
3266
3267/*
2ed6e34f 3268 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3269 * off of irq context.
3270 * Note, that this is called and return with irqs disabled. This will
3271 * protect us against recursive calling from irq.
3272 */
3273asmlinkage void __sched preempt_schedule_irq(void)
3274{
3275 struct thread_info *ti = current_thread_info();
6478d880 3276
2ed6e34f 3277 /* Catch callers which need to be fixed */
1da177e4
LT
3278 BUG_ON(ti->preempt_count || !irqs_disabled());
3279
3a5c359a
AK
3280 do {
3281 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a 3282 local_irq_enable();
c259e01a 3283 __schedule();
3a5c359a 3284 local_irq_disable();
3a5c359a 3285 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 3286
3a5c359a
AK
3287 /*
3288 * Check again in case we missed a preemption opportunity
3289 * between schedule and now.
3290 */
3291 barrier();
5ed0cec0 3292 } while (need_resched());
1da177e4
LT
3293}
3294
3295#endif /* CONFIG_PREEMPT */
3296
63859d4f 3297int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3298 void *key)
1da177e4 3299{
63859d4f 3300 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3301}
1da177e4
LT
3302EXPORT_SYMBOL(default_wake_function);
3303
3304/*
41a2d6cf
IM
3305 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3306 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1da177e4
LT
3307 * number) then we wake all the non-exclusive tasks and one exclusive task.
3308 *
3309 * There are circumstances in which we can try to wake a task which has already
41a2d6cf 3310 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1da177e4
LT
3311 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3312 */
78ddb08f 3313static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
63859d4f 3314 int nr_exclusive, int wake_flags, void *key)
1da177e4 3315{
2e45874c 3316 wait_queue_t *curr, *next;
1da177e4 3317
2e45874c 3318 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
48f24c4d
IM
3319 unsigned flags = curr->flags;
3320
63859d4f 3321 if (curr->func(curr, mode, wake_flags, key) &&
48f24c4d 3322 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1da177e4
LT
3323 break;
3324 }
3325}
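
/*
 * Illustrative sketch (not from this file): a waiter that queues itself
 * exclusively sits at the tail of the list with WQ_FLAG_EXCLUSIVE set, so
 * a wake_up() (nr_exclusive == 1) wakes every ordinary waiter but only the
 * first exclusive one, avoiding a thundering herd. The demo_* names below
 * are placeholders; @cond stands in for whatever condition is awaited.
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_exclusive_wait(int *cond)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&demo_wq, &wait, TASK_INTERRUPTIBLE);
	if (!*cond)
		schedule();
	finish_wait(&demo_wq, &wait);
}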
3326
3327/**
3328 * __wake_up - wake up threads blocked on a waitqueue.
3329 * @q: the waitqueue
3330 * @mode: which threads
3331 * @nr_exclusive: how many wake-one or wake-many threads to wake up
67be2dd1 3332 * @key: is directly passed to the wakeup function
50fa610a
DH
3333 *
3334 * It may be assumed that this function implies a write memory barrier before
3335 * changing the task state if and only if any tasks are woken up.
1da177e4 3336 */
7ad5b3a5 3337void __wake_up(wait_queue_head_t *q, unsigned int mode,
95cdf3b7 3338 int nr_exclusive, void *key)
1da177e4
LT
3339{
3340 unsigned long flags;
3341
3342 spin_lock_irqsave(&q->lock, flags);
3343 __wake_up_common(q, mode, nr_exclusive, 0, key);
3344 spin_unlock_irqrestore(&q->lock, flags);
3345}
1da177e4
LT
3346EXPORT_SYMBOL(__wake_up);
3347
3348/*
3349 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3350 */
7ad5b3a5 3351void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
1da177e4
LT
3352{
3353 __wake_up_common(q, mode, 1, 0, NULL);
3354}
22c43c81 3355EXPORT_SYMBOL_GPL(__wake_up_locked);
1da177e4 3356
4ede816a
DL
3357void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
3358{
3359 __wake_up_common(q, mode, 1, 0, key);
3360}
bf294b41 3361EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4ede816a 3362
1da177e4 3363/**
4ede816a 3364 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1da177e4
LT
3365 * @q: the waitqueue
3366 * @mode: which threads
3367 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4ede816a 3368 * @key: opaque value to be passed to wakeup targets
1da177e4
LT
3369 *
 3370 * The sync wakeup differs in that the waker knows that it will schedule
3371 * away soon, so while the target thread will be woken up, it will not
3372 * be migrated to another CPU - ie. the two threads are 'synchronized'
3373 * with each other. This can prevent needless bouncing between CPUs.
3374 *
3375 * On UP it can prevent extra preemption.
50fa610a
DH
3376 *
3377 * It may be assumed that this function implies a write memory barrier before
3378 * changing the task state if and only if any tasks are woken up.
1da177e4 3379 */
4ede816a
DL
3380void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
3381 int nr_exclusive, void *key)
1da177e4
LT
3382{
3383 unsigned long flags;
7d478721 3384 int wake_flags = WF_SYNC;
1da177e4
LT
3385
3386 if (unlikely(!q))
3387 return;
3388
3389 if (unlikely(!nr_exclusive))
7d478721 3390 wake_flags = 0;
1da177e4
LT
3391
3392 spin_lock_irqsave(&q->lock, flags);
7d478721 3393 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
1da177e4
LT
3394 spin_unlock_irqrestore(&q->lock, flags);
3395}
4ede816a
DL
3396EXPORT_SYMBOL_GPL(__wake_up_sync_key);
3397
3398/*
3399 * __wake_up_sync - see __wake_up_sync_key()
3400 */
3401void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3402{
3403 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
3404}
1da177e4
LT
3405EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
3406
65eb3dc6
KD
3407/**
3408 * complete: - signals a single thread waiting on this completion
3409 * @x: holds the state of this particular completion
3410 *
3411 * This will wake up a single thread waiting on this completion. Threads will be
3412 * awakened in the same order in which they were queued.
3413 *
3414 * See also complete_all(), wait_for_completion() and related routines.
50fa610a
DH
3415 *
3416 * It may be assumed that this function implies a write memory barrier before
3417 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3418 */
b15136e9 3419void complete(struct completion *x)
1da177e4
LT
3420{
3421 unsigned long flags;
3422
3423 spin_lock_irqsave(&x->wait.lock, flags);
3424 x->done++;
d9514f6c 3425 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1da177e4
LT
3426 spin_unlock_irqrestore(&x->wait.lock, flags);
3427}
3428EXPORT_SYMBOL(complete);
3429
65eb3dc6
KD
3430/**
3431 * complete_all: - signals all threads waiting on this completion
3432 * @x: holds the state of this particular completion
3433 *
3434 * This will wake up all threads waiting on this particular completion event.
50fa610a
DH
3435 *
3436 * It may be assumed that this function implies a write memory barrier before
3437 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3438 */
b15136e9 3439void complete_all(struct completion *x)
1da177e4
LT
3440{
3441 unsigned long flags;
3442
3443 spin_lock_irqsave(&x->wait.lock, flags);
3444 x->done += UINT_MAX/2;
d9514f6c 3445 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
1da177e4
LT
3446 spin_unlock_irqrestore(&x->wait.lock, flags);
3447}
3448EXPORT_SYMBOL(complete_all);
3449
8cbbe86d
AK
3450static inline long __sched
3451do_wait_for_common(struct completion *x, long timeout, int state)
1da177e4 3452{
1da177e4
LT
3453 if (!x->done) {
3454 DECLARE_WAITQUEUE(wait, current);
3455
a93d2f17 3456 __add_wait_queue_tail_exclusive(&x->wait, &wait);
1da177e4 3457 do {
94d3d824 3458 if (signal_pending_state(state, current)) {
ea71a546
ON
3459 timeout = -ERESTARTSYS;
3460 break;
8cbbe86d
AK
3461 }
3462 __set_current_state(state);
1da177e4
LT
3463 spin_unlock_irq(&x->wait.lock);
3464 timeout = schedule_timeout(timeout);
3465 spin_lock_irq(&x->wait.lock);
ea71a546 3466 } while (!x->done && timeout);
1da177e4 3467 __remove_wait_queue(&x->wait, &wait);
ea71a546
ON
3468 if (!x->done)
3469 return timeout;
1da177e4
LT
3470 }
3471 x->done--;
ea71a546 3472 return timeout ?: 1;
1da177e4 3473}
1da177e4 3474
8cbbe86d
AK
3475static long __sched
3476wait_for_common(struct completion *x, long timeout, int state)
1da177e4 3477{
1da177e4
LT
3478 might_sleep();
3479
3480 spin_lock_irq(&x->wait.lock);
8cbbe86d 3481 timeout = do_wait_for_common(x, timeout, state);
1da177e4 3482 spin_unlock_irq(&x->wait.lock);
8cbbe86d
AK
3483 return timeout;
3484}
1da177e4 3485
65eb3dc6
KD
3486/**
3487 * wait_for_completion: - waits for completion of a task
3488 * @x: holds the state of this particular completion
3489 *
3490 * This waits to be signaled for completion of a specific task. It is NOT
3491 * interruptible and there is no timeout.
3492 *
3493 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
3494 * and interrupt capability. Also see complete().
3495 */
b15136e9 3496void __sched wait_for_completion(struct completion *x)
8cbbe86d
AK
3497{
3498 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1da177e4 3499}
8cbbe86d 3500EXPORT_SYMBOL(wait_for_completion);
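
/*
 * Illustrative usage sketch (not from this file; assumes <linux/kthread.h>):
 * one thread signals the completion, another blocks on it. The demo_*
 * names are placeholders.
 */
static DECLARE_COMPLETION(demo_done);

static int demo_worker(void *unused)
{
	/* ... produce whatever the waiter needs ... */
	complete(&demo_done);			/* wakes one waiter */
	return 0;
}

static void demo_wait_for_worker(void)
{
	kthread_run(demo_worker, NULL, "demo_worker");
	wait_for_completion(&demo_done);	/* sleeps until complete() */
}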
1da177e4 3501
65eb3dc6
KD
3502/**
3503 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
3504 * @x: holds the state of this particular completion
3505 * @timeout: timeout value in jiffies
3506 *
3507 * This waits for either a completion of a specific task to be signaled or for a
3508 * specified timeout to expire. The timeout is in jiffies. It is not
3509 * interruptible.
3510 *
3511 * The return value is 0 if timed out, and positive (at least 1, or number of
3512 * jiffies left till timeout) if completed.
65eb3dc6 3513 */
b15136e9 3514unsigned long __sched
8cbbe86d 3515wait_for_completion_timeout(struct completion *x, unsigned long timeout)
1da177e4 3516{
8cbbe86d 3517 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
1da177e4 3518}
8cbbe86d 3519EXPORT_SYMBOL(wait_for_completion_timeout);
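/*
 * Return-value handling sketch for the timeout variant (illustrative; the
 * 100ms budget is an arbitrary example):
 *
 *	unsigned long left;
 *
 *	left = wait_for_completion_timeout(&done, msecs_to_jiffies(100));
 *	if (!left)
 *		return -ETIMEDOUT;	// 0: timed out, completion never arrived
 *	// non-zero: completed, 'left' is the jiffies remaining (at least 1)
 */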
1da177e4 3520
65eb3dc6
KD
3521/**
3522 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
3523 * @x: holds the state of this particular completion
3524 *
3525 * This waits for completion of a specific task to be signaled. It is
3526 * interruptible.
3527 *
3528 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3529 */
8cbbe86d 3530int __sched wait_for_completion_interruptible(struct completion *x)
0fec171c 3531{
3532 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3533 if (t == -ERESTARTSYS)
3534 return t;
3535 return 0;
0fec171c 3536}
8cbbe86d 3537EXPORT_SYMBOL(wait_for_completion_interruptible);
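/*
 * Callers of the interruptible variant must cope with a signal arriving
 * before the completion; a minimal sketch:
 *
 *	int err = wait_for_completion_interruptible(&done);
 *
 *	if (err)
 *		return err;	// -ERESTARTSYS: a signal interrupted the wait
 *	// 0: the completion was signaled
 */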
1da177e4 3538
65eb3dc6
KD
3539/**
3540 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
3541 * @x: holds the state of this particular completion
3542 * @timeout: timeout value in jiffies
3543 *
3544 * This waits for either a completion of a specific task to be signaled or for a
3545 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
3546 *
3547 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3548 * positive (at least 1, or number of jiffies left till timeout) if completed.
65eb3dc6 3549 */
6bf41237 3550long __sched
3551wait_for_completion_interruptible_timeout(struct completion *x,
3552 unsigned long timeout)
0fec171c 3553{
8cbbe86d 3554 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
0fec171c 3555}
8cbbe86d 3556EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
1da177e4 3557
65eb3dc6
KD
3558/**
3559 * wait_for_completion_killable: - waits for completion of a task (killable)
3560 * @x: holds the state of this particular completion
3561 *
3562 * This waits to be signaled for completion of a specific task. It can be
3563 * interrupted by a kill signal.
3564 *
3565 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3566 */
3567int __sched wait_for_completion_killable(struct completion *x)
3568{
3569 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
3570 if (t == -ERESTARTSYS)
3571 return t;
3572 return 0;
3573}
3574EXPORT_SYMBOL(wait_for_completion_killable);
3575
0aa12fb4
SW
3576/**
3577 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
3578 * @x: holds the state of this particular completion
3579 * @timeout: timeout value in jiffies
3580 *
3581 * This waits for either a completion of a specific task to be
3582 * signaled or for a specified timeout to expire. It can be
3583 * interrupted by a kill signal. The timeout is in jiffies.
3584 *
3585 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3586 * positive (at least 1, or number of jiffies left till timeout) if completed.
0aa12fb4 3587 */
6bf41237 3588long __sched
3589wait_for_completion_killable_timeout(struct completion *x,
3590 unsigned long timeout)
3591{
3592 return wait_for_common(x, timeout, TASK_KILLABLE);
3593}
3594EXPORT_SYMBOL(wait_for_completion_killable_timeout);
3595
be4de352
DC
3596/**
3597 * try_wait_for_completion - try to decrement a completion without blocking
3598 * @x: completion structure
3599 *
3600 * Returns: 0 if a decrement cannot be done without blocking
3601 * 1 if a decrement succeeded.
3602 *
3603 * If a completion is being used as a counting completion,
3604 * attempt to decrement the counter without blocking. This
3605 * enables us to avoid waiting if the resource the completion
3606 * is protecting is not available.
3607 */
3608bool try_wait_for_completion(struct completion *x)
3609{
7539a3b3 3610 unsigned long flags;
3611 int ret = 1;
3612
7539a3b3 3613 spin_lock_irqsave(&x->wait.lock, flags);
3614 if (!x->done)
3615 ret = 0;
3616 else
3617 x->done--;
7539a3b3 3618 spin_unlock_irqrestore(&x->wait.lock, flags);
3619 return ret;
3620}
3621EXPORT_SYMBOL(try_wait_for_completion);
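/*
 * Counting-completion sketch: consume one unit without sleeping when
 * possible, otherwise fall back to the blocking wait ('pool->slot_free'
 * is a hypothetical completion used as a counter):
 *
 *	if (!try_wait_for_completion(&pool->slot_free))
 *		wait_for_completion(&pool->slot_free);
 *	// one ->done unit has been consumed either way
 */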
3622
3623/**
3624 * completion_done - Test to see if a completion has any waiters
3625 * @x: completion structure
3626 *
3627 * Returns: 0 if there are waiters (wait_for_completion() in progress)
3628 * 1 if there are no waiters.
3629 *
3630 */
3631bool completion_done(struct completion *x)
3632{
7539a3b3 3633 unsigned long flags;
3634 int ret = 1;
3635
7539a3b3 3636 spin_lock_irqsave(&x->wait.lock, flags);
3637 if (!x->done)
3638 ret = 0;
7539a3b3 3639 spin_unlock_irqrestore(&x->wait.lock, flags);
3640 return ret;
3641}
3642EXPORT_SYMBOL(completion_done);
3643
8cbbe86d
AK
3644static long __sched
3645sleep_on_common(wait_queue_head_t *q, int state, long timeout)
1da177e4 3646{
3647 unsigned long flags;
3648 wait_queue_t wait;
3649
3650 init_waitqueue_entry(&wait, current);
1da177e4 3651
8cbbe86d 3652 __set_current_state(state);
1da177e4 3653
3654 spin_lock_irqsave(&q->lock, flags);
3655 __add_wait_queue(q, &wait);
3656 spin_unlock(&q->lock);
3657 timeout = schedule_timeout(timeout);
3658 spin_lock_irq(&q->lock);
3659 __remove_wait_queue(q, &wait);
3660 spin_unlock_irqrestore(&q->lock, flags);
3661
3662 return timeout;
3663}
3664
3665void __sched interruptible_sleep_on(wait_queue_head_t *q)
3666{
3667 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3668}
1da177e4
LT
3669EXPORT_SYMBOL(interruptible_sleep_on);
3670
0fec171c 3671long __sched
95cdf3b7 3672interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3673{
8cbbe86d 3674 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
1da177e4 3675}
1da177e4
LT
3676EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3677
0fec171c 3678void __sched sleep_on(wait_queue_head_t *q)
1da177e4 3679{
8cbbe86d 3680 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3681}
1da177e4
LT
3682EXPORT_SYMBOL(sleep_on);
3683
0fec171c 3684long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3685{
8cbbe86d 3686 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
1da177e4 3687}
1da177e4
LT
3688EXPORT_SYMBOL(sleep_on_timeout);
3689
b29739f9
IM
3690#ifdef CONFIG_RT_MUTEXES
3691
3692/*
3693 * rt_mutex_setprio - set the current priority of a task
3694 * @p: task
3695 * @prio: prio value (kernel-internal form)
3696 *
3697 * This function changes the 'effective' priority of a task. It does
3698 * not touch ->normal_prio like __setscheduler().
3699 *
3700 * Used by the rt_mutex code to implement priority inheritance logic.
3701 */
36c8b586 3702void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3703{
83b699ed 3704 int oldprio, on_rq, running;
70b97a7f 3705 struct rq *rq;
83ab0aa0 3706 const struct sched_class *prev_class;
b29739f9
IM
3707
3708 BUG_ON(prio < 0 || prio > MAX_PRIO);
3709
0122ec5b 3710 rq = __task_rq_lock(p);
b29739f9 3711
a8027073 3712 trace_sched_pi_setprio(p, prio);
d5f9f942 3713 oldprio = p->prio;
83ab0aa0 3714 prev_class = p->sched_class;
fd2f4419 3715 on_rq = p->on_rq;
051a1d1a 3716 running = task_current(rq, p);
0e1f3483 3717 if (on_rq)
69be72c1 3718 dequeue_task(rq, p, 0);
0e1f3483
HS
3719 if (running)
3720 p->sched_class->put_prev_task(rq, p);
dd41f596
IM
3721
3722 if (rt_prio(prio))
3723 p->sched_class = &rt_sched_class;
3724 else
3725 p->sched_class = &fair_sched_class;
3726
b29739f9
IM
3727 p->prio = prio;
3728
0e1f3483
HS
3729 if (running)
3730 p->sched_class->set_curr_task(rq);
da7a735e 3731 if (on_rq)
371fd7e7 3732 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
cb469845 3733
da7a735e 3734 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 3735 __task_rq_unlock(rq);
b29739f9
IM
3736}
3737
3738#endif
3739
36c8b586 3740void set_user_nice(struct task_struct *p, long nice)
1da177e4 3741{
dd41f596 3742 int old_prio, delta, on_rq;
1da177e4 3743 unsigned long flags;
70b97a7f 3744 struct rq *rq;
3745
3746 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3747 return;
3748 /*
3749 * We have to be careful, if called from sys_setpriority(),
3750 * the task might be in the middle of scheduling on another CPU.
3751 */
3752 rq = task_rq_lock(p, &flags);
3753 /*
3754 * The RT priorities are set via sched_setscheduler(), but we still
3755 * allow the 'normal' nice value to be set - but as expected
3756 * it won't have any effect on scheduling until the task is
dd41f596 3757 * SCHED_FIFO/SCHED_RR:
1da177e4 3758 */
e05606d3 3759 if (task_has_rt_policy(p)) {
3760 p->static_prio = NICE_TO_PRIO(nice);
3761 goto out_unlock;
3762 }
fd2f4419 3763 on_rq = p->on_rq;
c09595f6 3764 if (on_rq)
69be72c1 3765 dequeue_task(rq, p, 0);
1da177e4 3766
1da177e4 3767 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3768 set_load_weight(p);
3769 old_prio = p->prio;
3770 p->prio = effective_prio(p);
3771 delta = p->prio - old_prio;
1da177e4 3772
dd41f596 3773 if (on_rq) {
371fd7e7 3774 enqueue_task(rq, p, 0);
1da177e4 3775 /*
3776 * If the task increased its priority or is running and
3777 * lowered its priority, then reschedule its CPU:
1da177e4 3778 */
d5f9f942 3779 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3780 resched_task(rq->curr);
3781 }
3782out_unlock:
0122ec5b 3783 task_rq_unlock(rq, p, &flags);
1da177e4 3784}
1da177e4
LT
3785EXPORT_SYMBOL(set_user_nice);
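/*
 * Typical in-kernel use is a kthread re-weighting itself; e.g. a background
 * scrubber might drop to the weakest nice level (sketch only, the thread
 * function and its work item are hypothetical):
 *
 *	static int scrubber_fn(void *unused)
 *	{
 *		set_user_nice(current, 19);	// lowest weight in the fair class
 *		while (!kthread_should_stop())
 *			do_background_pass();	// hypothetical work
 *		return 0;
 *	}
 */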
3786
e43379f1
MM
3787/*
3788 * can_nice - check if a task can reduce its nice value
3789 * @p: task
3790 * @nice: nice value
3791 */
36c8b586 3792int can_nice(const struct task_struct *p, const int nice)
e43379f1 3793{
024f4747
MM
3794 /* convert nice value [19,-20] to rlimit style value [1,40] */
3795 int nice_rlim = 20 - nice;
48f24c4d 3796
78d7d407 3797 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3798 capable(CAP_SYS_NICE));
3799}
3800
1da177e4
LT
3801#ifdef __ARCH_WANT_SYS_NICE
3802
3803/*
3804 * sys_nice - change the priority of the current process.
3805 * @increment: priority increment
3806 *
3807 * sys_setpriority is a more generic, but much slower function that
3808 * does similar things.
3809 */
5add95d4 3810SYSCALL_DEFINE1(nice, int, increment)
1da177e4 3811{
48f24c4d 3812 long nice, retval;
1da177e4
LT
3813
3814 /*
3815 * Setpriority might change our priority at the same moment.
3816 * We don't have to worry. Conceptually one call occurs first
3817 * and we have a single winner.
3818 */
e43379f1
MM
3819 if (increment < -40)
3820 increment = -40;
1da177e4
LT
3821 if (increment > 40)
3822 increment = 40;
3823
2b8f836f 3824 nice = TASK_NICE(current) + increment;
1da177e4
LT
3825 if (nice < -20)
3826 nice = -20;
3827 if (nice > 19)
3828 nice = 19;
3829
e43379f1
MM
3830 if (increment < 0 && !can_nice(current, nice))
3831 return -EPERM;
3832
1da177e4
LT
3833 retval = security_task_setnice(current, nice);
3834 if (retval)
3835 return retval;
3836
3837 set_user_nice(current, nice);
3838 return 0;
3839}
3840
3841#endif
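/*
 * Seen from userspace the clamping above means nice(2) saturates rather than
 * failing for oversized increments; a hypothetical unprivileged caller:
 *
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int newnice = nice(100);	// increment clamped, ends up at 19
 *
 *		printf("now running at nice %d\n", newnice);
 *		return 0;
 *	}
 */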
3842
3843/**
3844 * task_prio - return the priority value of a given task.
3845 * @p: the task in question.
3846 *
3847 * This is the priority value as seen by users in /proc.
3848 * RT tasks are offset by -200. Normal tasks are centered
3849 * around 0, value goes from -16 to +15.
3850 */
36c8b586 3851int task_prio(const struct task_struct *p)
1da177e4
LT
3852{
3853 return p->prio - MAX_RT_PRIO;
3854}
3855
3856/**
3857 * task_nice - return the nice value of a given task.
3858 * @p: the task in question.
3859 */
36c8b586 3860int task_nice(const struct task_struct *p)
1da177e4
LT
3861{
3862 return TASK_NICE(p);
3863}
150d8bed 3864EXPORT_SYMBOL(task_nice);
1da177e4
LT
3865
3866/**
3867 * idle_cpu - is a given cpu idle currently?
3868 * @cpu: the processor in question.
3869 */
3870int idle_cpu(int cpu)
3871{
908a3283
TG
3872 struct rq *rq = cpu_rq(cpu);
3873
3874 if (rq->curr != rq->idle)
3875 return 0;
3876
3877 if (rq->nr_running)
3878 return 0;
3879
3880#ifdef CONFIG_SMP
3881 if (!llist_empty(&rq->wake_list))
3882 return 0;
3883#endif
3884
3885 return 1;
1da177e4
LT
3886}
3887
1da177e4
LT
3888/**
3889 * idle_task - return the idle task for a given cpu.
3890 * @cpu: the processor in question.
3891 */
36c8b586 3892struct task_struct *idle_task(int cpu)
1da177e4
LT
3893{
3894 return cpu_rq(cpu)->idle;
3895}
3896
3897/**
3898 * find_process_by_pid - find a process with a matching PID value.
3899 * @pid: the pid in question.
3900 */
a9957449 3901static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 3902{
228ebcbe 3903 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
3904}
3905
3906/* Actually do priority change: must hold rq lock. */
dd41f596
IM
3907static void
3908__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
1da177e4 3909{
1da177e4
LT
3910 p->policy = policy;
3911 p->rt_priority = prio;
b29739f9
IM
3912 p->normal_prio = normal_prio(p);
3913 /* we are holding p->pi_lock already */
3914 p->prio = rt_mutex_getprio(p);
ffd44db5
PZ
3915 if (rt_prio(p->prio))
3916 p->sched_class = &rt_sched_class;
3917 else
3918 p->sched_class = &fair_sched_class;
2dd73a4f 3919 set_load_weight(p);
1da177e4
LT
3920}
3921
c69e8d9c
DH
3922/*
3923 * check the target process has a UID that matches the current process's
3924 */
3925static bool check_same_owner(struct task_struct *p)
3926{
3927 const struct cred *cred = current_cred(), *pcred;
3928 bool match;
3929
3930 rcu_read_lock();
3931 pcred = __task_cred(p);
b0e77598
SH
3932 if (cred->user->user_ns == pcred->user->user_ns)
3933 match = (cred->euid == pcred->euid ||
3934 cred->euid == pcred->uid);
3935 else
3936 match = false;
c69e8d9c
DH
3937 rcu_read_unlock();
3938 return match;
3939}
3940
961ccddd 3941static int __sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 3942 const struct sched_param *param, bool user)
1da177e4 3943{
83b699ed 3944 int retval, oldprio, oldpolicy = -1, on_rq, running;
1da177e4 3945 unsigned long flags;
83ab0aa0 3946 const struct sched_class *prev_class;
70b97a7f 3947 struct rq *rq;
ca94c442 3948 int reset_on_fork;
1da177e4 3949
66e5393a
SR
3950 /* may grab non-irq protected spin_locks */
3951 BUG_ON(in_interrupt());
1da177e4
LT
3952recheck:
3953 /* double check policy once rq lock held */
ca94c442
LP
3954 if (policy < 0) {
3955 reset_on_fork = p->sched_reset_on_fork;
1da177e4 3956 policy = oldpolicy = p->policy;
ca94c442
LP
3957 } else {
3958 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3959 policy &= ~SCHED_RESET_ON_FORK;
3960
3961 if (policy != SCHED_FIFO && policy != SCHED_RR &&
3962 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3963 policy != SCHED_IDLE)
3964 return -EINVAL;
3965 }
3966
1da177e4
LT
3967 /*
3968 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
3969 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3970 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4
LT
3971 */
3972 if (param->sched_priority < 0 ||
95cdf3b7 3973 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
d46523ea 3974 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
1da177e4 3975 return -EINVAL;
e05606d3 3976 if (rt_policy(policy) != (param->sched_priority != 0))
1da177e4
LT
3977 return -EINVAL;
3978
37e4ab3f
OC
3979 /*
3980 * Allow unprivileged RT tasks to decrease priority:
3981 */
961ccddd 3982 if (user && !capable(CAP_SYS_NICE)) {
e05606d3 3983 if (rt_policy(policy)) {
a44702e8
ON
3984 unsigned long rlim_rtprio =
3985 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
3986
3987 /* can't set/change the rt policy */
3988 if (policy != p->policy && !rlim_rtprio)
3989 return -EPERM;
3990
3991 /* can't increase priority */
3992 if (param->sched_priority > p->rt_priority &&
3993 param->sched_priority > rlim_rtprio)
3994 return -EPERM;
3995 }
c02aa73b 3996
dd41f596 3997 /*
c02aa73b
DH
3998 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3999 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 4000 */
c02aa73b
DH
4001 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4002 if (!can_nice(p, TASK_NICE(p)))
4003 return -EPERM;
4004 }
5fe1d75f 4005
37e4ab3f 4006 /* can't change other user's priorities */
c69e8d9c 4007 if (!check_same_owner(p))
37e4ab3f 4008 return -EPERM;
ca94c442
LP
4009
4010 /* Normal users shall not reset the sched_reset_on_fork flag */
4011 if (p->sched_reset_on_fork && !reset_on_fork)
4012 return -EPERM;
37e4ab3f 4013 }
1da177e4 4014
725aad24 4015 if (user) {
b0ae1981 4016 retval = security_task_setscheduler(p);
725aad24
JF
4017 if (retval)
4018 return retval;
4019 }
4020
b29739f9
IM
4021 /*
4022 * make sure no PI-waiters arrive (or leave) while we are
4023 * changing the priority of the task:
0122ec5b 4024 *
25985edc 4025 * To be able to change p->policy safely, the appropriate
1da177e4
LT
4026 * runqueue lock must be held.
4027 */
0122ec5b 4028 rq = task_rq_lock(p, &flags);
dc61b1d6 4029
34f971f6
PZ
4030 /*
4031 * Changing the policy of the stop thread is a very bad idea
4032 */
4033 if (p == rq->stop) {
0122ec5b 4034 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
4035 return -EINVAL;
4036 }
4037
a51e9198
DF
4038 /*
4039 * If not changing anything there's no need to proceed further:
4040 */
4041 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
4042 param->sched_priority == p->rt_priority))) {
4043
4044 __task_rq_unlock(rq);
4045 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4046 return 0;
4047 }
4048
dc61b1d6
PZ
4049#ifdef CONFIG_RT_GROUP_SCHED
4050 if (user) {
4051 /*
4052 * Do not allow realtime tasks into groups that have no runtime
4053 * assigned.
4054 */
4055 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
4056 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4057 !task_group_is_autogroup(task_group(p))) {
0122ec5b 4058 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
4059 return -EPERM;
4060 }
4061 }
4062#endif
4063
1da177e4
LT
4064 /* recheck policy now with rq lock held */
4065 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4066 policy = oldpolicy = -1;
0122ec5b 4067 task_rq_unlock(rq, p, &flags);
1da177e4
LT
4068 goto recheck;
4069 }
fd2f4419 4070 on_rq = p->on_rq;
051a1d1a 4071 running = task_current(rq, p);
0e1f3483 4072 if (on_rq)
2e1cb74a 4073 deactivate_task(rq, p, 0);
0e1f3483
HS
4074 if (running)
4075 p->sched_class->put_prev_task(rq, p);
f6b53205 4076
ca94c442
LP
4077 p->sched_reset_on_fork = reset_on_fork;
4078
1da177e4 4079 oldprio = p->prio;
83ab0aa0 4080 prev_class = p->sched_class;
dd41f596 4081 __setscheduler(rq, p, policy, param->sched_priority);
f6b53205 4082
0e1f3483
HS
4083 if (running)
4084 p->sched_class->set_curr_task(rq);
da7a735e 4085 if (on_rq)
dd41f596 4086 activate_task(rq, p, 0);
cb469845 4087
da7a735e 4088 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 4089 task_rq_unlock(rq, p, &flags);
b29739f9 4090
95e02ca9
TG
4091 rt_mutex_adjust_pi(p);
4092
1da177e4
LT
4093 return 0;
4094}
961ccddd
RR
4095
4096/**
4097 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4098 * @p: the task in question.
4099 * @policy: new policy.
4100 * @param: structure containing the new RT priority.
4101 *
4102 * NOTE that the task may be already dead.
4103 */
4104int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 4105 const struct sched_param *param)
961ccddd
RR
4106{
4107 return __sched_setscheduler(p, policy, param, true);
4108}
1da177e4
LT
4109EXPORT_SYMBOL_GPL(sched_setscheduler);
4110
961ccddd
RR
4111/**
4112 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4113 * @p: the task in question.
4114 * @policy: new policy.
4115 * @param: structure containing the new RT priority.
4116 *
4117 * Just like sched_setscheduler, only don't bother checking if the
4118 * current context has permission. For example, this is needed in
4119 * stop_machine(): we create temporary high priority worker threads,
4120 * but our caller might not have that capability.
4121 */
4122int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 4123 const struct sched_param *param)
961ccddd
RR
4124{
4125 return __sched_setscheduler(p, policy, param, false);
4126}
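/*
 * Sketch of the nocheck variant as used from kernel context: promote a
 * freshly created kthread to a FIFO priority before waking it (the thread
 * function, its name and the priority choice are hypothetical):
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(rt_worker_fn, NULL, "rt-worker");
 *	if (!IS_ERR(tsk)) {
 *		sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
 *		wake_up_process(tsk);
 *	}
 */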
4127
95cdf3b7
IM
4128static int
4129do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4130{
1da177e4
LT
4131 struct sched_param lparam;
4132 struct task_struct *p;
36c8b586 4133 int retval;
1da177e4
LT
4134
4135 if (!param || pid < 0)
4136 return -EINVAL;
4137 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4138 return -EFAULT;
5fe1d75f
ON
4139
4140 rcu_read_lock();
4141 retval = -ESRCH;
1da177e4 4142 p = find_process_by_pid(pid);
5fe1d75f
ON
4143 if (p != NULL)
4144 retval = sched_setscheduler(p, policy, &lparam);
4145 rcu_read_unlock();
36c8b586 4146
1da177e4
LT
4147 return retval;
4148}
4149
4150/**
4151 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4152 * @pid: the pid in question.
4153 * @policy: new policy.
4154 * @param: structure containing the new RT priority.
4155 */
5add95d4
HC
4156SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4157 struct sched_param __user *, param)
1da177e4 4158{
c21761f1
JB
4159 /* negative values for policy are not valid */
4160 if (policy < 0)
4161 return -EINVAL;
4162
1da177e4
LT
4163 return do_sched_setscheduler(pid, policy, param);
4164}
4165
4166/**
4167 * sys_sched_setparam - set/change the RT priority of a thread
4168 * @pid: the pid in question.
4169 * @param: structure containing the new RT priority.
4170 */
5add95d4 4171SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4172{
4173 return do_sched_setscheduler(pid, -1, param);
4174}
4175
4176/**
4177 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4178 * @pid: the pid in question.
4179 */
5add95d4 4180SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4181{
36c8b586 4182 struct task_struct *p;
3a5c359a 4183 int retval;
1da177e4
LT
4184
4185 if (pid < 0)
3a5c359a 4186 return -EINVAL;
1da177e4
LT
4187
4188 retval = -ESRCH;
5fe85be0 4189 rcu_read_lock();
1da177e4
LT
4190 p = find_process_by_pid(pid);
4191 if (p) {
4192 retval = security_task_getscheduler(p);
4193 if (!retval)
ca94c442
LP
4194 retval = p->policy
4195 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4196 }
5fe85be0 4197 rcu_read_unlock();
1da177e4
LT
4198 return retval;
4199}
4200
4201/**
ca94c442 4202 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4203 * @pid: the pid in question.
4204 * @param: structure containing the RT priority.
4205 */
5add95d4 4206SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4207{
4208 struct sched_param lp;
36c8b586 4209 struct task_struct *p;
3a5c359a 4210 int retval;
1da177e4
LT
4211
4212 if (!param || pid < 0)
3a5c359a 4213 return -EINVAL;
1da177e4 4214
5fe85be0 4215 rcu_read_lock();
1da177e4
LT
4216 p = find_process_by_pid(pid);
4217 retval = -ESRCH;
4218 if (!p)
4219 goto out_unlock;
4220
4221 retval = security_task_getscheduler(p);
4222 if (retval)
4223 goto out_unlock;
4224
4225 lp.sched_priority = p->rt_priority;
5fe85be0 4226 rcu_read_unlock();
1da177e4
LT
4227
4228 /*
4229 * This one might sleep, we cannot do it with a spinlock held ...
4230 */
4231 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4232
1da177e4
LT
4233 return retval;
4234
4235out_unlock:
5fe85be0 4236 rcu_read_unlock();
1da177e4
LT
4237 return retval;
4238}
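/*
 * From userspace the syscalls above are reached through the POSIX wrappers;
 * a hypothetical program switching itself to SCHED_RR and reading the
 * priority back (needs CAP_SYS_NICE or an RLIMIT_RTPRIO allowance):
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp = { .sched_priority = 10 };
 *
 *		if (sched_setscheduler(0, SCHED_RR, &sp))	// 0 == this thread
 *			perror("sched_setscheduler");
 *		sched_getparam(0, &sp);
 *		printf("rt priority: %d\n", sp.sched_priority);
 *		return 0;
 *	}
 */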
4239
96f874e2 4240long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4241{
5a16f3d3 4242 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4243 struct task_struct *p;
4244 int retval;
1da177e4 4245
95402b38 4246 get_online_cpus();
23f5d142 4247 rcu_read_lock();
1da177e4
LT
4248
4249 p = find_process_by_pid(pid);
4250 if (!p) {
23f5d142 4251 rcu_read_unlock();
95402b38 4252 put_online_cpus();
1da177e4
LT
4253 return -ESRCH;
4254 }
4255
23f5d142 4256 /* Prevent p going away */
1da177e4 4257 get_task_struct(p);
23f5d142 4258 rcu_read_unlock();
1da177e4 4259
5a16f3d3
RR
4260 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4261 retval = -ENOMEM;
4262 goto out_put_task;
4263 }
4264 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4265 retval = -ENOMEM;
4266 goto out_free_cpus_allowed;
4267 }
1da177e4 4268 retval = -EPERM;
b0e77598 4269 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
1da177e4
LT
4270 goto out_unlock;
4271
b0ae1981 4272 retval = security_task_setscheduler(p);
e7834f8f
DQ
4273 if (retval)
4274 goto out_unlock;
4275
5a16f3d3
RR
4276 cpuset_cpus_allowed(p, cpus_allowed);
4277 cpumask_and(new_mask, in_mask, cpus_allowed);
49246274 4278again:
5a16f3d3 4279 retval = set_cpus_allowed_ptr(p, new_mask);
1da177e4 4280
8707d8b8 4281 if (!retval) {
5a16f3d3
RR
4282 cpuset_cpus_allowed(p, cpus_allowed);
4283 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4284 /*
4285 * We must have raced with a concurrent cpuset
4286 * update. Just reset the cpus_allowed to the
4287 * cpuset's cpus_allowed
4288 */
5a16f3d3 4289 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4290 goto again;
4291 }
4292 }
1da177e4 4293out_unlock:
5a16f3d3
RR
4294 free_cpumask_var(new_mask);
4295out_free_cpus_allowed:
4296 free_cpumask_var(cpus_allowed);
4297out_put_task:
1da177e4 4298 put_task_struct(p);
95402b38 4299 put_online_cpus();
1da177e4
LT
4300 return retval;
4301}
4302
4303static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4304 struct cpumask *new_mask)
1da177e4 4305{
96f874e2
RR
4306 if (len < cpumask_size())
4307 cpumask_clear(new_mask);
4308 else if (len > cpumask_size())
4309 len = cpumask_size();
4310
1da177e4
LT
4311 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4312}
4313
4314/**
4315 * sys_sched_setaffinity - set the cpu affinity of a process
4316 * @pid: pid of the process
4317 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4318 * @user_mask_ptr: user-space pointer to the new cpu mask
4319 */
5add95d4
HC
4320SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4321 unsigned long __user *, user_mask_ptr)
1da177e4 4322{
5a16f3d3 4323 cpumask_var_t new_mask;
1da177e4
LT
4324 int retval;
4325
5a16f3d3
RR
4326 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4327 return -ENOMEM;
1da177e4 4328
5a16f3d3
RR
4329 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4330 if (retval == 0)
4331 retval = sched_setaffinity(pid, new_mask);
4332 free_cpumask_var(new_mask);
4333 return retval;
1da177e4
LT
4334}
4335
96f874e2 4336long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4337{
36c8b586 4338 struct task_struct *p;
31605683 4339 unsigned long flags;
1da177e4 4340 int retval;
1da177e4 4341
95402b38 4342 get_online_cpus();
23f5d142 4343 rcu_read_lock();
1da177e4
LT
4344
4345 retval = -ESRCH;
4346 p = find_process_by_pid(pid);
4347 if (!p)
4348 goto out_unlock;
4349
e7834f8f
DQ
4350 retval = security_task_getscheduler(p);
4351 if (retval)
4352 goto out_unlock;
4353
013fdb80 4354 raw_spin_lock_irqsave(&p->pi_lock, flags);
96f874e2 4355 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
013fdb80 4356 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4357
4358out_unlock:
23f5d142 4359 rcu_read_unlock();
95402b38 4360 put_online_cpus();
1da177e4 4361
9531b62f 4362 return retval;
1da177e4
LT
4363}
4364
4365/**
4366 * sys_sched_getaffinity - get the cpu affinity of a process
4367 * @pid: pid of the process
4368 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4369 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4370 */
5add95d4
HC
4371SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4372 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4373{
4374 int ret;
f17c8607 4375 cpumask_var_t mask;
1da177e4 4376
84fba5ec 4377 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4378 return -EINVAL;
4379 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4380 return -EINVAL;
4381
f17c8607
RR
4382 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4383 return -ENOMEM;
1da177e4 4384
f17c8607
RR
4385 ret = sched_getaffinity(pid, mask);
4386 if (ret == 0) {
8bc037fb 4387 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4388
4389 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4390 ret = -EFAULT;
4391 else
cd3d8031 4392 ret = retlen;
f17c8607
RR
4393 }
4394 free_cpumask_var(mask);
1da177e4 4395
f17c8607 4396 return ret;
1da177e4
LT
4397}
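/*
 * Userspace reaches these two syscalls via the glibc cpu_set_t helpers; a
 * hypothetical example pinning the caller to CPU 0 and reading the mask
 * back:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		if (sched_setaffinity(0, sizeof(set), &set))
 *			perror("sched_setaffinity");
 *
 *		CPU_ZERO(&set);
 *		sched_getaffinity(0, sizeof(set), &set);
 *		printf("allowed cpus: %d\n", CPU_COUNT(&set));
 *		return 0;
 *	}
 */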
4398
4399/**
4400 * sys_sched_yield - yield the current processor to other threads.
4401 *
dd41f596
IM
4402 * This function yields the current CPU to other tasks. If there are no
4403 * other threads running on this CPU then this function will return.
1da177e4 4404 */
5add95d4 4405SYSCALL_DEFINE0(sched_yield)
1da177e4 4406{
70b97a7f 4407 struct rq *rq = this_rq_lock();
1da177e4 4408
2d72376b 4409 schedstat_inc(rq, yld_count);
4530d7ab 4410 current->sched_class->yield_task(rq);
1da177e4
LT
4411
4412 /*
4413 * Since we are going to call schedule() anyway, there's
4414 * no need to preempt or enable interrupts:
4415 */
4416 __release(rq->lock);
8a25d5de 4417 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4418 do_raw_spin_unlock(&rq->lock);
1da177e4
LT
4419 preempt_enable_no_resched();
4420
4421 schedule();
4422
4423 return 0;
4424}
4425
d86ee480
PZ
4426static inline int should_resched(void)
4427{
4428 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4429}
4430
e7b38404 4431static void __cond_resched(void)
1da177e4 4432{
e7aaaa69 4433 add_preempt_count(PREEMPT_ACTIVE);
c259e01a 4434 __schedule();
e7aaaa69 4435 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4
LT
4436}
4437
02b67cc3 4438int __sched _cond_resched(void)
1da177e4 4439{
d86ee480 4440 if (should_resched()) {
4441 __cond_resched();
4442 return 1;
4443 }
4444 return 0;
4445}
02b67cc3 4446EXPORT_SYMBOL(_cond_resched);
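/*
 * The usual entry point is the cond_resched() macro sprinkled into
 * long-running kernel loops; an illustrative sketch (the per-item work is
 * hypothetical):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);	// hypothetical work
 *		cond_resched();			// reschedule here if needed
 *	}
 */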
1da177e4
LT
4447
4448/*
613afbf8 4449 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4450 * call schedule, and on return reacquire the lock.
4451 *
41a2d6cf 4452 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4453 * operations here to prevent schedule() from being called twice (once via
4454 * spin_unlock(), once by hand).
4455 */
613afbf8 4456int __cond_resched_lock(spinlock_t *lock)
1da177e4 4457{
d86ee480 4458 int resched = should_resched();
6df3cecb
JK
4459 int ret = 0;
4460
f607c668
PZ
4461 lockdep_assert_held(lock);
4462
95c354fe 4463 if (spin_needbreak(lock) || resched) {
1da177e4 4464 spin_unlock(lock);
d86ee480 4465 if (resched)
95c354fe
NP
4466 __cond_resched();
4467 else
4468 cpu_relax();
6df3cecb 4469 ret = 1;
1da177e4 4470 spin_lock(lock);
1da177e4 4471 }
6df3cecb 4472 return ret;
1da177e4 4473}
613afbf8 4474EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 4475
613afbf8 4476int __sched __cond_resched_softirq(void)
1da177e4
LT
4477{
4478 BUG_ON(!in_softirq());
4479
d86ee480 4480 if (should_resched()) {
98d82567 4481 local_bh_enable();
1da177e4
LT
4482 __cond_resched();
4483 local_bh_disable();
4484 return 1;
4485 }
4486 return 0;
4487}
613afbf8 4488EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4489
1da177e4
LT
4490/**
4491 * yield - yield the current processor to other threads.
4492 *
72fd4a35 4493 * This is a shortcut for kernel-space yielding - it marks the
1da177e4
LT
4494 * thread runnable and calls sys_sched_yield().
4495 */
4496void __sched yield(void)
4497{
4498 set_current_state(TASK_RUNNING);
4499 sys_sched_yield();
4500}
1da177e4
LT
4501EXPORT_SYMBOL(yield);
4502
d95f4122
MG
4503/**
4504 * yield_to - yield the current processor to another thread in
4505 * your thread group, or accelerate that thread toward the
4506 * processor it's on.
16addf95
RD
4507 * @p: target task
4508 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4509 *
4510 * It's the caller's job to ensure that the target task struct
4511 * can't go away on us before we can do any checks.
4512 *
4513 * Returns true if we indeed boosted the target task.
4514 */
4515bool __sched yield_to(struct task_struct *p, bool preempt)
4516{
4517 struct task_struct *curr = current;
4518 struct rq *rq, *p_rq;
4519 unsigned long flags;
4520 bool yielded = 0;
4521
4522 local_irq_save(flags);
4523 rq = this_rq();
4524
4525again:
4526 p_rq = task_rq(p);
4527 double_rq_lock(rq, p_rq);
4528 while (task_rq(p) != p_rq) {
4529 double_rq_unlock(rq, p_rq);
4530 goto again;
4531 }
4532
4533 if (!curr->sched_class->yield_to_task)
4534 goto out;
4535
4536 if (curr->sched_class != p->sched_class)
4537 goto out;
4538
4539 if (task_running(p_rq, p) || p->state)
4540 goto out;
4541
4542 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4543 if (yielded) {
d95f4122 4544 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4545 /*
4546 * Make p's CPU reschedule; pick_next_entity takes care of
4547 * fairness.
4548 */
4549 if (preempt && rq != p_rq)
4550 resched_task(p_rq->curr);
916671c0
MG
4551 } else {
4552 /*
4553 * We might have set it in task_yield_fair(), but are
4554 * not going to schedule(), so don't want to skip
4555 * the next update.
4556 */
4557 rq->skip_clock_update = 0;
6d1cafd8 4558 }
d95f4122
MG
4559
4560out:
4561 double_rq_unlock(rq, p_rq);
4562 local_irq_restore(flags);
4563
4564 if (yielded)
4565 schedule();
4566
4567 return yielded;
4568}
4569EXPORT_SYMBOL_GPL(yield_to);
4570
1da177e4 4571/*
41a2d6cf 4572 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4573 * that process accounting knows that this is a task in IO wait state.
1da177e4
LT
4574 */
4575void __sched io_schedule(void)
4576{
54d35f29 4577 struct rq *rq = raw_rq();
1da177e4 4578
0ff92245 4579 delayacct_blkio_start();
1da177e4 4580 atomic_inc(&rq->nr_iowait);
73c10101 4581 blk_flush_plug(current);
8f0dfc34 4582 current->in_iowait = 1;
1da177e4 4583 schedule();
8f0dfc34 4584 current->in_iowait = 0;
1da177e4 4585 atomic_dec(&rq->nr_iowait);
0ff92245 4586 delayacct_blkio_end();
1da177e4 4587}
1da177e4
LT
4588EXPORT_SYMBOL(io_schedule);
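/*
 * Sketch of a wait loop whose sleep should be accounted as iowait (the
 * waitqueue, device and condition are hypothetical; the block layer uses
 * this pattern while waiting for in-flight requests):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (request_completed(dev))	// hypothetical condition
 *			break;
 *		io_schedule();			// like schedule(), but counted in nr_iowait
 *	}
 *	finish_wait(&dev->wq, &wait);
 */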
4589
4590long __sched io_schedule_timeout(long timeout)
4591{
54d35f29 4592 struct rq *rq = raw_rq();
1da177e4
LT
4593 long ret;
4594
0ff92245 4595 delayacct_blkio_start();
1da177e4 4596 atomic_inc(&rq->nr_iowait);
73c10101 4597 blk_flush_plug(current);
8f0dfc34 4598 current->in_iowait = 1;
1da177e4 4599 ret = schedule_timeout(timeout);
8f0dfc34 4600 current->in_iowait = 0;
1da177e4 4601 atomic_dec(&rq->nr_iowait);
0ff92245 4602 delayacct_blkio_end();
1da177e4
LT
4603 return ret;
4604}
4605
4606/**
4607 * sys_sched_get_priority_max - return maximum RT priority.
4608 * @policy: scheduling class.
4609 *
4610 * this syscall returns the maximum rt_priority that can be used
4611 * by a given scheduling class.
4612 */
5add95d4 4613SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
4614{
4615 int ret = -EINVAL;
4616
4617 switch (policy) {
4618 case SCHED_FIFO:
4619 case SCHED_RR:
4620 ret = MAX_USER_RT_PRIO-1;
4621 break;
4622 case SCHED_NORMAL:
b0a9499c 4623 case SCHED_BATCH:
dd41f596 4624 case SCHED_IDLE:
1da177e4
LT
4625 ret = 0;
4626 break;
4627 }
4628 return ret;
4629}
4630
4631/**
4632 * sys_sched_get_priority_min - return minimum RT priority.
4633 * @policy: scheduling class.
4634 *
4635 * this syscall returns the minimum rt_priority that can be used
4636 * by a given scheduling class.
4637 */
5add95d4 4638SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
4639{
4640 int ret = -EINVAL;
4641
4642 switch (policy) {
4643 case SCHED_FIFO:
4644 case SCHED_RR:
4645 ret = 1;
4646 break;
4647 case SCHED_NORMAL:
b0a9499c 4648 case SCHED_BATCH:
dd41f596 4649 case SCHED_IDLE:
1da177e4
LT
4650 ret = 0;
4651 }
4652 return ret;
4653}
4654
4655/**
4656 * sys_sched_rr_get_interval - return the default timeslice of a process.
4657 * @pid: pid of the process.
4658 * @interval: userspace pointer to the timeslice value.
4659 *
4660 * this syscall writes the default timeslice value of a given process
4661 * into the user-space timespec buffer. A value of '0' means infinity.
4662 */
17da2bd9 4663SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 4664 struct timespec __user *, interval)
1da177e4 4665{
36c8b586 4666 struct task_struct *p;
a4ec24b4 4667 unsigned int time_slice;
dba091b9
TG
4668 unsigned long flags;
4669 struct rq *rq;
3a5c359a 4670 int retval;
1da177e4 4671 struct timespec t;
1da177e4
LT
4672
4673 if (pid < 0)
3a5c359a 4674 return -EINVAL;
1da177e4
LT
4675
4676 retval = -ESRCH;
1a551ae7 4677 rcu_read_lock();
1da177e4
LT
4678 p = find_process_by_pid(pid);
4679 if (!p)
4680 goto out_unlock;
4681
4682 retval = security_task_getscheduler(p);
4683 if (retval)
4684 goto out_unlock;
4685
dba091b9
TG
4686 rq = task_rq_lock(p, &flags);
4687 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 4688 task_rq_unlock(rq, p, &flags);
a4ec24b4 4689
1a551ae7 4690 rcu_read_unlock();
a4ec24b4 4691 jiffies_to_timespec(time_slice, &t);
1da177e4 4692 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 4693 return retval;
3a5c359a 4694
1da177e4 4695out_unlock:
1a551ae7 4696 rcu_read_unlock();
1da177e4
LT
4697 return retval;
4698}
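/*
 * A hypothetical userspace probe of the timeslice reported by this syscall:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts))	// 0 == calling thread
 *			perror("sched_rr_get_interval");
 *		else
 *			printf("timeslice: %ld.%09ld s\n",
 *			       (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */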
4699
7c731e0a 4700static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 4701
82a1fcb9 4702void sched_show_task(struct task_struct *p)
1da177e4 4703{
1da177e4 4704 unsigned long free = 0;
36c8b586 4705 unsigned state;
1da177e4 4706
1da177e4 4707 state = p->state ? __ffs(p->state) + 1 : 0;
28d0686c 4708 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 4709 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 4710#if BITS_PER_LONG == 32
1da177e4 4711 if (state == TASK_RUNNING)
3df0fc5b 4712 printk(KERN_CONT " running ");
1da177e4 4713 else
3df0fc5b 4714 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
4715#else
4716 if (state == TASK_RUNNING)
3df0fc5b 4717 printk(KERN_CONT " running task ");
1da177e4 4718 else
3df0fc5b 4719 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
4720#endif
4721#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 4722 free = stack_not_used(p);
1da177e4 4723#endif
3df0fc5b 4724 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
aa47b7e0
DR
4725 task_pid_nr(p), task_pid_nr(p->real_parent),
4726 (unsigned long)task_thread_info(p)->flags);
1da177e4 4727
5fb5e6de 4728 show_stack(p, NULL);
1da177e4
LT
4729}
4730
e59e2ae2 4731void show_state_filter(unsigned long state_filter)
1da177e4 4732{
36c8b586 4733 struct task_struct *g, *p;
1da177e4 4734
4bd77321 4735#if BITS_PER_LONG == 32
3df0fc5b
PZ
4736 printk(KERN_INFO
4737 " task PC stack pid father\n");
1da177e4 4738#else
3df0fc5b
PZ
4739 printk(KERN_INFO
4740 " task PC stack pid father\n");
1da177e4 4741#endif
510f5acc 4742 rcu_read_lock();
1da177e4
LT
4743 do_each_thread(g, p) {
4744 /*
4745 * reset the NMI-timeout, listing all files on a slow
25985edc 4746 * console might take a lot of time:
1da177e4
LT
4747 */
4748 touch_nmi_watchdog();
39bc89fd 4749 if (!state_filter || (p->state & state_filter))
82a1fcb9 4750 sched_show_task(p);
1da177e4
LT
4751 } while_each_thread(g, p);
4752
04c9167f
JF
4753 touch_all_softlockup_watchdogs();
4754
dd41f596
IM
4755#ifdef CONFIG_SCHED_DEBUG
4756 sysrq_sched_debug_show();
4757#endif
510f5acc 4758 rcu_read_unlock();
e59e2ae2
IM
4759 /*
4760 * Only show locks if all tasks are dumped:
4761 */
93335a21 4762 if (!state_filter)
e59e2ae2 4763 debug_show_all_locks();
1da177e4
LT
4764}
4765
1df21055
IM
4766void __cpuinit init_idle_bootup_task(struct task_struct *idle)
4767{
dd41f596 4768 idle->sched_class = &idle_sched_class;
1df21055
IM
4769}
4770
f340c0d1
IM
4771/**
4772 * init_idle - set up an idle thread for a given CPU
4773 * @idle: task in question
4774 * @cpu: cpu the idle task belongs to
4775 *
4776 * NOTE: this function does not set the idle thread's NEED_RESCHED
4777 * flag, to make booting more robust.
4778 */
5c1e1767 4779void __cpuinit init_idle(struct task_struct *idle, int cpu)
1da177e4 4780{
70b97a7f 4781 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
4782 unsigned long flags;
4783
05fa785c 4784 raw_spin_lock_irqsave(&rq->lock, flags);
5cbd54ef 4785
dd41f596 4786 __sched_fork(idle);
06b83b5f 4787 idle->state = TASK_RUNNING;
dd41f596
IM
4788 idle->se.exec_start = sched_clock();
4789
1e1b6c51 4790 do_set_cpus_allowed(idle, cpumask_of(cpu));
6506cf6c
PZ
4791 /*
4792 * We're having a chicken and egg problem, even though we are
4793 * holding rq->lock, the cpu isn't yet set to this cpu so the
4794 * lockdep check in task_group() will fail.
4795 *
4796 * Similar case to sched_fork(). / Alternatively we could
4797 * use task_rq_lock() here and obtain the other rq->lock.
4798 *
4799 * Silence PROVE_RCU
4800 */
4801 rcu_read_lock();
dd41f596 4802 __set_task_cpu(idle, cpu);
6506cf6c 4803 rcu_read_unlock();
1da177e4 4804
1da177e4 4805 rq->curr = rq->idle = idle;
3ca7a440
PZ
4806#if defined(CONFIG_SMP)
4807 idle->on_cpu = 1;
4866cde0 4808#endif
05fa785c 4809 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4
LT
4810
4811 /* Set the preempt count _outside_ the spinlocks! */
a1261f54 4812 task_thread_info(idle)->preempt_count = 0;
625f2a37 4813
dd41f596
IM
4814 /*
4815 * The idle tasks have their own, simple scheduling class:
4816 */
4817 idle->sched_class = &idle_sched_class;
868baf07 4818 ftrace_graph_init_idle_task(idle, cpu);
f1c6f1a7
CE
4819#if defined(CONFIG_SMP)
4820 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4821#endif
1da177e4
LT
4822}
4823
1da177e4 4824#ifdef CONFIG_SMP
1e1b6c51
KM
4825void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4826{
4827 if (p->sched_class && p->sched_class->set_cpus_allowed)
4828 p->sched_class->set_cpus_allowed(p, new_mask);
4939602a
PZ
4829
4830 cpumask_copy(&p->cpus_allowed, new_mask);
4831 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
1e1b6c51
KM
4832}
4833
1da177e4
LT
4834/*
4835 * This is how migration works:
4836 *
969c7921
TH
4837 * 1) we invoke migration_cpu_stop() on the target CPU using
4838 * stop_one_cpu().
4839 * 2) stopper starts to run (implicitly forcing the migrated thread
4840 * off the CPU)
4841 * 3) it checks whether the migrated task is still in the wrong runqueue.
4842 * 4) if it's in the wrong runqueue then the migration thread removes
1da177e4 4843 * it and puts it into the right queue.
969c7921
TH
4844 * 5) stopper completes and stop_one_cpu() returns and the migration
4845 * is done.
1da177e4
LT
4846 */
4847
4848/*
4849 * Change a given task's CPU affinity. Migrate the thread to a
4850 * proper CPU and schedule it away if the CPU it's executing on
4851 * is removed from the allowed bitmask.
4852 *
4853 * NOTE: the caller must have a valid reference to the task, the
41a2d6cf 4854 * task must not exit() & deallocate itself prematurely. The
1da177e4
LT
4855 * call is not atomic; no spinlocks may be held.
4856 */
96f874e2 4857int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1da177e4
LT
4858{
4859 unsigned long flags;
70b97a7f 4860 struct rq *rq;
969c7921 4861 unsigned int dest_cpu;
48f24c4d 4862 int ret = 0;
1da177e4
LT
4863
4864 rq = task_rq_lock(p, &flags);
e2912009 4865
db44fc01
YZ
4866 if (cpumask_equal(&p->cpus_allowed, new_mask))
4867 goto out;
4868
6ad4c188 4869 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1da177e4
LT
4870 ret = -EINVAL;
4871 goto out;
4872 }
4873
db44fc01 4874 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
9985b0ba
DR
4875 ret = -EINVAL;
4876 goto out;
4877 }
4878
1e1b6c51 4879 do_set_cpus_allowed(p, new_mask);
73fe6aae 4880
1da177e4 4881 /* Can the task run on the task's current CPU? If so, we're done */
96f874e2 4882 if (cpumask_test_cpu(task_cpu(p), new_mask))
1da177e4
LT
4883 goto out;
4884
969c7921 4885 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
bd8e7dde 4886 if (p->on_rq) {
969c7921 4887 struct migration_arg arg = { p, dest_cpu };
1da177e4 4888 /* Need help from migration thread: drop lock and wait. */
0122ec5b 4889 task_rq_unlock(rq, p, &flags);
969c7921 4890 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1da177e4
LT
4891 tlb_migrate_finish(p->mm);
4892 return 0;
4893 }
4894out:
0122ec5b 4895 task_rq_unlock(rq, p, &flags);
48f24c4d 4896
1da177e4
LT
4897 return ret;
4898}
cd8ba7cd 4899EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
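/*
 * In-kernel callers commonly use this to confine a helper thread to one
 * CPU; a minimal sketch (the thread function, its name and 'cpu' are
 * hypothetical):
 *
 *	struct task_struct *tsk = kthread_create(poll_fn, NULL, "poller/%d", cpu);
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *		wake_up_process(tsk);
 *	}
 */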
1da177e4
LT
4900
4901/*
41a2d6cf 4902 * Move (not current) task off this cpu, onto dest cpu. We're doing
1da177e4
LT
4903 * this because either it can't run here any more (set_cpus_allowed()
4904 * away from this CPU, or CPU going down), or because we're
4905 * attempting to rebalance this task on exec (sched_exec).
4906 *
4907 * So we race with normal scheduler movements, but that's OK, as long
4908 * as the task is no longer on this CPU.
efc30814
KK
4909 *
4910 * Returns non-zero if task was successfully migrated.
1da177e4 4911 */
efc30814 4912static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
1da177e4 4913{
70b97a7f 4914 struct rq *rq_dest, *rq_src;
e2912009 4915 int ret = 0;
1da177e4 4916
e761b772 4917 if (unlikely(!cpu_active(dest_cpu)))
efc30814 4918 return ret;
1da177e4
LT
4919
4920 rq_src = cpu_rq(src_cpu);
4921 rq_dest = cpu_rq(dest_cpu);
4922
0122ec5b 4923 raw_spin_lock(&p->pi_lock);
1da177e4
LT
4924 double_rq_lock(rq_src, rq_dest);
4925 /* Already moved. */
4926 if (task_cpu(p) != src_cpu)
b1e38734 4927 goto done;
1da177e4 4928 /* Affinity changed (again). */
fa17b507 4929 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
b1e38734 4930 goto fail;
1da177e4 4931
e2912009
PZ
4932 /*
4933 * If we're not on a rq, the next wake-up will ensure we're
4934 * placed properly.
4935 */
fd2f4419 4936 if (p->on_rq) {
2e1cb74a 4937 deactivate_task(rq_src, p, 0);
e2912009 4938 set_task_cpu(p, dest_cpu);
dd41f596 4939 activate_task(rq_dest, p, 0);
15afe09b 4940 check_preempt_curr(rq_dest, p, 0);
1da177e4 4941 }
b1e38734 4942done:
efc30814 4943 ret = 1;
b1e38734 4944fail:
1da177e4 4945 double_rq_unlock(rq_src, rq_dest);
0122ec5b 4946 raw_spin_unlock(&p->pi_lock);
efc30814 4947 return ret;
1da177e4
LT
4948}
4949
4950/*
969c7921
TH
4951 * migration_cpu_stop - this will be executed by a highprio stopper thread
4952 * and performs thread migration by bumping thread off CPU then
4953 * 'pushing' onto another runqueue.
1da177e4 4954 */
969c7921 4955static int migration_cpu_stop(void *data)
1da177e4 4956{
969c7921 4957 struct migration_arg *arg = data;
f7b4cddc 4958
969c7921
TH
4959 /*
4960 * The original target cpu might have gone down and we might
4961 * be on another cpu but it doesn't matter.
4962 */
f7b4cddc 4963 local_irq_disable();
969c7921 4964 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
f7b4cddc 4965 local_irq_enable();
1da177e4 4966 return 0;
f7b4cddc
ON
4967}
4968
1da177e4 4969#ifdef CONFIG_HOTPLUG_CPU
48c5ccae 4970
054b9108 4971/*
48c5ccae
PZ
4972 * Ensures that the idle task is using init_mm right before its cpu goes
4973 * offline.
054b9108 4974 */
48c5ccae 4975void idle_task_exit(void)
1da177e4 4976{
48c5ccae 4977 struct mm_struct *mm = current->active_mm;
e76bd8d9 4978
48c5ccae 4979 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 4980
48c5ccae
PZ
4981 if (mm != &init_mm)
4982 switch_mm(mm, &init_mm, current);
4983 mmdrop(mm);
1da177e4
LT
4984}
4985
4986/*
4987 * While a dead CPU has no uninterruptible tasks queued at this point,
4988 * it might still have a nonzero ->nr_uninterruptible counter, because
4989 * for performance reasons the counter is not strictly tracking tasks to
4990 * their home CPUs. So we just add the counter to another CPU's counter,
4991 * to keep the global sum constant after CPU-down:
4992 */
70b97a7f 4993static void migrate_nr_uninterruptible(struct rq *rq_src)
1da177e4 4994{
6ad4c188 4995 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
1da177e4 4996
1da177e4
LT
4997 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
4998 rq_src->nr_uninterruptible = 0;
1da177e4
LT
4999}
5000
dd41f596 5001/*
48c5ccae 5002 * remove the tasks which were accounted by rq from calc_load_tasks.
1da177e4 5003 */
48c5ccae 5004static void calc_global_load_remove(struct rq *rq)
1da177e4 5005{
48c5ccae
PZ
5006 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
5007 rq->calc_load_active = 0;
1da177e4
LT
5008}
5009
48f24c4d 5010/*
48c5ccae
PZ
5011 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5012 * try_to_wake_up()->select_task_rq().
5013 *
5014 * Called with rq->lock held even though we're in stop_machine() and
5015 * there's no concurrency possible, we hold the required locks anyway
5016 * because of lock validation efforts.
1da177e4 5017 */
48c5ccae 5018static void migrate_tasks(unsigned int dead_cpu)
1da177e4 5019{
70b97a7f 5020 struct rq *rq = cpu_rq(dead_cpu);
48c5ccae
PZ
5021 struct task_struct *next, *stop = rq->stop;
5022 int dest_cpu;
1da177e4
LT
5023
5024 /*
48c5ccae
PZ
5025 * Fudge the rq selection such that the below task selection loop
5026 * doesn't get stuck on the currently eligible stop task.
5027 *
5028 * We're currently inside stop_machine() and the rq is either stuck
5029 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5030 * either way we should never end up calling schedule() until we're
5031 * done here.
1da177e4 5032 */
48c5ccae 5033 rq->stop = NULL;
48f24c4d 5034
8cb120d3
PT
5035 /* Ensure any throttled groups are reachable by pick_next_task */
5036 unthrottle_offline_cfs_rqs(rq);
5037
dd41f596 5038 for ( ; ; ) {
48c5ccae
PZ
5039 /*
5040 * There's this thread running, bail when that's the only
5041 * remaining thread.
5042 */
5043 if (rq->nr_running == 1)
dd41f596 5044 break;
48c5ccae 5045
b67802ea 5046 next = pick_next_task(rq);
48c5ccae 5047 BUG_ON(!next);
79c53799 5048 next->sched_class->put_prev_task(rq, next);
e692ab53 5049
48c5ccae
PZ
5050 /* Find suitable destination for @next, with force if needed. */
5051 dest_cpu = select_fallback_rq(dead_cpu, next);
5052 raw_spin_unlock(&rq->lock);
5053
5054 __migrate_task(next, dead_cpu, dest_cpu);
5055
5056 raw_spin_lock(&rq->lock);
1da177e4 5057 }
dce48a84 5058
48c5ccae 5059 rq->stop = stop;
dce48a84 5060}
48c5ccae 5061
1da177e4
LT
5062#endif /* CONFIG_HOTPLUG_CPU */
5063
e692ab53
NP
5064#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5065
5066static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
5067 {
5068 .procname = "sched_domain",
c57baf1e 5069 .mode = 0555,
e0361851 5070 },
56992309 5071 {}
e692ab53
NP
5072};
5073
5074static struct ctl_table sd_ctl_root[] = {
e0361851
AD
5075 {
5076 .procname = "kernel",
c57baf1e 5077 .mode = 0555,
e0361851
AD
5078 .child = sd_ctl_dir,
5079 },
56992309 5080 {}
e692ab53
NP
5081};
5082
5083static struct ctl_table *sd_alloc_ctl_entry(int n)
5084{
5085 struct ctl_table *entry =
5cf9f062 5086 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 5087
e692ab53
NP
5088 return entry;
5089}
5090
6382bc90
MM
5091static void sd_free_ctl_entry(struct ctl_table **tablep)
5092{
cd790076 5093 struct ctl_table *entry;
6382bc90 5094
cd790076
MM
5095 /*
5096 * In the intermediate directories, both the child directory and
5097 * procname are dynamically allocated and could fail but the mode
41a2d6cf 5098 * will always be set. In the lowest directory the names are
cd790076
MM
5099 * static strings and all have proc handlers.
5100 */
5101 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
5102 if (entry->child)
5103 sd_free_ctl_entry(&entry->child);
cd790076
MM
5104 if (entry->proc_handler == NULL)
5105 kfree(entry->procname);
5106 }
6382bc90
MM
5107
5108 kfree(*tablep);
5109 *tablep = NULL;
5110}
5111
e692ab53 5112static void
e0361851 5113set_table_entry(struct ctl_table *entry,
e692ab53
NP
5114 const char *procname, void *data, int maxlen,
5115 mode_t mode, proc_handler *proc_handler)
5116{
e692ab53
NP
5117 entry->procname = procname;
5118 entry->data = data;
5119 entry->maxlen = maxlen;
5120 entry->mode = mode;
5121 entry->proc_handler = proc_handler;
5122}
5123
5124static struct ctl_table *
5125sd_alloc_ctl_domain_table(struct sched_domain *sd)
5126{
a5d8c348 5127 struct ctl_table *table = sd_alloc_ctl_entry(13);
e692ab53 5128
ad1cdc1d
MM
5129 if (table == NULL)
5130 return NULL;
5131
e0361851 5132 set_table_entry(&table[0], "min_interval", &sd->min_interval,
e692ab53 5133 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 5134 set_table_entry(&table[1], "max_interval", &sd->max_interval,
e692ab53 5135 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 5136 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
e692ab53 5137 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5138 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
e692ab53 5139 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5140 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
e692ab53 5141 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5142 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
e692ab53 5143 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5144 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
e692ab53 5145 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5146 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
e692ab53 5147 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5148 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
e692ab53 5149 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 5150 set_table_entry(&table[9], "cache_nice_tries",
e692ab53
NP
5151 &sd->cache_nice_tries,
5152 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 5153 set_table_entry(&table[10], "flags", &sd->flags,
e692ab53 5154 sizeof(int), 0644, proc_dointvec_minmax);
a5d8c348
IM
5155 set_table_entry(&table[11], "name", sd->name,
5156 CORENAME_MAX_SIZE, 0444, proc_dostring);
5157 /* &table[12] is terminator */
e692ab53
NP
5158
5159 return table;
5160}
5161
9a4e7159 5162static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
5163{
5164 struct ctl_table *entry, *table;
5165 struct sched_domain *sd;
5166 int domain_num = 0, i;
5167 char buf[32];
5168
5169 for_each_domain(cpu, sd)
5170 domain_num++;
5171 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
5172 if (table == NULL)
5173 return NULL;
e692ab53
NP
5174
5175 i = 0;
5176 for_each_domain(cpu, sd) {
5177 snprintf(buf, 32, "domain%d", i);
e692ab53 5178 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5179 entry->mode = 0555;
e692ab53
NP
5180 entry->child = sd_alloc_ctl_domain_table(sd);
5181 entry++;
5182 i++;
5183 }
5184 return table;
5185}
5186
5187static struct ctl_table_header *sd_sysctl_header;
6382bc90 5188static void register_sched_domain_sysctl(void)
e692ab53 5189{
6ad4c188 5190 int i, cpu_num = num_possible_cpus();
e692ab53
NP
5191 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5192 char buf[32];
5193
7378547f
MM
5194 WARN_ON(sd_ctl_dir[0].child);
5195 sd_ctl_dir[0].child = entry;
5196
ad1cdc1d
MM
5197 if (entry == NULL)
5198 return;
5199
6ad4c188 5200 for_each_possible_cpu(i) {
e692ab53 5201 snprintf(buf, 32, "cpu%d", i);
e692ab53 5202 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5203 entry->mode = 0555;
e692ab53 5204 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 5205 entry++;
e692ab53 5206 }
7378547f
MM
5207
5208 WARN_ON(sd_sysctl_header);
e692ab53
NP
5209 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5210}
6382bc90 5211
7378547f 5212/* may be called multiple times per register */
6382bc90
MM
5213static void unregister_sched_domain_sysctl(void)
5214{
7378547f
MM
5215 if (sd_sysctl_header)
5216 unregister_sysctl_table(sd_sysctl_header);
6382bc90 5217 sd_sysctl_header = NULL;
7378547f
MM
5218 if (sd_ctl_dir[0].child)
5219 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5220}
e692ab53 5221#else
6382bc90
MM
5222static void register_sched_domain_sysctl(void)
5223{
5224}
5225static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5226{
5227}
5228#endif
5229
1f11eb6a
GH
5230static void set_rq_online(struct rq *rq)
5231{
5232 if (!rq->online) {
5233 const struct sched_class *class;
5234
c6c4927b 5235 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5236 rq->online = 1;
5237
5238 for_each_class(class) {
5239 if (class->rq_online)
5240 class->rq_online(rq);
5241 }
5242 }
5243}
5244
5245static void set_rq_offline(struct rq *rq)
5246{
5247 if (rq->online) {
5248 const struct sched_class *class;
5249
5250 for_each_class(class) {
5251 if (class->rq_offline)
5252 class->rq_offline(rq);
5253 }
5254
c6c4927b 5255 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5256 rq->online = 0;
5257 }
5258}
5259
1da177e4
LT
5260/*
5261 * migration_call - callback that gets triggered when a CPU is added.
5262 * Here we can start up the necessary migration thread for the new CPU.
5263 */
48f24c4d
IM
5264static int __cpuinit
5265migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5266{
48f24c4d 5267 int cpu = (long)hcpu;
1da177e4 5268 unsigned long flags;
969c7921 5269 struct rq *rq = cpu_rq(cpu);
1da177e4 5270
48c5ccae 5271 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 5272
1da177e4 5273 case CPU_UP_PREPARE:
a468d389 5274 rq->calc_load_update = calc_load_update;
1da177e4 5275 break;
48f24c4d 5276
1da177e4 5277 case CPU_ONLINE:
1f94ef59 5278 /* Update our root-domain */
05fa785c 5279 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5280 if (rq->rd) {
c6c4927b 5281 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
5282
5283 set_rq_online(rq);
1f94ef59 5284 }
05fa785c 5285 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5286 break;
48f24c4d 5287
1da177e4 5288#ifdef CONFIG_HOTPLUG_CPU
08f503b0 5289 case CPU_DYING:
317f3941 5290 sched_ttwu_pending();
57d885fe 5291 /* Update our root-domain */
05fa785c 5292 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5293 if (rq->rd) {
c6c4927b 5294 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5295 set_rq_offline(rq);
57d885fe 5296 }
48c5ccae
PZ
5297 migrate_tasks(cpu);
5298 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 5299 raw_spin_unlock_irqrestore(&rq->lock, flags);
48c5ccae
PZ
5300
5301 migrate_nr_uninterruptible(rq);
5302 calc_global_load_remove(rq);
57d885fe 5303 break;
1da177e4
LT
5304#endif
5305 }
49c022e6
PZ
5306
5307 update_max_interval();
5308
1da177e4
LT
5309 return NOTIFY_OK;
5310}
5311
f38b0820
PM
5312/*
5313 * Register at high priority so that task migration (migrate_all_tasks)
5314 * happens before everything else. This has to be lower priority than
cdd6c482 5315 * the notifier in the perf_event subsystem, though.
1da177e4 5316 */
26c2143b 5317static struct notifier_block __cpuinitdata migration_notifier = {
1da177e4 5318 .notifier_call = migration_call,
50a323b7 5319 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5320};
5321
3a101d05
TH
5322static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5323 unsigned long action, void *hcpu)
5324{
5325 switch (action & ~CPU_TASKS_FROZEN) {
5326 case CPU_ONLINE:
5327 case CPU_DOWN_FAILED:
5328 set_cpu_active((long)hcpu, true);
5329 return NOTIFY_OK;
5330 default:
5331 return NOTIFY_DONE;
5332 }
5333}
5334
5335static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5336 unsigned long action, void *hcpu)
5337{
5338 switch (action & ~CPU_TASKS_FROZEN) {
5339 case CPU_DOWN_PREPARE:
5340 set_cpu_active((long)hcpu, false);
5341 return NOTIFY_OK;
5342 default:
5343 return NOTIFY_DONE;
5344 }
5345}
5346
7babe8db 5347static int __init migration_init(void)
1da177e4
LT
5348{
5349 void *cpu = (void *)(long)smp_processor_id();
07dccf33 5350 int err;
48f24c4d 5351
3a101d05 5352 /* Initialize migration for the boot CPU */
07dccf33
AM
5353 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5354 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
5355 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5356 register_cpu_notifier(&migration_notifier);
7babe8db 5357
3a101d05
TH
5358 /* Register cpu active notifiers */
5359 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5360 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5361
a004cd42 5362 return 0;
1da177e4 5363}
7babe8db 5364early_initcall(migration_init);
1da177e4
LT
5365#endif
5366
5367#ifdef CONFIG_SMP
476f3534 5368
4cb98839
PZ
5369static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5370
3e9830dc 5371#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5372
f6630114
MT
5373static __read_mostly int sched_domain_debug_enabled;
5374
5375static int __init sched_domain_debug_setup(char *str)
5376{
5377 sched_domain_debug_enabled = 1;
5378
5379 return 0;
5380}
5381early_param("sched_debug", sched_domain_debug_setup);
5382
7c16ec58 5383static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5384 struct cpumask *groupmask)
1da177e4 5385{
4dcf6aff 5386 struct sched_group *group = sd->groups;
434d53b0 5387 char str[256];
1da177e4 5388
968ea6d8 5389 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
96f874e2 5390 cpumask_clear(groupmask);
4dcf6aff
IM
5391
5392 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5393
5394 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5395 printk("does not load-balance\n");
4dcf6aff 5396 if (sd->parent)
3df0fc5b
PZ
5397 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5398 " has parent");
4dcf6aff 5399 return -1;
41c7ce9a
NP
5400 }
5401
3df0fc5b 5402 printk(KERN_CONT "span %s level %s\n", str, sd->name);
4dcf6aff 5403
758b2cdc 5404 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5405 printk(KERN_ERR "ERROR: domain->span does not contain "
5406 "CPU%d\n", cpu);
4dcf6aff 5407 }
758b2cdc 5408 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5409 printk(KERN_ERR "ERROR: domain->groups does not contain"
5410 " CPU%d\n", cpu);
4dcf6aff 5411 }
1da177e4 5412
4dcf6aff 5413 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5414 do {
4dcf6aff 5415 if (!group) {
3df0fc5b
PZ
5416 printk("\n");
5417 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5418 break;
5419 }
5420
9c3f75cb 5421 if (!group->sgp->power) {
3df0fc5b
PZ
5422 printk(KERN_CONT "\n");
5423 printk(KERN_ERR "ERROR: domain->cpu_power not "
5424 "set\n");
4dcf6aff
IM
5425 break;
5426 }
1da177e4 5427
758b2cdc 5428 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5429 printk(KERN_CONT "\n");
5430 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5431 break;
5432 }
1da177e4 5433
758b2cdc 5434 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5435 printk(KERN_CONT "\n");
5436 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5437 break;
5438 }
1da177e4 5439
758b2cdc 5440 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5441
968ea6d8 5442 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
381512cf 5443
3df0fc5b 5444 printk(KERN_CONT " %s", str);
9c3f75cb 5445 if (group->sgp->power != SCHED_POWER_SCALE) {
3df0fc5b 5446 printk(KERN_CONT " (cpu_power = %d)",
9c3f75cb 5447 group->sgp->power);
381512cf 5448 }
1da177e4 5449
4dcf6aff
IM
5450 group = group->next;
5451 } while (group != sd->groups);
3df0fc5b 5452 printk(KERN_CONT "\n");
1da177e4 5453
758b2cdc 5454 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5455 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5456
758b2cdc
RR
5457 if (sd->parent &&
5458 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5459 printk(KERN_ERR "ERROR: parent span is not a superset "
5460 "of domain->span\n");
4dcf6aff
IM
5461 return 0;
5462}
1da177e4 5463
4dcf6aff
IM
5464static void sched_domain_debug(struct sched_domain *sd, int cpu)
5465{
5466 int level = 0;
1da177e4 5467
f6630114
MT
5468 if (!sched_domain_debug_enabled)
5469 return;
5470
4dcf6aff
IM
5471 if (!sd) {
5472 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5473 return;
5474 }
1da177e4 5475
4dcf6aff
IM
5476 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5477
5478 for (;;) {
4cb98839 5479 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5480 break;
1da177e4
LT
5481 level++;
5482 sd = sd->parent;
33859f7f 5483 if (!sd)
4dcf6aff
IM
5484 break;
5485 }
1da177e4 5486}
6d6bc0ad 5487#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5488# define sched_domain_debug(sd, cpu) do { } while (0)
6d6bc0ad 5489#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5490
1a20ff27 5491static int sd_degenerate(struct sched_domain *sd)
245af2c7 5492{
758b2cdc 5493 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5494 return 1;
5495
5496 /* Following flags need at least 2 groups */
5497 if (sd->flags & (SD_LOAD_BALANCE |
5498 SD_BALANCE_NEWIDLE |
5499 SD_BALANCE_FORK |
89c4710e
SS
5500 SD_BALANCE_EXEC |
5501 SD_SHARE_CPUPOWER |
5502 SD_SHARE_PKG_RESOURCES)) {
245af2c7
SS
5503 if (sd->groups != sd->groups->next)
5504 return 0;
5505 }
5506
5507 /* Following flags don't use groups */
c88d5910 5508 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5509 return 0;
5510
5511 return 1;
5512}
5513
48f24c4d
IM
5514static int
5515sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5516{
5517 unsigned long cflags = sd->flags, pflags = parent->flags;
5518
5519 if (sd_degenerate(parent))
5520 return 1;
5521
758b2cdc 5522 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5523 return 0;
5524
245af2c7
SS
5525 /* Flags needing groups don't count if only 1 group in parent */
5526 if (parent->groups == parent->groups->next) {
5527 pflags &= ~(SD_LOAD_BALANCE |
5528 SD_BALANCE_NEWIDLE |
5529 SD_BALANCE_FORK |
89c4710e
SS
5530 SD_BALANCE_EXEC |
5531 SD_SHARE_CPUPOWER |
5532 SD_SHARE_PKG_RESOURCES);
5436499e
KC
5533 if (nr_node_ids == 1)
5534 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5535 }
5536 if (~cflags & pflags)
5537 return 0;
5538
5539 return 1;
5540}
5541
dce840a0 5542static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5543{
dce840a0 5544 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5545
68e74568 5546 cpupri_cleanup(&rd->cpupri);
c6c4927b
RR
5547 free_cpumask_var(rd->rto_mask);
5548 free_cpumask_var(rd->online);
5549 free_cpumask_var(rd->span);
5550 kfree(rd);
5551}
5552
57d885fe
GH
5553static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5554{
a0490fa3 5555 struct root_domain *old_rd = NULL;
57d885fe 5556 unsigned long flags;
57d885fe 5557
05fa785c 5558 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5559
5560 if (rq->rd) {
a0490fa3 5561 old_rd = rq->rd;
57d885fe 5562
c6c4927b 5563 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5564 set_rq_offline(rq);
57d885fe 5565
c6c4927b 5566 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5567
a0490fa3
IM
5568 /*
 5569 * If we don't want to free the old_rd yet then
5570 * set old_rd to NULL to skip the freeing later
5571 * in this function:
5572 */
5573 if (!atomic_dec_and_test(&old_rd->refcount))
5574 old_rd = NULL;
57d885fe
GH
5575 }
5576
5577 atomic_inc(&rd->refcount);
5578 rq->rd = rd;
5579
c6c4927b 5580 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5581 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5582 set_rq_online(rq);
57d885fe 5583
05fa785c 5584 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5585
5586 if (old_rd)
dce840a0 5587 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5588}
5589
68c38fc3 5590static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5591{
5592 memset(rd, 0, sizeof(*rd));
5593
68c38fc3 5594 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5595 goto out;
68c38fc3 5596 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5597 goto free_span;
68c38fc3 5598 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
c6c4927b 5599 goto free_online;
6e0534f2 5600
68c38fc3 5601 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5602 goto free_rto_mask;
c6c4927b 5603 return 0;
6e0534f2 5604
68e74568
RR
5605free_rto_mask:
5606 free_cpumask_var(rd->rto_mask);
c6c4927b
RR
5607free_online:
5608 free_cpumask_var(rd->online);
5609free_span:
5610 free_cpumask_var(rd->span);
0c910d28 5611out:
c6c4927b 5612 return -ENOMEM;
57d885fe
GH
5613}
5614
029632fb
PZ
5615/*
5616 * By default the system creates a single root-domain with all cpus as
5617 * members (mimicking the global state we have today).
5618 */
5619struct root_domain def_root_domain;
5620
57d885fe
GH
5621static void init_defrootdomain(void)
5622{
68c38fc3 5623 init_rootdomain(&def_root_domain);
c6c4927b 5624
57d885fe
GH
5625 atomic_set(&def_root_domain.refcount, 1);
5626}
5627
dc938520 5628static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5629{
5630 struct root_domain *rd;
5631
5632 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5633 if (!rd)
5634 return NULL;
5635
68c38fc3 5636 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
5637 kfree(rd);
5638 return NULL;
5639 }
57d885fe
GH
5640
5641 return rd;
5642}
5643
e3589f6c
PZ
5644static void free_sched_groups(struct sched_group *sg, int free_sgp)
5645{
5646 struct sched_group *tmp, *first;
5647
5648 if (!sg)
5649 return;
5650
5651 first = sg;
5652 do {
5653 tmp = sg->next;
5654
5655 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5656 kfree(sg->sgp);
5657
5658 kfree(sg);
5659 sg = tmp;
5660 } while (sg != first);
5661}
5662
dce840a0
PZ
5663static void free_sched_domain(struct rcu_head *rcu)
5664{
5665 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
5666
5667 /*
 5668 * If it's an overlapping domain it has private groups; iterate and
5669 * nuke them all.
5670 */
5671 if (sd->flags & SD_OVERLAP) {
5672 free_sched_groups(sd->groups, 1);
5673 } else if (atomic_dec_and_test(&sd->groups->ref)) {
9c3f75cb 5674 kfree(sd->groups->sgp);
dce840a0 5675 kfree(sd->groups);
9c3f75cb 5676 }
dce840a0
PZ
5677 kfree(sd);
5678}
5679
5680static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5681{
5682 call_rcu(&sd->rcu, free_sched_domain);
5683}
5684
5685static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5686{
5687 for (; sd; sd = sd->parent)
5688 destroy_sched_domain(sd, cpu);
5689}
5690
1da177e4 5691/*
0eab9146 5692 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
5693 * hold the hotplug lock.
5694 */
0eab9146
IM
5695static void
5696cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 5697{
70b97a7f 5698 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
5699 struct sched_domain *tmp;
5700
5701 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 5702 for (tmp = sd; tmp; ) {
245af2c7
SS
5703 struct sched_domain *parent = tmp->parent;
5704 if (!parent)
5705 break;
f29c9b1c 5706
1a848870 5707 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 5708 tmp->parent = parent->parent;
1a848870
SS
5709 if (parent->parent)
5710 parent->parent->child = tmp;
dce840a0 5711 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
5712 } else
5713 tmp = tmp->parent;
245af2c7
SS
5714 }
5715
1a848870 5716 if (sd && sd_degenerate(sd)) {
dce840a0 5717 tmp = sd;
245af2c7 5718 sd = sd->parent;
dce840a0 5719 destroy_sched_domain(tmp, cpu);
1a848870
SS
5720 if (sd)
5721 sd->child = NULL;
5722 }
1da177e4 5723
4cb98839 5724 sched_domain_debug(sd, cpu);
1da177e4 5725
57d885fe 5726 rq_attach_root(rq, rd);
dce840a0 5727 tmp = rq->sd;
674311d5 5728 rcu_assign_pointer(rq->sd, sd);
dce840a0 5729 destroy_sched_domains(tmp, cpu);
1da177e4
LT
5730}
5731
5732/* cpus with isolated domains */
dcc30a35 5733static cpumask_var_t cpu_isolated_map;
1da177e4
LT
5734
 5735/* Set up the mask of cpus configured for isolated domains */
5736static int __init isolated_cpu_setup(char *str)
5737{
bdddd296 5738 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 5739 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
5740 return 1;
5741}
5742
8927f494 5743__setup("isolcpus=", isolated_cpu_setup);
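/*
 * Example (hypothetical CPU numbers): booting with "isolcpus=2,3" parses
 * CPUs 2 and 3 into cpu_isolated_map; the domain-build paths below then
 * subtract that mask, so those CPUs end up in no sched domain and are
 * never load-balanced.
 */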
1da177e4 5744
9c1cfda2 5745#ifdef CONFIG_NUMA
198e2f18 5746
9c1cfda2
JH
5747/**
5748 * find_next_best_node - find the next node to include in a sched_domain
5749 * @node: node whose sched_domain we're building
5750 * @used_nodes: nodes already in the sched_domain
5751 *
41a2d6cf 5752 * Find the next node to include in a given scheduling domain. Simply
9c1cfda2
JH
5753 * finds the closest node not already in the @used_nodes map.
5754 *
5755 * Should use nodemask_t.
5756 */
c5f59f08 5757static int find_next_best_node(int node, nodemask_t *used_nodes)
9c1cfda2 5758{
7142d17e 5759 int i, n, val, min_val, best_node = -1;
9c1cfda2
JH
5760
5761 min_val = INT_MAX;
5762
076ac2af 5763 for (i = 0; i < nr_node_ids; i++) {
9c1cfda2 5764 /* Start at @node */
076ac2af 5765 n = (node + i) % nr_node_ids;
9c1cfda2
JH
5766
5767 if (!nr_cpus_node(n))
5768 continue;
5769
5770 /* Skip already used nodes */
c5f59f08 5771 if (node_isset(n, *used_nodes))
9c1cfda2
JH
5772 continue;
5773
5774 /* Simple min distance search */
5775 val = node_distance(node, n);
5776
5777 if (val < min_val) {
5778 min_val = val;
5779 best_node = n;
5780 }
5781 }
5782
7142d17e
HD
5783 if (best_node != -1)
5784 node_set(best_node, *used_nodes);
9c1cfda2
JH
5785 return best_node;
5786}
5787
5788/**
5789 * sched_domain_node_span - get a cpumask for a node's sched_domain
5790 * @node: node whose cpumask we're constructing
73486722 5791 * @span: resulting cpumask
9c1cfda2 5792 *
41a2d6cf 5793 * Given a node, construct a good cpumask for its sched_domain to span. It
9c1cfda2
JH
5794 * should be one that prevents unnecessary balancing, but also spreads tasks
5795 * out optimally.
5796 */
96f874e2 5797static void sched_domain_node_span(int node, struct cpumask *span)
9c1cfda2 5798{
c5f59f08 5799 nodemask_t used_nodes;
48f24c4d 5800 int i;
9c1cfda2 5801
6ca09dfc 5802 cpumask_clear(span);
c5f59f08 5803 nodes_clear(used_nodes);
9c1cfda2 5804
6ca09dfc 5805 cpumask_or(span, span, cpumask_of_node(node));
c5f59f08 5806 node_set(node, used_nodes);
9c1cfda2
JH
5807
5808 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
c5f59f08 5809 int next_node = find_next_best_node(node, &used_nodes);
7142d17e
HD
5810 if (next_node < 0)
5811 break;
6ca09dfc 5812 cpumask_or(span, span, cpumask_of_node(next_node));
9c1cfda2 5813 }
9c1cfda2 5814}
d3081f52
PZ
5815
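/*
 * Worked example for sched_domain_node_span() above (hypothetical
 * distances): building the span for node 0 with node_distance(0, 1) == 20
 * and node_distance(0, 2) == 40 starts from node 0's own CPUs, then
 * find_next_best_node() greedily adds node 1 before node 2, marking each
 * chosen node in used_nodes so it is not picked twice. At most
 * SD_NODES_PER_DOMAIN - 1 extra nodes are added this way.
 */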
5816static const struct cpumask *cpu_node_mask(int cpu)
5817{
5818 lockdep_assert_held(&sched_domains_mutex);
5819
5820 sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
5821
5822 return sched_domains_tmpmask;
5823}
2c402dc3
PZ
5824
5825static const struct cpumask *cpu_allnodes_mask(int cpu)
5826{
5827 return cpu_possible_mask;
5828}
6d6bc0ad 5829#endif /* CONFIG_NUMA */
9c1cfda2 5830
d3081f52
PZ
5831static const struct cpumask *cpu_cpu_mask(int cpu)
5832{
5833 return cpumask_of_node(cpu_to_node(cpu));
5834}
5835
5c45bf27 5836int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
48f24c4d 5837
dce840a0
PZ
5838struct sd_data {
5839 struct sched_domain **__percpu sd;
5840 struct sched_group **__percpu sg;
9c3f75cb 5841 struct sched_group_power **__percpu sgp;
dce840a0
PZ
5842};
5843
49a02c51 5844struct s_data {
21d42ccf 5845 struct sched_domain ** __percpu sd;
49a02c51
AH
5846 struct root_domain *rd;
5847};
5848
2109b99e 5849enum s_alloc {
2109b99e 5850 sa_rootdomain,
21d42ccf 5851 sa_sd,
dce840a0 5852 sa_sd_storage,
2109b99e
AH
5853 sa_none,
5854};
5855
54ab4ff4
PZ
5856struct sched_domain_topology_level;
5857
5858typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
eb7a74e6
PZ
5859typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5860
e3589f6c
PZ
5861#define SDTL_OVERLAP 0x01
5862
eb7a74e6 5863struct sched_domain_topology_level {
2c402dc3
PZ
5864 sched_domain_init_f init;
5865 sched_domain_mask_f mask;
e3589f6c 5866 int flags;
54ab4ff4 5867 struct sd_data data;
eb7a74e6
PZ
5868};
5869
e3589f6c
PZ
5870static int
5871build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5872{
5873 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5874 const struct cpumask *span = sched_domain_span(sd);
5875 struct cpumask *covered = sched_domains_tmpmask;
5876 struct sd_data *sdd = sd->private;
5877 struct sched_domain *child;
5878 int i;
5879
5880 cpumask_clear(covered);
5881
5882 for_each_cpu(i, span) {
5883 struct cpumask *sg_span;
5884
5885 if (cpumask_test_cpu(i, covered))
5886 continue;
5887
5888 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 5889 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
5890
5891 if (!sg)
5892 goto fail;
5893
5894 sg_span = sched_group_cpus(sg);
5895
5896 child = *per_cpu_ptr(sdd->sd, i);
5897 if (child->child) {
5898 child = child->child;
5899 cpumask_copy(sg_span, sched_domain_span(child));
5900 } else
5901 cpumask_set_cpu(i, sg_span);
5902
5903 cpumask_or(covered, covered, sg_span);
5904
5905 sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
5906 atomic_inc(&sg->sgp->ref);
5907
5908 if (cpumask_test_cpu(cpu, sg_span))
5909 groups = sg;
5910
5911 if (!first)
5912 first = sg;
5913 if (last)
5914 last->next = sg;
5915 last = sg;
5916 last->next = first;
5917 }
5918 sd->groups = groups;
5919
5920 return 0;
5921
5922fail:
5923 free_sched_groups(first, 0);
5924
5925 return -ENOMEM;
5926}
5927
dce840a0 5928static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 5929{
dce840a0
PZ
5930 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5931 struct sched_domain *child = sd->child;
1da177e4 5932
dce840a0
PZ
5933 if (child)
5934 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 5935
9c3f75cb 5936 if (sg) {
dce840a0 5937 *sg = *per_cpu_ptr(sdd->sg, cpu);
9c3f75cb 5938 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
e3589f6c 5939 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
9c3f75cb 5940 }
dce840a0
PZ
5941
5942 return cpu;
1e9f28fa 5943}
1e9f28fa 5944
01a08546 5945/*
dce840a0
PZ
5946 * build_sched_groups will build a circular linked list of the groups
5947 * covered by the given span, and will set each group's ->cpumask correctly,
5948 * and ->cpu_power to 0.
e3589f6c
PZ
5949 *
5950 * Assumes the sched_domain tree is fully constructed
01a08546 5951 */
e3589f6c
PZ
5952static int
5953build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 5954{
dce840a0
PZ
5955 struct sched_group *first = NULL, *last = NULL;
5956 struct sd_data *sdd = sd->private;
5957 const struct cpumask *span = sched_domain_span(sd);
f96225fd 5958 struct cpumask *covered;
dce840a0 5959 int i;
9c1cfda2 5960
e3589f6c
PZ
5961 get_group(cpu, sdd, &sd->groups);
5962 atomic_inc(&sd->groups->ref);
5963
5964 if (cpu != cpumask_first(sched_domain_span(sd)))
5965 return 0;
5966
f96225fd
PZ
5967 lockdep_assert_held(&sched_domains_mutex);
5968 covered = sched_domains_tmpmask;
5969
dce840a0 5970 cpumask_clear(covered);
6711cab4 5971
dce840a0
PZ
5972 for_each_cpu(i, span) {
5973 struct sched_group *sg;
5974 int group = get_group(i, sdd, &sg);
5975 int j;
6711cab4 5976
dce840a0
PZ
5977 if (cpumask_test_cpu(i, covered))
5978 continue;
6711cab4 5979
dce840a0 5980 cpumask_clear(sched_group_cpus(sg));
9c3f75cb 5981 sg->sgp->power = 0;
0601a88d 5982
dce840a0
PZ
5983 for_each_cpu(j, span) {
5984 if (get_group(j, sdd, NULL) != group)
5985 continue;
0601a88d 5986
dce840a0
PZ
5987 cpumask_set_cpu(j, covered);
5988 cpumask_set_cpu(j, sched_group_cpus(sg));
5989 }
0601a88d 5990
dce840a0
PZ
5991 if (!first)
5992 first = sg;
5993 if (last)
5994 last->next = sg;
5995 last = sg;
5996 }
5997 last->next = first;
e3589f6c
PZ
5998
5999 return 0;
0601a88d 6000}
51888ca2 6001
89c4710e
SS
6002/*
6003 * Initialize sched groups cpu_power.
6004 *
6005 * cpu_power indicates the capacity of sched group, which is used while
6006 * distributing the load between different sched groups in a sched domain.
 6007 * Typically, cpu_power for all the groups in a sched domain will be the same
 6008 * unless there are asymmetries in the topology. If there are asymmetries, the
 6009 * group having more cpu_power will pick up more load compared to the group having
6010 * less cpu_power.
89c4710e
SS
6011 */
6012static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6013{
e3589f6c 6014 struct sched_group *sg = sd->groups;
89c4710e 6015
e3589f6c
PZ
6016 WARN_ON(!sd || !sg);
6017
6018 do {
6019 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6020 sg = sg->next;
6021 } while (sg != sd->groups);
89c4710e 6022
e3589f6c
PZ
6023 if (cpu != group_first_cpu(sg))
6024 return;
aae6d3dd 6025
d274cb30 6026 update_group_power(sd, cpu);
89c4710e
SS
6027}
6028
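/*
 * Worked example for init_sched_groups_power() above (made-up numbers):
 * with SCHED_POWER_SCALE nominally 1024, a group whose cpu_power is 2048
 * sitting next to a group with cpu_power 1024 is expected to carry load in
 * roughly the ratio
 *
 *	load_a : load_b  ==  2048 : 1024  ==  2 : 1
 *
 * which is what "pick up more load" means in the comment above.
 */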
029632fb
PZ
6029int __weak arch_sd_sibling_asym_packing(void)
6030{
6031 return 0*SD_ASYM_PACKING;
6032}
6033
7c16ec58
MT
6034/*
 6035 * Initializers for sched domains
6036 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6037 */
6038
a5d8c348
IM
6039#ifdef CONFIG_SCHED_DEBUG
6040# define SD_INIT_NAME(sd, type) sd->name = #type
6041#else
6042# define SD_INIT_NAME(sd, type) do { } while (0)
6043#endif
6044
54ab4ff4
PZ
6045#define SD_INIT_FUNC(type) \
6046static noinline struct sched_domain * \
6047sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
6048{ \
6049 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
6050 *sd = SD_##type##_INIT; \
54ab4ff4
PZ
6051 SD_INIT_NAME(sd, type); \
6052 sd->private = &tl->data; \
6053 return sd; \
7c16ec58
MT
6054}
6055
6056SD_INIT_FUNC(CPU)
6057#ifdef CONFIG_NUMA
6058 SD_INIT_FUNC(ALLNODES)
6059 SD_INIT_FUNC(NODE)
6060#endif
6061#ifdef CONFIG_SCHED_SMT
6062 SD_INIT_FUNC(SIBLING)
6063#endif
6064#ifdef CONFIG_SCHED_MC
6065 SD_INIT_FUNC(MC)
6066#endif
01a08546
HC
6067#ifdef CONFIG_SCHED_BOOK
6068 SD_INIT_FUNC(BOOK)
6069#endif
7c16ec58 6070
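/*
 * For reference, a sketch of what SD_INIT_FUNC(CPU) above expands to
 * (SD_CPU_INIT is the template initializer from <linux/topology.h>;
 * sd->name is only set on CONFIG_SCHED_DEBUG kernels):
 *
 *	static noinline struct sched_domain *
 *	sd_init_CPU(struct sched_domain_topology_level *tl, int cpu)
 *	{
 *		struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
 *		*sd = SD_CPU_INIT;
 *		SD_INIT_NAME(sd, CPU);		/* sd->name = "CPU" */
 *		sd->private = &tl->data;
 *		return sd;
 *	}
 */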
1d3504fc 6071static int default_relax_domain_level = -1;
60495e77 6072int sched_domain_level_max;
1d3504fc
HS
6073
6074static int __init setup_relax_domain_level(char *str)
6075{
30e0e178
LZ
6076 unsigned long val;
6077
6078 val = simple_strtoul(str, NULL, 0);
60495e77 6079 if (val < sched_domain_level_max)
30e0e178
LZ
6080 default_relax_domain_level = val;
6081
1d3504fc
HS
6082 return 1;
6083}
6084__setup("relax_domain_level=", setup_relax_domain_level);
6085
6086static void set_domain_attribute(struct sched_domain *sd,
6087 struct sched_domain_attr *attr)
6088{
6089 int request;
6090
6091 if (!attr || attr->relax_domain_level < 0) {
6092 if (default_relax_domain_level < 0)
6093 return;
6094 else
6095 request = default_relax_domain_level;
6096 } else
6097 request = attr->relax_domain_level;
6098 if (request < sd->level) {
6099 /* turn off idle balance on this domain */
c88d5910 6100 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6101 } else {
6102 /* turn on idle balance on this domain */
c88d5910 6103 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6104 }
6105}
6106
54ab4ff4
PZ
6107static void __sdt_free(const struct cpumask *cpu_map);
6108static int __sdt_alloc(const struct cpumask *cpu_map);
6109
2109b99e
AH
6110static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6111 const struct cpumask *cpu_map)
6112{
6113 switch (what) {
2109b99e 6114 case sa_rootdomain:
822ff793
PZ
6115 if (!atomic_read(&d->rd->refcount))
6116 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
6117 case sa_sd:
6118 free_percpu(d->sd); /* fall through */
dce840a0 6119 case sa_sd_storage:
54ab4ff4 6120 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
6121 case sa_none:
6122 break;
6123 }
6124}
3404c8d9 6125
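/*
 * The enum s_alloc value records how far allocation got:
 * __visit_domain_allocation_hell() below returns the highest stage it
 * reached, and build_sched_domains() later hands that value to
 * __free_domain_allocs() above, whose fall-through switch unwinds from
 * that stage downwards, skipping stages that were never reached.
 */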
2109b99e
AH
6126static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6127 const struct cpumask *cpu_map)
6128{
dce840a0
PZ
6129 memset(d, 0, sizeof(*d));
6130
54ab4ff4
PZ
6131 if (__sdt_alloc(cpu_map))
6132 return sa_sd_storage;
dce840a0
PZ
6133 d->sd = alloc_percpu(struct sched_domain *);
6134 if (!d->sd)
6135 return sa_sd_storage;
2109b99e 6136 d->rd = alloc_rootdomain();
dce840a0 6137 if (!d->rd)
21d42ccf 6138 return sa_sd;
2109b99e
AH
6139 return sa_rootdomain;
6140}
57d885fe 6141
dce840a0
PZ
6142/*
6143 * NULL the sd_data elements we've used to build the sched_domain and
6144 * sched_group structure so that the subsequent __free_domain_allocs()
6145 * will not free the data we're using.
6146 */
6147static void claim_allocations(int cpu, struct sched_domain *sd)
6148{
6149 struct sd_data *sdd = sd->private;
dce840a0
PZ
6150
6151 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6152 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6153
e3589f6c 6154 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 6155 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c
PZ
6156
6157 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
9c3f75cb 6158 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
dce840a0
PZ
6159}
6160
2c402dc3
PZ
6161#ifdef CONFIG_SCHED_SMT
6162static const struct cpumask *cpu_smt_mask(int cpu)
7f4588f3 6163{
2c402dc3 6164 return topology_thread_cpumask(cpu);
3bd65a80 6165}
2c402dc3 6166#endif
7f4588f3 6167
d069b916
PZ
6168/*
6169 * Topology list, bottom-up.
6170 */
2c402dc3 6171static struct sched_domain_topology_level default_topology[] = {
d069b916
PZ
6172#ifdef CONFIG_SCHED_SMT
6173 { sd_init_SIBLING, cpu_smt_mask, },
01a08546 6174#endif
1e9f28fa 6175#ifdef CONFIG_SCHED_MC
2c402dc3 6176 { sd_init_MC, cpu_coregroup_mask, },
1e9f28fa 6177#endif
d069b916
PZ
6178#ifdef CONFIG_SCHED_BOOK
6179 { sd_init_BOOK, cpu_book_mask, },
6180#endif
6181 { sd_init_CPU, cpu_cpu_mask, },
6182#ifdef CONFIG_NUMA
e3589f6c 6183 { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
d069b916 6184 { sd_init_ALLNODES, cpu_allnodes_mask, },
1da177e4 6185#endif
eb7a74e6
PZ
6186 { NULL, },
6187};
6188
6189static struct sched_domain_topology_level *sched_domain_topology = default_topology;
6190
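/*
 * Illustration (hypothetical machine): on a two-socket box with SMT and
 * multi-core packages, and the matching CONFIG_SCHED_* options enabled,
 * the list above yields per CPU, bottom-up:
 *
 *	SIBLING (hardware threads of one core)
 *	  -> MC  (cores of one package)
 *	  -> CPU (all CPUs of the node)
 *	  [ -> NODE -> ALLNODES on NUMA kernels ]
 *
 * build_sched_domains() walks the list with build_sched_domain(), linking
 * each level to the one below it via sd->child / sd->parent.
 */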
54ab4ff4
PZ
6191static int __sdt_alloc(const struct cpumask *cpu_map)
6192{
6193 struct sched_domain_topology_level *tl;
6194 int j;
6195
6196 for (tl = sched_domain_topology; tl->init; tl++) {
6197 struct sd_data *sdd = &tl->data;
6198
6199 sdd->sd = alloc_percpu(struct sched_domain *);
6200 if (!sdd->sd)
6201 return -ENOMEM;
6202
6203 sdd->sg = alloc_percpu(struct sched_group *);
6204 if (!sdd->sg)
6205 return -ENOMEM;
6206
9c3f75cb
PZ
6207 sdd->sgp = alloc_percpu(struct sched_group_power *);
6208 if (!sdd->sgp)
6209 return -ENOMEM;
6210
54ab4ff4
PZ
6211 for_each_cpu(j, cpu_map) {
6212 struct sched_domain *sd;
6213 struct sched_group *sg;
9c3f75cb 6214 struct sched_group_power *sgp;
54ab4ff4
PZ
6215
6216 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6217 GFP_KERNEL, cpu_to_node(j));
6218 if (!sd)
6219 return -ENOMEM;
6220
6221 *per_cpu_ptr(sdd->sd, j) = sd;
6222
6223 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6224 GFP_KERNEL, cpu_to_node(j));
6225 if (!sg)
6226 return -ENOMEM;
6227
6228 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb
PZ
6229
6230 sgp = kzalloc_node(sizeof(struct sched_group_power),
6231 GFP_KERNEL, cpu_to_node(j));
6232 if (!sgp)
6233 return -ENOMEM;
6234
6235 *per_cpu_ptr(sdd->sgp, j) = sgp;
54ab4ff4
PZ
6236 }
6237 }
6238
6239 return 0;
6240}
6241
6242static void __sdt_free(const struct cpumask *cpu_map)
6243{
6244 struct sched_domain_topology_level *tl;
6245 int j;
6246
6247 for (tl = sched_domain_topology; tl->init; tl++) {
6248 struct sd_data *sdd = &tl->data;
6249
6250 for_each_cpu(j, cpu_map) {
e3589f6c
PZ
6251 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
6252 if (sd && (sd->flags & SD_OVERLAP))
6253 free_sched_groups(sd->groups, 0);
feff8fa0 6254 kfree(*per_cpu_ptr(sdd->sd, j));
54ab4ff4 6255 kfree(*per_cpu_ptr(sdd->sg, j));
9c3f75cb 6256 kfree(*per_cpu_ptr(sdd->sgp, j));
54ab4ff4
PZ
6257 }
6258 free_percpu(sdd->sd);
6259 free_percpu(sdd->sg);
9c3f75cb 6260 free_percpu(sdd->sgp);
54ab4ff4
PZ
6261 }
6262}
6263
2c402dc3
PZ
6264struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6265 struct s_data *d, const struct cpumask *cpu_map,
d069b916 6266 struct sched_domain_attr *attr, struct sched_domain *child,
2c402dc3
PZ
6267 int cpu)
6268{
54ab4ff4 6269 struct sched_domain *sd = tl->init(tl, cpu);
2c402dc3 6270 if (!sd)
d069b916 6271 return child;
2c402dc3
PZ
6272
6273 set_domain_attribute(sd, attr);
6274 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6275 if (child) {
6276 sd->level = child->level + 1;
6277 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6278 child->parent = sd;
60495e77 6279 }
d069b916 6280 sd->child = child;
2c402dc3
PZ
6281
6282 return sd;
6283}
6284
2109b99e
AH
6285/*
6286 * Build sched domains for a given set of cpus and attach the sched domains
6287 * to the individual cpus
6288 */
dce840a0
PZ
6289static int build_sched_domains(const struct cpumask *cpu_map,
6290 struct sched_domain_attr *attr)
2109b99e
AH
6291{
6292 enum s_alloc alloc_state = sa_none;
dce840a0 6293 struct sched_domain *sd;
2109b99e 6294 struct s_data d;
822ff793 6295 int i, ret = -ENOMEM;
9c1cfda2 6296
2109b99e
AH
6297 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6298 if (alloc_state != sa_rootdomain)
6299 goto error;
9c1cfda2 6300
dce840a0 6301 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6302 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6303 struct sched_domain_topology_level *tl;
6304
3bd65a80 6305 sd = NULL;
e3589f6c 6306 for (tl = sched_domain_topology; tl->init; tl++) {
2c402dc3 6307 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
e3589f6c
PZ
6308 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6309 sd->flags |= SD_OVERLAP;
d110235d
PZ
6310 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6311 break;
e3589f6c 6312 }
d274cb30 6313
d069b916
PZ
6314 while (sd->child)
6315 sd = sd->child;
6316
21d42ccf 6317 *per_cpu_ptr(d.sd, i) = sd;
dce840a0
PZ
6318 }
6319
6320 /* Build the groups for the domains */
6321 for_each_cpu(i, cpu_map) {
6322 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6323 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
6324 if (sd->flags & SD_OVERLAP) {
6325 if (build_overlap_sched_groups(sd, i))
6326 goto error;
6327 } else {
6328 if (build_sched_groups(sd, i))
6329 goto error;
6330 }
1cf51902 6331 }
a06dadbe 6332 }
9c1cfda2 6333
1da177e4 6334 /* Calculate CPU power for physical packages and nodes */
a9c9a9b6
PZ
6335 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6336 if (!cpumask_test_cpu(i, cpu_map))
6337 continue;
9c1cfda2 6338
dce840a0
PZ
6339 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6340 claim_allocations(i, sd);
cd4ea6ae 6341 init_sched_groups_power(i, sd);
dce840a0 6342 }
f712c0c7 6343 }
9c1cfda2 6344
1da177e4 6345 /* Attach the domains */
dce840a0 6346 rcu_read_lock();
abcd083a 6347 for_each_cpu(i, cpu_map) {
21d42ccf 6348 sd = *per_cpu_ptr(d.sd, i);
49a02c51 6349 cpu_attach_domain(sd, d.rd, i);
1da177e4 6350 }
dce840a0 6351 rcu_read_unlock();
51888ca2 6352
822ff793 6353 ret = 0;
51888ca2 6354error:
2109b99e 6355 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 6356 return ret;
1da177e4 6357}
029190c5 6358
acc3f5d7 6359static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 6360static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
6361static struct sched_domain_attr *dattr_cur;
 6362 /* attributes of custom domains in 'doms_cur' */
029190c5
PJ
6363
6364/*
6365 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
 6366 * cpumask) fails, then fall back to a single sched domain,
6367 * as determined by the single cpumask fallback_doms.
029190c5 6368 */
4212823f 6369static cpumask_var_t fallback_doms;
029190c5 6370
ee79d1bd
HC
6371/*
6372 * arch_update_cpu_topology lets virtualized architectures update the
6373 * cpu core maps. It is supposed to return 1 if the topology changed
6374 * or 0 if it stayed the same.
6375 */
6376int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 6377{
ee79d1bd 6378 return 0;
22e52b07
HC
6379}
6380
acc3f5d7
RR
6381cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6382{
6383 int i;
6384 cpumask_var_t *doms;
6385
6386 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6387 if (!doms)
6388 return NULL;
6389 for (i = 0; i < ndoms; i++) {
6390 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6391 free_sched_domains(doms, i);
6392 return NULL;
6393 }
6394 }
6395 return doms;
6396}
6397
6398void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6399{
6400 unsigned int i;
6401 for (i = 0; i < ndoms; i++)
6402 free_cpumask_var(doms[i]);
6403 kfree(doms);
6404}
6405
1a20ff27 6406/*
41a2d6cf 6407 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
6408 * For now this just excludes isolated cpus, but could be used to
6409 * exclude other special cases in the future.
1a20ff27 6410 */
c4a8849a 6411static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 6412{
7378547f
MM
6413 int err;
6414
22e52b07 6415 arch_update_cpu_topology();
029190c5 6416 ndoms_cur = 1;
acc3f5d7 6417 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 6418 if (!doms_cur)
acc3f5d7
RR
6419 doms_cur = &fallback_doms;
6420 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
1d3504fc 6421 dattr_cur = NULL;
dce840a0 6422 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 6423 register_sched_domain_sysctl();
7378547f
MM
6424
6425 return err;
1a20ff27
DG
6426}
6427
1a20ff27
DG
6428/*
6429 * Detach sched domains from a group of cpus specified in cpu_map
6430 * These cpus will now be attached to the NULL domain
6431 */
96f874e2 6432static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
6433{
6434 int i;
6435
dce840a0 6436 rcu_read_lock();
abcd083a 6437 for_each_cpu(i, cpu_map)
57d885fe 6438 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 6439 rcu_read_unlock();
1a20ff27
DG
6440}
6441
1d3504fc
HS
6442/* handle null as "default" */
6443static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6444 struct sched_domain_attr *new, int idx_new)
6445{
6446 struct sched_domain_attr tmp;
6447
6448 /* fast path */
6449 if (!new && !cur)
6450 return 1;
6451
6452 tmp = SD_ATTR_INIT;
6453 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6454 new ? (new + idx_new) : &tmp,
6455 sizeof(struct sched_domain_attr));
6456}
6457
029190c5
PJ
6458/*
6459 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 6460 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
6461 * doms_new[] to the current sched domain partitioning, doms_cur[].
6462 * It destroys each deleted domain and builds each new domain.
6463 *
acc3f5d7 6464 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
 6465 * The masks don't intersect (don't overlap). We should set up one
6466 * sched domain for each mask. CPUs not in any of the cpumasks will
6467 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
6468 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6469 * it as it is.
6470 *
acc3f5d7
RR
6471 * The passed in 'doms_new' should be allocated using
6472 * alloc_sched_domains. This routine takes ownership of it and will
6473 * free_sched_domains it when done with it. If the caller failed the
6474 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 6475 * and partition_sched_domains() will fall back to the single partition
 6476 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 6477 *
96f874e2 6478 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
6479 * ndoms_new == 0 is a special case for destroying existing domains,
6480 * and it will not create the default domain.
dfb512ec 6481 *
029190c5
PJ
6482 * Call with hotplug lock held
6483 */
acc3f5d7 6484void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 6485 struct sched_domain_attr *dattr_new)
029190c5 6486{
dfb512ec 6487 int i, j, n;
d65bd5ec 6488 int new_topology;
029190c5 6489
712555ee 6490 mutex_lock(&sched_domains_mutex);
a1835615 6491
7378547f
MM
6492 /* always unregister in case we don't destroy any domains */
6493 unregister_sched_domain_sysctl();
6494
d65bd5ec
HC
6495 /* Let architecture update cpu core mappings. */
6496 new_topology = arch_update_cpu_topology();
6497
dfb512ec 6498 n = doms_new ? ndoms_new : 0;
029190c5
PJ
6499
6500 /* Destroy deleted domains */
6501 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 6502 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 6503 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 6504 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
6505 goto match1;
6506 }
6507 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 6508 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
6509match1:
6510 ;
6511 }
6512
e761b772
MK
6513 if (doms_new == NULL) {
6514 ndoms_cur = 0;
acc3f5d7 6515 doms_new = &fallback_doms;
6ad4c188 6516 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 6517 WARN_ON_ONCE(dattr_new);
e761b772
MK
6518 }
6519
029190c5
PJ
6520 /* Build new domains */
6521 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 6522 for (j = 0; j < ndoms_cur && !new_topology; j++) {
acc3f5d7 6523 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 6524 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
6525 goto match2;
6526 }
6527 /* no match - add a new doms_new */
dce840a0 6528 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
6529match2:
6530 ;
6531 }
6532
6533 /* Remember the new sched domains */
acc3f5d7
RR
6534 if (doms_cur != &fallback_doms)
6535 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 6536 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 6537 doms_cur = doms_new;
1d3504fc 6538 dattr_cur = dattr_new;
029190c5 6539 ndoms_cur = ndoms_new;
7378547f
MM
6540
6541 register_sched_domain_sysctl();
a1835615 6542
712555ee 6543 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
6544}
6545
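/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * caller such as the cpuset code might hand the scheduler a new two-way
 * partitioning. The CPU numbers and the helper name are made up.
 */
static __maybe_unused void example_repartition_sched_domains(void)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	get_online_cpus();	/* partition_sched_domains() wants hotplug held */

	if (!doms) {
		/* allocation failed: force the single fallback partition */
		partition_sched_domains(1, NULL, NULL);
	} else {
		cpumask_copy(doms[0], cpumask_of(0));
		cpumask_copy(doms[1], cpumask_of(1));
		/* takes ownership of 'doms' and frees it when done with it */
		partition_sched_domains(2, doms, NULL);
	}

	put_online_cpus();
}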
5c45bf27 6546#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
c4a8849a 6547static void reinit_sched_domains(void)
5c45bf27 6548{
95402b38 6549 get_online_cpus();
dfb512ec
MK
6550
6551 /* Destroy domains first to force the rebuild */
6552 partition_sched_domains(0, NULL, NULL);
6553
e761b772 6554 rebuild_sched_domains();
95402b38 6555 put_online_cpus();
5c45bf27
SS
6556}
6557
6558static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
6559{
afb8a9b7 6560 unsigned int level = 0;
5c45bf27 6561
afb8a9b7
GS
6562 if (sscanf(buf, "%u", &level) != 1)
6563 return -EINVAL;
6564
6565 /*
 6566 * level is always positive, so don't check for
 6567 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
 6568 * What happens on a 0 or 1 byte write?
 6569 * Do we need to check count as well?
6570 */
6571
6572 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
5c45bf27
SS
6573 return -EINVAL;
6574
6575 if (smt)
afb8a9b7 6576 sched_smt_power_savings = level;
5c45bf27 6577 else
afb8a9b7 6578 sched_mc_power_savings = level;
5c45bf27 6579
c4a8849a 6580 reinit_sched_domains();
5c45bf27 6581
c70f22d2 6582 return count;
5c45bf27
SS
6583}
6584
5c45bf27 6585#ifdef CONFIG_SCHED_MC
f718cd4a 6586static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
c9be0a36 6587 struct sysdev_class_attribute *attr,
f718cd4a 6588 char *page)
5c45bf27
SS
6589{
6590 return sprintf(page, "%u\n", sched_mc_power_savings);
6591}
f718cd4a 6592static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
c9be0a36 6593 struct sysdev_class_attribute *attr,
48f24c4d 6594 const char *buf, size_t count)
5c45bf27
SS
6595{
6596 return sched_power_savings_store(buf, count, 0);
6597}
f718cd4a
AK
6598static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
6599 sched_mc_power_savings_show,
6600 sched_mc_power_savings_store);
5c45bf27
SS
6601#endif
6602
6603#ifdef CONFIG_SCHED_SMT
f718cd4a 6604static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
c9be0a36 6605 struct sysdev_class_attribute *attr,
f718cd4a 6606 char *page)
5c45bf27
SS
6607{
6608 return sprintf(page, "%u\n", sched_smt_power_savings);
6609}
f718cd4a 6610static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
c9be0a36 6611 struct sysdev_class_attribute *attr,
48f24c4d 6612 const char *buf, size_t count)
5c45bf27
SS
6613{
6614 return sched_power_savings_store(buf, count, 1);
6615}
f718cd4a
AK
6616static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
6617 sched_smt_power_savings_show,
6707de00
AB
6618 sched_smt_power_savings_store);
6619#endif
6620
39aac648 6621int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6707de00
AB
6622{
6623 int err = 0;
6624
6625#ifdef CONFIG_SCHED_SMT
6626 if (smt_capable())
6627 err = sysfs_create_file(&cls->kset.kobj,
6628 &attr_sched_smt_power_savings.attr);
6629#endif
6630#ifdef CONFIG_SCHED_MC
6631 if (!err && mc_capable())
6632 err = sysfs_create_file(&cls->kset.kobj,
6633 &attr_sched_mc_power_savings.attr);
6634#endif
6635 return err;
6636}
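/*
 * The two attributes registered above surface as writable sysfs files
 * (typically /sys/devices/system/cpu/sched_mc_power_savings and
 * .../sched_smt_power_savings; the exact path depends on how the cpu
 * sysdev class is registered). Writing a level to them funnels into
 * sched_power_savings_store() and triggers reinit_sched_domains().
 */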
6d6bc0ad 6637#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
5c45bf27 6638
1da177e4 6639/*
3a101d05
TH
6640 * Update cpusets according to cpu_active mask. If cpusets are
6641 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6642 * around partition_sched_domains().
1da177e4 6643 */
0b2e918a
TH
6644static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6645 void *hcpu)
e761b772 6646{
3a101d05 6647 switch (action & ~CPU_TASKS_FROZEN) {
e761b772 6648 case CPU_ONLINE:
6ad4c188 6649 case CPU_DOWN_FAILED:
3a101d05 6650 cpuset_update_active_cpus();
e761b772 6651 return NOTIFY_OK;
3a101d05
TH
6652 default:
6653 return NOTIFY_DONE;
6654 }
6655}
e761b772 6656
0b2e918a
TH
6657static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6658 void *hcpu)
3a101d05
TH
6659{
6660 switch (action & ~CPU_TASKS_FROZEN) {
6661 case CPU_DOWN_PREPARE:
6662 cpuset_update_active_cpus();
6663 return NOTIFY_OK;
e761b772
MK
6664 default:
6665 return NOTIFY_DONE;
6666 }
6667}
e761b772 6668
1da177e4
LT
6669void __init sched_init_smp(void)
6670{
dcc30a35
RR
6671 cpumask_var_t non_isolated_cpus;
6672
6673 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 6674 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 6675
95402b38 6676 get_online_cpus();
712555ee 6677 mutex_lock(&sched_domains_mutex);
c4a8849a 6678 init_sched_domains(cpu_active_mask);
dcc30a35
RR
6679 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6680 if (cpumask_empty(non_isolated_cpus))
6681 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 6682 mutex_unlock(&sched_domains_mutex);
95402b38 6683 put_online_cpus();
e761b772 6684
3a101d05
TH
6685 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6686 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772
MK
6687
6688 /* RT runtime code needs to handle some hotplug events */
6689 hotcpu_notifier(update_runtime, 0);
6690
b328ca18 6691 init_hrtick();
5c1e1767
NP
6692
6693 /* Move init over to a non-isolated CPU */
dcc30a35 6694 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 6695 BUG();
19978ca6 6696 sched_init_granularity();
dcc30a35 6697 free_cpumask_var(non_isolated_cpus);
4212823f 6698
0e3900e6 6699 init_sched_rt_class();
1da177e4
LT
6700}
6701#else
6702void __init sched_init_smp(void)
6703{
19978ca6 6704 sched_init_granularity();
1da177e4
LT
6705}
6706#endif /* CONFIG_SMP */
6707
cd1bb94b
AB
6708const_debug unsigned int sysctl_timer_migration = 1;
6709
1da177e4
LT
6710int in_sched_functions(unsigned long addr)
6711{
1da177e4
LT
6712 return in_lock_functions(addr) ||
6713 (addr >= (unsigned long)__sched_text_start
6714 && addr < (unsigned long)__sched_text_end);
6715}
6716
029632fb
PZ
6717#ifdef CONFIG_CGROUP_SCHED
6718struct task_group root_task_group;
052f1dc7 6719#endif
6f505b16 6720
029632fb 6721DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
6f505b16 6722
1da177e4
LT
6723void __init sched_init(void)
6724{
dd41f596 6725 int i, j;
434d53b0
MT
6726 unsigned long alloc_size = 0, ptr;
6727
6728#ifdef CONFIG_FAIR_GROUP_SCHED
6729 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6730#endif
6731#ifdef CONFIG_RT_GROUP_SCHED
6732 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6 6733#endif
df7c8e84 6734#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 6735 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0 6736#endif
434d53b0 6737 if (alloc_size) {
36b7b6d4 6738 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
6739
6740#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 6741 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
6742 ptr += nr_cpu_ids * sizeof(void **);
6743
07e06b01 6744 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 6745 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 6746
6d6bc0ad 6747#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 6748#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6749 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
6750 ptr += nr_cpu_ids * sizeof(void **);
6751
07e06b01 6752 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
6753 ptr += nr_cpu_ids * sizeof(void **);
6754
6d6bc0ad 6755#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
6756#ifdef CONFIG_CPUMASK_OFFSTACK
6757 for_each_possible_cpu(i) {
6758 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
6759 ptr += cpumask_size();
6760 }
6761#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 6762 }
dd41f596 6763
57d885fe
GH
6764#ifdef CONFIG_SMP
6765 init_defrootdomain();
6766#endif
6767
d0b27fa7
PZ
6768 init_rt_bandwidth(&def_rt_bandwidth,
6769 global_rt_period(), global_rt_runtime());
6770
6771#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 6772 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 6773 global_rt_period(), global_rt_runtime());
6d6bc0ad 6774#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 6775
7c941438 6776#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
6777 list_add(&root_task_group.list, &task_groups);
6778 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 6779 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 6780 autogroup_init(&init_task);
7c941438 6781#endif /* CONFIG_CGROUP_SCHED */
6f505b16 6782
0a945022 6783 for_each_possible_cpu(i) {
70b97a7f 6784 struct rq *rq;
1da177e4
LT
6785
6786 rq = cpu_rq(i);
05fa785c 6787 raw_spin_lock_init(&rq->lock);
7897986b 6788 rq->nr_running = 0;
dce48a84
TG
6789 rq->calc_load_active = 0;
6790 rq->calc_load_update = jiffies + LOAD_FREQ;
acb5a9ba 6791 init_cfs_rq(&rq->cfs);
6f505b16 6792 init_rt_rq(&rq->rt, rq);
dd41f596 6793#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 6794 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 6795 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 6796 /*
07e06b01 6797 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
6798 *
 6799 * In case of task-groups formed through the cgroup filesystem, it
6800 * gets 100% of the cpu resources in the system. This overall
6801 * system cpu resource is divided among the tasks of
07e06b01 6802 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
6803 * based on each entity's (task or task-group's) weight
6804 * (se->load.weight).
6805 *
07e06b01 6806 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
 6807 * 1024 and two child groups A0 and A1 (of weight 1024 each),
6808 * then A0's share of the cpu resource is:
6809 *
0d905bca 6810 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 6811 *
07e06b01
YZ
6812 * We achieve this by letting root_task_group's tasks sit
 6813 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
354d60c2 6814 */
ab84d31e 6815 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 6816 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
6817#endif /* CONFIG_FAIR_GROUP_SCHED */
6818
6819 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 6820#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 6821 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
07e06b01 6822 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 6823#endif
1da177e4 6824
dd41f596
IM
6825 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6826 rq->cpu_load[j] = 0;
fdf3e95d
VP
6827
6828 rq->last_load_update_tick = jiffies;
6829
1da177e4 6830#ifdef CONFIG_SMP
41c7ce9a 6831 rq->sd = NULL;
57d885fe 6832 rq->rd = NULL;
1399fa78 6833 rq->cpu_power = SCHED_POWER_SCALE;
3f029d3c 6834 rq->post_schedule = 0;
1da177e4 6835 rq->active_balance = 0;
dd41f596 6836 rq->next_balance = jiffies;
1da177e4 6837 rq->push_cpu = 0;
0a2966b4 6838 rq->cpu = i;
1f11eb6a 6839 rq->online = 0;
eae0c9df
MG
6840 rq->idle_stamp = 0;
6841 rq->avg_idle = 2*sysctl_sched_migration_cost;
dc938520 6842 rq_attach_root(rq, &def_root_domain);
83cd4fe2 6843#ifdef CONFIG_NO_HZ
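		/* per-rq nohz state flags; start clean at boot */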
1c792db7 6844 rq->nohz_flags = 0;
83cd4fe2 6845#endif
1da177e4 6846#endif
8f4d37ec 6847 init_rq_hrtick(rq);
1da177e4 6848 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
6849 }
6850
2dd73a4f 6851 set_load_weight(&init_task);
b50f60ce 6852
e107be36
AK
6853#ifdef CONFIG_PREEMPT_NOTIFIERS
6854 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6855#endif
6856
b50f60ce 6857#ifdef CONFIG_RT_MUTEXES
732375c6 6858 plist_head_init(&init_task.pi_waiters);
b50f60ce
HC
6859#endif
6860
1da177e4
LT
6861 /*
6862 * The boot idle thread does lazy MMU switching as well:
6863 */
6864 atomic_inc(&init_mm.mm_count);
6865 enter_lazy_tlb(&init_mm, current);
6866
6867 /*
6868 * Make us the idle thread. Technically, schedule() should not be
6869 * called from this thread, however somewhere below it might be,
6870 * but because we are the idle thread, we just pick up running again
6871 * when this runqueue becomes "idle".
6872 */
6873 init_idle(current, smp_processor_id());
dce48a84
TG
6874
6875 calc_load_update = jiffies + LOAD_FREQ;
6876
dd41f596
IM
6877 /*
6878 * During early bootup we pretend to be a normal task:
6879 */
6880 current->sched_class = &fair_sched_class;
6892b75e 6881
bf4d83f6 6882#ifdef CONFIG_SMP
4cb98839 6883 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
6884 /* May be allocated at isolcpus cmdline parse time */
6885 if (cpu_isolated_map == NULL)
6886 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
029632fb
PZ
6887#endif
6888 init_sched_fair_class();
6a7b3dc3 6889
6892b75e 6890 scheduler_running = 1;
1da177e4
LT
6891}
6892
d902db1e 6893#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
6894static inline int preempt_count_equals(int preempt_offset)
6895{
234da7bc 6896 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 6897
4ba8216c 6898 return (nested == preempt_offset);
e4aafea2
FW
6899}
6900
d894837f 6901void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 6902{
1da177e4
LT
6903 static unsigned long prev_jiffy; /* ratelimiting */
6904
b3fbab05 6905 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
e4aafea2
FW
6906 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6907 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
6908 return;
6909 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6910 return;
6911 prev_jiffy = jiffies;
6912
3df0fc5b
PZ
6913 printk(KERN_ERR
6914 "BUG: sleeping function called from invalid context at %s:%d\n",
6915 file, line);
6916 printk(KERN_ERR
6917 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6918 in_atomic(), irqs_disabled(),
6919 current->pid, current->comm);
aef745fc
IM
6920
6921 debug_show_held_locks(current);
6922 if (irqs_disabled())
6923 print_irqtrace_events(current);
6924 dump_stack();
1da177e4
LT
6925}
6926EXPORT_SYMBOL(__might_sleep);
6927#endif
6928
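/*
 * Usage sketch (illustrative only, not part of the original file): code
 * that may block annotates itself with might_sleep(), which funnels into
 * __might_sleep() above on CONFIG_DEBUG_ATOMIC_SLEEP kernels. The helper
 * name below is made up.
 */
static __maybe_unused void *example_alloc_may_block(size_t size)
{
	might_sleep();	/* complains if called from atomic context */
	return kmalloc(size, GFP_KERNEL);
}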
6929#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
6930static void normalize_task(struct rq *rq, struct task_struct *p)
6931{
da7a735e
PZ
6932 const struct sched_class *prev_class = p->sched_class;
6933 int old_prio = p->prio;
3a5e4dc1 6934 int on_rq;
3e51f33f 6935
fd2f4419 6936 on_rq = p->on_rq;
3a5e4dc1
AK
6937 if (on_rq)
6938 deactivate_task(rq, p, 0);
6939 __setscheduler(rq, p, SCHED_NORMAL, 0);
6940 if (on_rq) {
6941 activate_task(rq, p, 0);
6942 resched_task(rq->curr);
6943 }
da7a735e
PZ
6944
6945 check_class_changed(rq, p, prev_class, old_prio);
3a5e4dc1
AK
6946}
6947
1da177e4
LT
6948void normalize_rt_tasks(void)
6949{
a0f98a1c 6950 struct task_struct *g, *p;
1da177e4 6951 unsigned long flags;
70b97a7f 6952 struct rq *rq;
1da177e4 6953
4cf5d77a 6954 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 6955 do_each_thread(g, p) {
178be793
IM
6956 /*
6957 * Only normalize user tasks:
6958 */
6959 if (!p->mm)
6960 continue;
6961
6cfb0d5d 6962 p->se.exec_start = 0;
6cfb0d5d 6963#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
6964 p->se.statistics.wait_start = 0;
6965 p->se.statistics.sleep_start = 0;
6966 p->se.statistics.block_start = 0;
6cfb0d5d 6967#endif
dd41f596
IM
6968
6969 if (!rt_task(p)) {
6970 /*
6971 * Renice negative nice level userspace
6972 * tasks back to 0:
6973 */
6974 if (TASK_NICE(p) < 0 && p->mm)
6975 set_user_nice(p, 0);
1da177e4 6976 continue;
dd41f596 6977 }
1da177e4 6978
1d615482 6979 raw_spin_lock(&p->pi_lock);
b29739f9 6980 rq = __task_rq_lock(p);
1da177e4 6981
178be793 6982 normalize_task(rq, p);
3a5e4dc1 6983
b29739f9 6984 __task_rq_unlock(rq);
1d615482 6985 raw_spin_unlock(&p->pi_lock);
a0f98a1c
IM
6986 } while_each_thread(g, p);
6987
4cf5d77a 6988 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
6989}
6990
6991#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 6992
67fc4e0c 6993#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 6994/*
67fc4e0c 6995 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
6996 *
6997 * They can only be called when the whole system has been
6998 * stopped - every CPU needs to be quiescent, and no scheduling
6999 * activity can take place. Using them for anything else would
7000 * be a serious bug, and as a result, they aren't even visible
7001 * under any other configuration.
7002 */
7003
7004/**
7005 * curr_task - return the current task for a given cpu.
7006 * @cpu: the processor in question.
7007 *
7008 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7009 */
36c8b586 7010struct task_struct *curr_task(int cpu)
1df5c10a
LT
7011{
7012 return cpu_curr(cpu);
7013}
7014
67fc4e0c
JW
7015#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7016
7017#ifdef CONFIG_IA64
1df5c10a
LT
7018/**
7019 * set_curr_task - set the current task for a given cpu.
7020 * @cpu: the processor in question.
7021 * @p: the task pointer to set.
7022 *
7023 * Description: This function must only be used when non-maskable interrupts
7024 * are serviced on a separate stack. It allows the architecture to switch the
7025 * notion of the current task on a cpu in a non-blocking manner. This function
 7026 * must be called with all CPUs synchronized, and interrupts disabled, and
 7027 * the caller must save the original value of the current task (see
7028 * curr_task() above) and restore that value before reenabling interrupts and
7029 * re-starting the system.
7030 *
7031 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7032 */
36c8b586 7033void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7034{
7035 cpu_curr(cpu) = p;
7036}
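/*
 * Illustrative sketch (not part of the original file): the save/switch/
 * restore pattern described above, roughly as the IA64 MCA handler would
 * use it once every CPU has been stopped.  example_recovery_task is a
 * hypothetical stand-in:
 *
 *	struct task_struct *saved = curr_task(cpu);
 *
 *	set_curr_task(cpu, example_recovery_task);
 *	...				-- run recovery on the borrowed context
 *	set_curr_task(cpu, saved);
 */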
7037
7038#endif
29f59db3 7039
052f1dc7 7040#ifdef CONFIG_RT_GROUP_SCHED
6d6bc0ad 7041#else /* !CONFIG_RT_GROUP_SCHED */
6d6bc0ad 7042#endif /* CONFIG_RT_GROUP_SCHED */
bccbe08a 7043
7c941438 7044#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7045/* task_group_lock serializes the addition/removal of task groups */
7046static DEFINE_SPINLOCK(task_group_lock);
7047
bccbe08a
PZ
7048static void free_sched_group(struct task_group *tg)
7049{
7050 free_fair_sched_group(tg);
7051 free_rt_sched_group(tg);
e9aa1dd1 7052 autogroup_free(tg);
bccbe08a
PZ
7053 kfree(tg);
7054}
7055
7056/* allocate runqueue etc for a new task group */
ec7dc8ac 7057struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7058{
7059 struct task_group *tg;
7060 unsigned long flags;
bccbe08a
PZ
7061
7062 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7063 if (!tg)
7064 return ERR_PTR(-ENOMEM);
7065
ec7dc8ac 7066 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7067 goto err;
7068
ec7dc8ac 7069 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7070 goto err;
7071
8ed36996 7072 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7073 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7074
7075 WARN_ON(!parent); /* root should already exist */
7076
7077 tg->parent = parent;
f473aa5e 7078 INIT_LIST_HEAD(&tg->children);
09f2724a 7079 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7080 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3 7081
9b5b7751 7082 return tg;
29f59db3
SV
7083
7084err:
6f505b16 7085 free_sched_group(tg);
29f59db3
SV
7086 return ERR_PTR(-ENOMEM);
7087}
7088
9b5b7751 7089/* rcu callback to free various structures associated with a task group */
6f505b16 7090static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 7091{
29f59db3 7092 /* now it should be safe to free those cfs_rqs */
6f505b16 7093 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7094}
7095
9b5b7751 7096/* Destroy runqueue etc associated with a task group */
4cf86d77 7097void sched_destroy_group(struct task_group *tg)
29f59db3 7098{
8ed36996 7099 unsigned long flags;
9b5b7751 7100 int i;
29f59db3 7101
3d4b47b4
PZ
7102 /* end participation in shares distribution */
7103 for_each_possible_cpu(i)
bccbe08a 7104 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
7105
7106 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7107 list_del_rcu(&tg->list);
f473aa5e 7108 list_del_rcu(&tg->siblings);
8ed36996 7109 spin_unlock_irqrestore(&task_group_lock, flags);
9b5b7751 7110
9b5b7751 7111 /* wait for possible concurrent references to cfs_rqs to complete */
6f505b16 7112 call_rcu(&tg->rcu, free_sched_group_rcu);
29f59db3
SV
7113}
7114
9b5b7751 7115/* Change a task's runqueue when it moves between groups.
7116 * The caller of this function should have put the task in its new group
7117 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7118 * reflect its new group.
7119 */
7120void sched_move_task(struct task_struct *tsk)
29f59db3
SV
7121{
7122 int on_rq, running;
7123 unsigned long flags;
7124 struct rq *rq;
7125
7126 rq = task_rq_lock(tsk, &flags);
7127
051a1d1a 7128 running = task_current(rq, tsk);
fd2f4419 7129 on_rq = tsk->on_rq;
29f59db3 7130
0e1f3483 7131 if (on_rq)
29f59db3 7132 dequeue_task(rq, tsk, 0);
0e1f3483
HS
7133 if (unlikely(running))
7134 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 7135
810b3817 7136#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02
PZ
7137 if (tsk->sched_class->task_move_group)
7138 tsk->sched_class->task_move_group(tsk, on_rq);
7139 else
810b3817 7140#endif
b2b5ce02 7141 set_task_rq(tsk, task_cpu(tsk));
810b3817 7142
0e1f3483
HS
7143 if (unlikely(running))
7144 tsk->sched_class->set_curr_task(rq);
7145 if (on_rq)
371fd7e7 7146 enqueue_task(rq, tsk, 0);
29f59db3 7147
0122ec5b 7148 task_rq_unlock(rq, tsk, &flags);
29f59db3 7149}
7c941438 7150#endif /* CONFIG_CGROUP_SCHED */
29f59db3 7151
052f1dc7 7152#ifdef CONFIG_FAIR_GROUP_SCHED
052f1dc7 7153#endif
5cb350ba 7154
a790de99 7155#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
9f0c1e56
PZ
7156static unsigned long to_ratio(u64 period, u64 runtime)
7157{
7158 if (runtime == RUNTIME_INF)
9a7e0b18 7159 return 1ULL << 20;
9f0c1e56 7160
9a7e0b18 7161 return div64_u64(runtime << 20, period);
9f0c1e56 7162}
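/*
 * Worked example (illustrative, not part of the original file): the return
 * value is a 20-bit fixed-point fraction, so 1ULL << 20 means 100% of the
 * period.  With the usual global defaults of a 1s period and 950ms of
 * runtime, expressed in nanoseconds:
 *
 *	to_ratio(1000000000ULL, 950000000ULL)
 *		= (950000000ULL << 20) / 1000000000ULL
 *		= 996147		-- roughly 0.95 * (1 << 20)
 *
 * i.e. the group may consume about 95% of every period.
 */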
a790de99
PT
7163#endif
7164
7165#ifdef CONFIG_RT_GROUP_SCHED
7166/*
7167 * Ensure that the real time constraints are schedulable.
7168 */
7169static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7170
9a7e0b18
PZ
7171/* Must be called with tasklist_lock held */
7172static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7173{
9a7e0b18 7174 struct task_struct *g, *p;
b40b2e8e 7175
9a7e0b18 7176 do_each_thread(g, p) {
029632fb 7177 if (rt_task(p) && task_rq(p)->rt.tg == tg)
7178 return 1;
7179 } while_each_thread(g, p);
b40b2e8e 7180
9a7e0b18
PZ
7181 return 0;
7182}
b40b2e8e 7183
9a7e0b18
PZ
7184struct rt_schedulable_data {
7185 struct task_group *tg;
7186 u64 rt_period;
7187 u64 rt_runtime;
7188};
b40b2e8e 7189
a790de99 7190static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7191{
7192 struct rt_schedulable_data *d = data;
7193 struct task_group *child;
7194 unsigned long total, sum = 0;
7195 u64 period, runtime;
b40b2e8e 7196
9a7e0b18
PZ
7197 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7198 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7199
9a7e0b18
PZ
7200 if (tg == d->tg) {
7201 period = d->rt_period;
7202 runtime = d->rt_runtime;
b40b2e8e 7203 }
b40b2e8e 7204
4653f803
PZ
7205 /*
7206 * Cannot have more runtime than the period.
7207 */
7208 if (runtime > period && runtime != RUNTIME_INF)
7209 return -EINVAL;
6f505b16 7210
4653f803
PZ
7211 /*
7212 * Ensure we don't starve existing RT tasks.
7213 */
9a7e0b18
PZ
7214 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7215 return -EBUSY;
6f505b16 7216
9a7e0b18 7217 total = to_ratio(period, runtime);
6f505b16 7218
4653f803
PZ
7219 /*
7220 * Nobody can have more than the global setting allows.
7221 */
7222 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7223 return -EINVAL;
6f505b16 7224
4653f803
PZ
7225 /*
7226 * The sum of our children's runtime should not exceed our own.
7227 */
9a7e0b18
PZ
7228 list_for_each_entry_rcu(child, &tg->children, siblings) {
7229 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7230 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7231
9a7e0b18
PZ
7232 if (child == d->tg) {
7233 period = d->rt_period;
7234 runtime = d->rt_runtime;
7235 }
6f505b16 7236
9a7e0b18 7237 sum += to_ratio(period, runtime);
9f0c1e56 7238 }
6f505b16 7239
9a7e0b18
PZ
7240 if (sum > total)
7241 return -EINVAL;
7242
7243 return 0;
6f505b16
PZ
7244}
7245
9a7e0b18 7246static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7247{
8277434e
PT
7248 int ret;
7249
9a7e0b18
PZ
7250 struct rt_schedulable_data data = {
7251 .tg = tg,
7252 .rt_period = period,
7253 .rt_runtime = runtime,
7254 };
7255
8277434e
PT
7256 rcu_read_lock();
7257 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7258 rcu_read_unlock();
7259
7260 return ret;
521f1a24
DG
7261}
7262
ab84d31e 7263static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7264 u64 rt_period, u64 rt_runtime)
6f505b16 7265{
ac086bc2 7266 int i, err = 0;
9f0c1e56 7267
9f0c1e56 7268 mutex_lock(&rt_constraints_mutex);
521f1a24 7269 read_lock(&tasklist_lock);
9a7e0b18
PZ
7270 err = __rt_schedulable(tg, rt_period, rt_runtime);
7271 if (err)
9f0c1e56 7272 goto unlock;
ac086bc2 7273
0986b11b 7274 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7275 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7276 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7277
7278 for_each_possible_cpu(i) {
7279 struct rt_rq *rt_rq = tg->rt_rq[i];
7280
0986b11b 7281 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7282 rt_rq->rt_runtime = rt_runtime;
0986b11b 7283 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7284 }
0986b11b 7285 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7286unlock:
521f1a24 7287 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7288 mutex_unlock(&rt_constraints_mutex);
7289
7290 return err;
6f505b16
PZ
7291}
7292
d0b27fa7
PZ
7293int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7294{
7295 u64 rt_runtime, rt_period;
7296
7297 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7298 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7299 if (rt_runtime_us < 0)
7300 rt_runtime = RUNTIME_INF;
7301
ab84d31e 7302 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7303}
7304
9f0c1e56
PZ
7305long sched_group_rt_runtime(struct task_group *tg)
7306{
7307 u64 rt_runtime_us;
7308
d0b27fa7 7309 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7310 return -1;
7311
d0b27fa7 7312 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7313 do_div(rt_runtime_us, NSEC_PER_USEC);
7314 return rt_runtime_us;
7315}
d0b27fa7
PZ
7316
7317int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7318{
7319 u64 rt_runtime, rt_period;
7320
7321 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7322 rt_runtime = tg->rt_bandwidth.rt_runtime;
7323
619b0488
R
7324 if (rt_period == 0)
7325 return -EINVAL;
7326
ab84d31e 7327 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7328}
7329
7330long sched_group_rt_period(struct task_group *tg)
7331{
7332 u64 rt_period_us;
7333
7334 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7335 do_div(rt_period_us, NSEC_PER_USEC);
7336 return rt_period_us;
7337}
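/*
 * Illustrative sketch (not part of the original file): the two setters
 * above are reached through the cpu.rt_runtime_us and cpu.rt_period_us
 * cgroup files declared further down.  Granting a group 200ms of RT time
 * per second could look like this (mount point and group name are
 * hypothetical):
 *
 *	# echo 1000000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_period_us
 *	# echo 200000  > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
 *
 * Both writes end up in tg_set_rt_bandwidth() with nanosecond values, and
 * __rt_schedulable() rejects them if the group's children or the global
 * rt_runtime/rt_period limits would be overcommitted.
 */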
7338
7339static int sched_rt_global_constraints(void)
7340{
4653f803 7341 u64 runtime, period;
d0b27fa7
PZ
7342 int ret = 0;
7343
ec5d4989
HS
7344 if (sysctl_sched_rt_period <= 0)
7345 return -EINVAL;
7346
4653f803
PZ
7347 runtime = global_rt_runtime();
7348 period = global_rt_period();
7349
7350 /*
7351 * Sanity check on the sysctl variables.
7352 */
7353 if (runtime > period && runtime != RUNTIME_INF)
7354 return -EINVAL;
10b612f4 7355
d0b27fa7 7356 mutex_lock(&rt_constraints_mutex);
9a7e0b18 7357 read_lock(&tasklist_lock);
4653f803 7358 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 7359 read_unlock(&tasklist_lock);
d0b27fa7
PZ
7360 mutex_unlock(&rt_constraints_mutex);
7361
7362 return ret;
7363}
54e99124
DG
7364
7365int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7366{
7367 /* Don't accept realtime tasks when there is no way for them to run */
7368 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7369 return 0;
7370
7371 return 1;
7372}
7373
6d6bc0ad 7374#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7375static int sched_rt_global_constraints(void)
7376{
ac086bc2
PZ
7377 unsigned long flags;
7378 int i;
7379
ec5d4989
HS
7380 if (sysctl_sched_rt_period <= 0)
7381 return -EINVAL;
7382
60aa605d
PZ
7383 /*
 7384 * There are always some RT tasks in the root group
 7385 * -- migration, kstopmachine etc.
7386 */
7387 if (sysctl_sched_rt_runtime == 0)
7388 return -EBUSY;
7389
0986b11b 7390 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
7391 for_each_possible_cpu(i) {
7392 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7393
0986b11b 7394 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7395 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 7396 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7397 }
0986b11b 7398 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 7399
d0b27fa7
PZ
7400 return 0;
7401}
6d6bc0ad 7402#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
7403
7404int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 7405 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
7406 loff_t *ppos)
7407{
7408 int ret;
7409 int old_period, old_runtime;
7410 static DEFINE_MUTEX(mutex);
7411
7412 mutex_lock(&mutex);
7413 old_period = sysctl_sched_rt_period;
7414 old_runtime = sysctl_sched_rt_runtime;
7415
8d65af78 7416 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
7417
7418 if (!ret && write) {
7419 ret = sched_rt_global_constraints();
7420 if (ret) {
7421 sysctl_sched_rt_period = old_period;
7422 sysctl_sched_rt_runtime = old_runtime;
7423 } else {
7424 def_rt_bandwidth.rt_runtime = global_rt_runtime();
7425 def_rt_bandwidth.rt_period =
7426 ns_to_ktime(global_rt_period());
7427 }
7428 }
7429 mutex_unlock(&mutex);
7430
7431 return ret;
7432}
68318b8e 7433
052f1dc7 7434#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
7435
7436/* return corresponding task_group object of a cgroup */
2b01dfe3 7437static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
68318b8e 7438{
2b01dfe3
PM
7439 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7440 struct task_group, css);
68318b8e
SV
7441}
7442
7443static struct cgroup_subsys_state *
2b01dfe3 7444cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 7445{
ec7dc8ac 7446 struct task_group *tg, *parent;
68318b8e 7447
2b01dfe3 7448 if (!cgrp->parent) {
68318b8e 7449 /* This is early initialization for the top cgroup */
07e06b01 7450 return &root_task_group.css;
68318b8e
SV
7451 }
7452
ec7dc8ac
DG
7453 parent = cgroup_tg(cgrp->parent);
7454 tg = sched_create_group(parent);
68318b8e
SV
7455 if (IS_ERR(tg))
7456 return ERR_PTR(-ENOMEM);
7457
68318b8e
SV
7458 return &tg->css;
7459}
7460
41a2d6cf
IM
7461static void
7462cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 7463{
2b01dfe3 7464 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
7465
7466 sched_destroy_group(tg);
7467}
7468
41a2d6cf 7469static int
be367d09 7470cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
68318b8e 7471{
b68aa230 7472#ifdef CONFIG_RT_GROUP_SCHED
54e99124 7473 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
7474 return -EINVAL;
7475#else
68318b8e
SV
7476 /* We don't support RT-tasks being in separate groups */
7477 if (tsk->sched_class != &fair_sched_class)
7478 return -EINVAL;
b68aa230 7479#endif
be367d09
BB
7480 return 0;
7481}
68318b8e 7482
68318b8e 7483static void
f780bdb7 7484cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
68318b8e
SV
7485{
7486 sched_move_task(tsk);
7487}
7488
068c5cc5 7489static void
d41d5a01
PZ
7490cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7491 struct cgroup *old_cgrp, struct task_struct *task)
068c5cc5
PZ
7492{
7493 /*
7494 * cgroup_exit() is called in the copy_process() failure path.
 7495 * Ignore this case since the task hasn't run yet; this avoids
7496 * trying to poke a half freed task state from generic code.
7497 */
7498 if (!(task->flags & PF_EXITING))
7499 return;
7500
7501 sched_move_task(task);
7502}
7503
052f1dc7 7504#ifdef CONFIG_FAIR_GROUP_SCHED
f4c753b7 7505static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
2b01dfe3 7506 u64 shareval)
68318b8e 7507{
c8b28116 7508 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
68318b8e
SV
7509}
7510
f4c753b7 7511static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
68318b8e 7512{
2b01dfe3 7513 struct task_group *tg = cgroup_tg(cgrp);
68318b8e 7514
c8b28116 7515 return (u64) scale_load_down(tg->shares);
68318b8e 7516}
ab84d31e
PT
7517
7518#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
7519static DEFINE_MUTEX(cfs_constraints_mutex);
7520
ab84d31e
PT
7521const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7522const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7523
a790de99
PT
7524static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7525
ab84d31e
PT
7526static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7527{
56f570e5 7528 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 7529 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
7530
7531 if (tg == &root_task_group)
7532 return -EINVAL;
7533
7534 /*
 7535 * Ensure we have some amount of bandwidth every period. This is
7536 * to prevent reaching a state of large arrears when throttled via
7537 * entity_tick() resulting in prolonged exit starvation.
7538 */
7539 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7540 return -EINVAL;
7541
7542 /*
 7543 * Likewise, bound things on the other side by preventing insane quota
7544 * periods. This also allows us to normalize in computing quota
7545 * feasibility.
7546 */
7547 if (period > max_cfs_quota_period)
7548 return -EINVAL;
7549
a790de99
PT
7550 mutex_lock(&cfs_constraints_mutex);
7551 ret = __cfs_schedulable(tg, period, quota);
7552 if (ret)
7553 goto out_unlock;
7554
58088ad0 7555 runtime_enabled = quota != RUNTIME_INF;
56f570e5
PT
7556 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7557 account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
ab84d31e
PT
7558 raw_spin_lock_irq(&cfs_b->lock);
7559 cfs_b->period = ns_to_ktime(period);
7560 cfs_b->quota = quota;
58088ad0 7561
a9cf55b2 7562 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0
PT
7563 /* restart the period timer (if active) to handle new period expiry */
7564 if (runtime_enabled && cfs_b->timer_active) {
7565 /* force a reprogram */
7566 cfs_b->timer_active = 0;
7567 __start_cfs_bandwidth(cfs_b);
7568 }
ab84d31e
PT
7569 raw_spin_unlock_irq(&cfs_b->lock);
7570
7571 for_each_possible_cpu(i) {
7572 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 7573 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
7574
7575 raw_spin_lock_irq(&rq->lock);
58088ad0 7576 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 7577 cfs_rq->runtime_remaining = 0;
671fd9da 7578
029632fb 7579 if (cfs_rq->throttled)
671fd9da 7580 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
7581 raw_spin_unlock_irq(&rq->lock);
7582 }
a790de99
PT
7583out_unlock:
7584 mutex_unlock(&cfs_constraints_mutex);
ab84d31e 7585
a790de99 7586 return ret;
ab84d31e
PT
7587}
7588
7589int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7590{
7591 u64 quota, period;
7592
029632fb 7593 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7594 if (cfs_quota_us < 0)
7595 quota = RUNTIME_INF;
7596 else
7597 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7598
7599 return tg_set_cfs_bandwidth(tg, period, quota);
7600}
7601
7602long tg_get_cfs_quota(struct task_group *tg)
7603{
7604 u64 quota_us;
7605
029632fb 7606 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
7607 return -1;
7608
029632fb 7609 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
7610 do_div(quota_us, NSEC_PER_USEC);
7611
7612 return quota_us;
7613}
7614
7615int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7616{
7617 u64 quota, period;
7618
7619 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 7620 quota = tg->cfs_bandwidth.quota;
ab84d31e
PT
7621
7622 if (period <= 0)
7623 return -EINVAL;
7624
7625 return tg_set_cfs_bandwidth(tg, period, quota);
7626}
7627
7628long tg_get_cfs_period(struct task_group *tg)
7629{
7630 u64 cfs_period_us;
7631
029632fb 7632 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
7633 do_div(cfs_period_us, NSEC_PER_USEC);
7634
7635 return cfs_period_us;
7636}
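/*
 * Illustrative sketch (not part of the original file): quota and period
 * map directly to the cpu.cfs_quota_us and cpu.cfs_period_us cgroup files
 * declared below.  Capping a group at half a CPU could look like this
 * (mount point and group name are hypothetical):
 *
 *	# echo 100000 > /sys/fs/cgroup/cpu/mygroup/cpu.cfs_period_us
 *	# echo  50000 > /sys/fs/cgroup/cpu/mygroup/cpu.cfs_quota_us
 *
 * Writing -1 to cpu.cfs_quota_us restores RUNTIME_INF (no limit).
 * tg_set_cfs_bandwidth() bounds the values: quota and period must each be
 * at least min_cfs_quota_period (1ms) and the period at most
 * max_cfs_quota_period (1s).
 */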
7637
7638static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
7639{
7640 return tg_get_cfs_quota(cgroup_tg(cgrp));
7641}
7642
7643static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
7644 s64 cfs_quota_us)
7645{
7646 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
7647}
7648
7649static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
7650{
7651 return tg_get_cfs_period(cgroup_tg(cgrp));
7652}
7653
7654static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7655 u64 cfs_period_us)
7656{
7657 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
7658}
7659
a790de99
PT
7660struct cfs_schedulable_data {
7661 struct task_group *tg;
7662 u64 period, quota;
7663};
7664
7665/*
7666 * normalize group quota/period to be quota/max_period
7667 * note: units are usecs
7668 */
7669static u64 normalize_cfs_quota(struct task_group *tg,
7670 struct cfs_schedulable_data *d)
7671{
7672 u64 quota, period;
7673
7674 if (tg == d->tg) {
7675 period = d->period;
7676 quota = d->quota;
7677 } else {
7678 period = tg_get_cfs_period(tg);
7679 quota = tg_get_cfs_quota(tg);
7680 }
7681
7682 /* note: these should typically be equivalent */
7683 if (quota == RUNTIME_INF || quota == -1)
7684 return RUNTIME_INF;
7685
7686 return to_ratio(period, quota);
7687}
7688
7689static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7690{
7691 struct cfs_schedulable_data *d = data;
029632fb 7692 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
7693 s64 quota = 0, parent_quota = -1;
7694
7695 if (!tg->parent) {
7696 quota = RUNTIME_INF;
7697 } else {
029632fb 7698 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
7699
7700 quota = normalize_cfs_quota(tg, d);
7701 parent_quota = parent_b->hierarchal_quota;
7702
7703 /*
7704 * ensure max(child_quota) <= parent_quota, inherit when no
7705 * limit is set
7706 */
7707 if (quota == RUNTIME_INF)
7708 quota = parent_quota;
7709 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7710 return -EINVAL;
7711 }
7712 cfs_b->hierarchal_quota = quota;
7713
7714 return 0;
7715}
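/*
 * Illustrative example (not part of the original file) of the check above:
 * if a parent group is limited to 50000us of quota per 100000us period, a
 * child that requests 80000us/100000us normalizes to a larger ratio than
 * its parent's hierarchal_quota and the write fails with -EINVAL, while a
 * child with no quota set (RUNTIME_INF) simply inherits the parent's 50%
 * ratio when its own children are checked.
 */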
7716
7717static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7718{
8277434e 7719 int ret;
a790de99
PT
7720 struct cfs_schedulable_data data = {
7721 .tg = tg,
7722 .period = period,
7723 .quota = quota,
7724 };
7725
7726 if (quota != RUNTIME_INF) {
7727 do_div(data.period, NSEC_PER_USEC);
7728 do_div(data.quota, NSEC_PER_USEC);
7729 }
7730
8277434e
PT
7731 rcu_read_lock();
7732 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7733 rcu_read_unlock();
7734
7735 return ret;
a790de99 7736}
e8da1b18
NR
7737
7738static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
7739 struct cgroup_map_cb *cb)
7740{
7741 struct task_group *tg = cgroup_tg(cgrp);
029632fb 7742 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18
NR
7743
7744 cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7745 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7746 cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7747
7748 return 0;
7749}
ab84d31e 7750#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 7751#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 7752
052f1dc7 7753#ifdef CONFIG_RT_GROUP_SCHED
0c70814c 7754static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
06ecb27c 7755 s64 val)
6f505b16 7756{
06ecb27c 7757 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
6f505b16
PZ
7758}
7759
06ecb27c 7760static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
6f505b16 7761{
06ecb27c 7762 return sched_group_rt_runtime(cgroup_tg(cgrp));
6f505b16 7763}
d0b27fa7
PZ
7764
7765static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7766 u64 rt_period_us)
7767{
7768 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
7769}
7770
7771static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
7772{
7773 return sched_group_rt_period(cgroup_tg(cgrp));
7774}
6d6bc0ad 7775#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 7776
fe5c7cc2 7777static struct cftype cpu_files[] = {
052f1dc7 7778#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
7779 {
7780 .name = "shares",
f4c753b7
PM
7781 .read_u64 = cpu_shares_read_u64,
7782 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 7783 },
052f1dc7 7784#endif
ab84d31e
PT
7785#ifdef CONFIG_CFS_BANDWIDTH
7786 {
7787 .name = "cfs_quota_us",
7788 .read_s64 = cpu_cfs_quota_read_s64,
7789 .write_s64 = cpu_cfs_quota_write_s64,
7790 },
7791 {
7792 .name = "cfs_period_us",
7793 .read_u64 = cpu_cfs_period_read_u64,
7794 .write_u64 = cpu_cfs_period_write_u64,
7795 },
e8da1b18
NR
7796 {
7797 .name = "stat",
7798 .read_map = cpu_stats_show,
7799 },
ab84d31e 7800#endif
052f1dc7 7801#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 7802 {
9f0c1e56 7803 .name = "rt_runtime_us",
06ecb27c
PM
7804 .read_s64 = cpu_rt_runtime_read,
7805 .write_s64 = cpu_rt_runtime_write,
6f505b16 7806 },
d0b27fa7
PZ
7807 {
7808 .name = "rt_period_us",
f4c753b7
PM
7809 .read_u64 = cpu_rt_period_read_uint,
7810 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 7811 },
052f1dc7 7812#endif
68318b8e
SV
7813};
7814
7815static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
7816{
fe5c7cc2 7817 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
68318b8e
SV
7818}
7819
7820struct cgroup_subsys cpu_cgroup_subsys = {
38605cae
IM
7821 .name = "cpu",
7822 .create = cpu_cgroup_create,
7823 .destroy = cpu_cgroup_destroy,
f780bdb7
BB
7824 .can_attach_task = cpu_cgroup_can_attach_task,
7825 .attach_task = cpu_cgroup_attach_task,
068c5cc5 7826 .exit = cpu_cgroup_exit,
38605cae
IM
7827 .populate = cpu_cgroup_populate,
7828 .subsys_id = cpu_cgroup_subsys_id,
68318b8e
SV
7829 .early_init = 1,
7830};
7831
052f1dc7 7832#endif /* CONFIG_CGROUP_SCHED */
d842de87
SV
7833
7834#ifdef CONFIG_CGROUP_CPUACCT
7835
7836/*
7837 * CPU accounting code for task groups.
7838 *
7839 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
7840 * (balbir@in.ibm.com).
7841 */
7842
934352f2 7843/* track cpu usage of a group of tasks and its child groups */
d842de87
SV
7844struct cpuacct {
7845 struct cgroup_subsys_state css;
7846 /* cpuusage holds pointer to a u64-type object on every cpu */
43cf38eb 7847 u64 __percpu *cpuusage;
ef12fefa 7848 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
934352f2 7849 struct cpuacct *parent;
d842de87
SV
7850};
7851
7852struct cgroup_subsys cpuacct_subsys;
7853
7854/* return cpu accounting group corresponding to this container */
32cd756a 7855static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
d842de87 7856{
32cd756a 7857 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
d842de87
SV
7858 struct cpuacct, css);
7859}
7860
7861/* return cpu accounting group to which this task belongs */
7862static inline struct cpuacct *task_ca(struct task_struct *tsk)
7863{
7864 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
7865 struct cpuacct, css);
7866}
7867
7868/* create a new cpu accounting group */
7869static struct cgroup_subsys_state *cpuacct_create(
32cd756a 7870 struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87
SV
7871{
7872 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ef12fefa 7873 int i;
d842de87
SV
7874
7875 if (!ca)
ef12fefa 7876 goto out;
d842de87
SV
7877
7878 ca->cpuusage = alloc_percpu(u64);
ef12fefa
BR
7879 if (!ca->cpuusage)
7880 goto out_free_ca;
7881
7882 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
7883 if (percpu_counter_init(&ca->cpustat[i], 0))
7884 goto out_free_counters;
d842de87 7885
934352f2
BR
7886 if (cgrp->parent)
7887 ca->parent = cgroup_ca(cgrp->parent);
7888
d842de87 7889 return &ca->css;
ef12fefa
BR
7890
7891out_free_counters:
7892 while (--i >= 0)
7893 percpu_counter_destroy(&ca->cpustat[i]);
7894 free_percpu(ca->cpuusage);
7895out_free_ca:
7896 kfree(ca);
7897out:
7898 return ERR_PTR(-ENOMEM);
d842de87
SV
7899}
7900
7901/* destroy an existing cpu accounting group */
41a2d6cf 7902static void
32cd756a 7903cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 7904{
32cd756a 7905 struct cpuacct *ca = cgroup_ca(cgrp);
ef12fefa 7906 int i;
d842de87 7907
ef12fefa
BR
7908 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
7909 percpu_counter_destroy(&ca->cpustat[i]);
d842de87
SV
7910 free_percpu(ca->cpuusage);
7911 kfree(ca);
7912}
7913
720f5498
KC
7914static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
7915{
b36128c8 7916 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
7917 u64 data;
7918
7919#ifndef CONFIG_64BIT
7920 /*
7921 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
7922 */
05fa785c 7923 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 7924 data = *cpuusage;
05fa785c 7925 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
7926#else
7927 data = *cpuusage;
7928#endif
7929
7930 return data;
7931}
7932
7933static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
7934{
b36128c8 7935 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
7936
7937#ifndef CONFIG_64BIT
7938 /*
7939 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
7940 */
05fa785c 7941 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 7942 *cpuusage = val;
05fa785c 7943 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
7944#else
7945 *cpuusage = val;
7946#endif
7947}
7948
d842de87 7949/* return total cpu usage (in nanoseconds) of a group */
32cd756a 7950static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
d842de87 7951{
32cd756a 7952 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87
SV
7953 u64 totalcpuusage = 0;
7954 int i;
7955
720f5498
KC
7956 for_each_present_cpu(i)
7957 totalcpuusage += cpuacct_cpuusage_read(ca, i);
d842de87
SV
7958
7959 return totalcpuusage;
7960}
7961
0297b803
DG
7962static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
7963 u64 reset)
7964{
7965 struct cpuacct *ca = cgroup_ca(cgrp);
7966 int err = 0;
7967 int i;
7968
7969 if (reset) {
7970 err = -EINVAL;
7971 goto out;
7972 }
7973
720f5498
KC
7974 for_each_present_cpu(i)
7975 cpuacct_cpuusage_write(ca, i, 0);
0297b803 7976
0297b803
DG
7977out:
7978 return err;
7979}
7980
e9515c3c
KC
7981static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
7982 struct seq_file *m)
7983{
7984 struct cpuacct *ca = cgroup_ca(cgroup);
7985 u64 percpu;
7986 int i;
7987
7988 for_each_present_cpu(i) {
7989 percpu = cpuacct_cpuusage_read(ca, i);
7990 seq_printf(m, "%llu ", (unsigned long long) percpu);
7991 }
7992 seq_printf(m, "\n");
7993 return 0;
7994}
7995
ef12fefa
BR
7996static const char *cpuacct_stat_desc[] = {
7997 [CPUACCT_STAT_USER] = "user",
7998 [CPUACCT_STAT_SYSTEM] = "system",
7999};
8000
8001static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
8002 struct cgroup_map_cb *cb)
8003{
8004 struct cpuacct *ca = cgroup_ca(cgrp);
8005 int i;
8006
8007 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
8008 s64 val = percpu_counter_read(&ca->cpustat[i]);
8009 val = cputime64_to_clock_t(val);
8010 cb->fill(cb, cpuacct_stat_desc[i], val);
8011 }
8012 return 0;
8013}
8014
d842de87
SV
8015static struct cftype files[] = {
8016 {
8017 .name = "usage",
f4c753b7
PM
8018 .read_u64 = cpuusage_read,
8019 .write_u64 = cpuusage_write,
d842de87 8020 },
e9515c3c
KC
8021 {
8022 .name = "usage_percpu",
8023 .read_seq_string = cpuacct_percpu_seq_read,
8024 },
ef12fefa
BR
8025 {
8026 .name = "stat",
8027 .read_map = cpuacct_stats_show,
8028 },
d842de87
SV
8029};
8030
32cd756a 8031static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 8032{
32cd756a 8033 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
d842de87
SV
8034}
8035
8036/*
8037 * charge this task's execution time to its accounting group.
8038 *
8039 * called with rq->lock held.
8040 */
029632fb 8041void cpuacct_charge(struct task_struct *tsk, u64 cputime)
d842de87
SV
8042{
8043 struct cpuacct *ca;
934352f2 8044 int cpu;
d842de87 8045
c40c6f85 8046 if (unlikely(!cpuacct_subsys.active))
d842de87
SV
8047 return;
8048
934352f2 8049 cpu = task_cpu(tsk);
a18b83b7
BR
8050
8051 rcu_read_lock();
8052
d842de87 8053 ca = task_ca(tsk);
d842de87 8054
934352f2 8055 for (; ca; ca = ca->parent) {
b36128c8 8056 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
d842de87
SV
8057 *cpuusage += cputime;
8058 }
a18b83b7
BR
8059
8060 rcu_read_unlock();
d842de87
SV
8061}
8062
fa535a77
AB
8063/*
8064 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
8065 * in cputime_t units. As a result, cpuacct_update_stats calls
8066 * percpu_counter_add with values large enough to always overflow the
8067 * per cpu batch limit causing bad SMP scalability.
8068 *
8069 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
8070 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
8071 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
8072 */
8073#ifdef CONFIG_SMP
8074#define CPUACCT_BATCH \
8075 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
8076#else
8077#define CPUACCT_BATCH 0
8078#endif
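/*
 * Illustrative numbers (not part of the original file, assuming a typical
 * small-SMP percpu_counter_batch of 32): with jiffies-granularity cputime,
 * cputime_one_jiffy is 1 and the batch stays at 32 ticks; with a
 * (hypothetical) nanosecond-granularity cputime at HZ=250,
 * cputime_one_jiffy is 4000000 and the batch becomes
 * min(32 * 4000000, INT_MAX) = 128000000, so the same amount of *time* is
 * batched in both configurations, as the comment above intends.
 */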
8079
ef12fefa
BR
8080/*
8081 * Charge the system/user time to the task's accounting group.
8082 */
029632fb 8083void cpuacct_update_stats(struct task_struct *tsk,
ef12fefa
BR
8084 enum cpuacct_stat_index idx, cputime_t val)
8085{
8086 struct cpuacct *ca;
fa535a77 8087 int batch = CPUACCT_BATCH;
ef12fefa
BR
8088
8089 if (unlikely(!cpuacct_subsys.active))
8090 return;
8091
8092 rcu_read_lock();
8093 ca = task_ca(tsk);
8094
8095 do {
fa535a77 8096 __percpu_counter_add(&ca->cpustat[idx], val, batch);
ef12fefa
BR
8097 ca = ca->parent;
8098 } while (ca);
8099 rcu_read_unlock();
8100}
8101
d842de87
SV
8102struct cgroup_subsys cpuacct_subsys = {
8103 .name = "cpuacct",
8104 .create = cpuacct_create,
8105 .destroy = cpuacct_destroy,
8106 .populate = cpuacct_populate,
8107 .subsys_id = cpuacct_subsys_id,
8108};
8109#endif /* CONFIG_CGROUP_CPUACCT */