Merge tag 'v3.10.86' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / sched / core.c
/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#ifdef CONFIG_MT65XX_TRACER
#include "mach/mt_mon.h"
#include "linux/aee.h"
#endif

#include <linux/mt_sched_mon.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

#include <mtlbprof/mtlbprof.h>
#include <mtlbprof/mtlbprof_stat.h>

#ifdef CONFIG_MT_PRIO_TRACER
# include <linux/prio_tracer.h>
#endif

void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}
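/*
 * Note: start_bandwidth_timer() is a no-op if @period_timer is already
 * active; otherwise hrtimer_forward() advances the expiry by whole @period
 * steps past "now" before the timer is (re)armed, so the bandwidth period
 * timer never fires in the past.
 */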
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
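/*
 * sched_feat_show() prints every feature name on a single line, prefixing
 * disabled ones with "NO_", e.g. (the exact list depends on features.h and
 * the kernel configuration):
 *
 *   GENTLE_FAIR_SLEEPERS START_DEBIT NO_NEXT_BUDDY LAST_BUDDY ...
 */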
#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	if (static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_dec(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	i = sched_feat_set(cmp);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */
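/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/sched_features                   # list feature bits
 *   echo NO_TTWU_QUEUE > /sys/kernel/debug/sched_features  # clear TTWU_QUEUE
 *   echo TTWU_QUEUE    > /sys/kernel/debug/sched_features  # set it again
 *
 * Writes are parsed by sched_feat_write()/sched_feat_set() above; an unknown
 * feature name returns -EINVAL.
 */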
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;
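/*
 * With the defaults above, realtime tasks may consume at most
 * sysctl_sched_rt_runtime / sysctl_sched_rt_period = 950000us / 1000000us,
 * i.e. 95% of each 1s period, leaving 5% for non-RT tasks. Both values are
 * tunable at runtime via /proc/sys/kernel/sched_rt_runtime_us and
 * /proc/sys/kernel/sched_rt_period_us.
 */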
/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}
#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}
#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */
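/*
 * Note: in the SMP variant above, a remote rq's hrtick_timer cannot be armed
 * directly, so hrtick_start() queues rq->hrtick_csd instead; the resulting
 * IPI runs __hrtick_start() on the target CPU, which restarts the timer
 * locally.
 */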
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */
/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP
void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
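/*
 * Note: resched_task() only sets TIF_NEED_RESCHED and, when the target CPU
 * is not polling that flag (see tsk_is_polling()), sends a reschedule IPI;
 * the actual context switch happens the next time the remote CPU passes a
 * scheduling point (interrupt return, preempt_enable(), cond_resched(), ...).
 */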
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			smp_send_reschedule(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
	struct rq *rq;

	rq = this_rq();

	/* Make sure rq->nr_running update is visible after the IPI */
	smp_rmb();

	/* More than one running task needs preemption */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq->clock - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#else /* !CONFIG_SMP */
void resched_task(struct task_struct *p)
{
	assert_raw_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */
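/*
 * sched_avg_update() ages rq->rt_avg: for every elapsed sched_avg_period()
 * the accumulated RT runtime is halved, which approximates an exponential
 * decay of the RT "pressure" seen by the CPU power calculations.
 */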
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
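/*
 * Usage sketch: callers typically walk the whole hierarchy starting at
 * &root_task_group, e.g.
 *
 *   rcu_read_lock();
 *   walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *   rcu_read_unlock();
 *
 * where my_down_visitor is a hypothetical tg_visitor callback; tg_nop() is
 * the stock no-op visitor for the direction a caller does not care about.
 */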
static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}
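/*
 * For non-SCHED_IDLE tasks the weight comes from prio_to_weight[]; each nice
 * step changes the weight by roughly 1.25x, e.g. nice 0 -> 1024,
 * nice -1 -> 1277, nice +1 -> 820 (see the weight tables in the scheduler
 * headers).
 */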
#ifdef CONFIG_MTK_SCHED_CMP_TGS
static void sched_tg_enqueue(struct rq *rq, struct task_struct *p)
{
	int id;
	unsigned long flags;
	struct task_struct *tg = p->group_leader;

	if (group_leader_is_empty(p))
		return;
	id = get_cluster_id(rq->cpu);
	if (unlikely(WARN_ON(id < 0)))
		return;

	raw_spin_lock_irqsave(&tg->thread_group_info_lock, flags);
	tg->thread_group_info[id].nr_running++;
	raw_spin_unlock_irqrestore(&tg->thread_group_info_lock, flags);

#if 0
	mt_sched_printf("enqueue %d:%s %d:%s %d %lu %lu %lu, %lu %lu %lu",
		tg->pid, tg->comm, p->pid, p->comm, id, rq->cpu,
		tg->thread_group_info[0].nr_running,
		tg->thread_group_info[0].cfs_nr_running,
		tg->thread_group_info[0].load_avg_ratio,
		tg->thread_group_info[1].nr_running,
		tg->thread_group_info[1].cfs_nr_running,
		tg->thread_group_info[1].load_avg_ratio);
#endif
	/* tgs_log(rq, p); */
}

static void sched_tg_dequeue(struct rq *rq, struct task_struct *p)
{
	int id;
	unsigned long flags;
	struct task_struct *tg = p->group_leader;

	if (group_leader_is_empty(p))
		return;
	id = get_cluster_id(rq->cpu);
	if (unlikely(WARN_ON(id < 0)))
		return;

	raw_spin_lock_irqsave(&tg->thread_group_info_lock, flags);
	/* WARN_ON(!tg->thread_group_info[id].nr_running); */
	tg->thread_group_info[id].nr_running--;
	raw_spin_unlock_irqrestore(&tg->thread_group_info_lock, flags);

#if 0
	mt_sched_printf("dequeue %d:%s %d:%s %d %d %lu %lu %lu, %lu %lu %lu",
		tg->pid, tg->comm, p->pid, p->comm, id, rq->cpu,
		tg->thread_group_info[0].nr_running,
		tg->thread_group_info[0].cfs_nr_running,
		tg->thread_group_info[0].load_avg_ratio,
		tg->thread_group_info[1].nr_running,
		tg->thread_group_info[1].cfs_nr_running,
		tg->thread_group_info[1].load_avg_ratio);
#endif
	/* tgs_log(rq, p); */
}

#endif

#ifdef CONFIG_MTK_SCHED_CMP_TGS
static void tgs_log(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_MT_SCHED_INFO
	struct task_struct *tg = p->group_leader;

	if (group_leader_is_empty(p))
		return;

	/* if (!strncmp(tg->comm, "sched_test", 10)) { */
	mt_sched_printf("%d:%s %d:%s %lu %lu %lu, %lu %lu %lu", tg->pid, tg->comm, p->pid, p->comm,
		tg->thread_group_info[0].nr_running,
		tg->thread_group_info[0].cfs_nr_running,
		tg->thread_group_info[0].load_avg_ratio,
		tg->thread_group_info[1].nr_running,
		tg->thread_group_info[1].cfs_nr_running,
		tg->thread_group_info[1].load_avg_ratio);
	/* } */
#endif
}
#endif
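/*
 * MTK CMP/TGS bookkeeping: every thread-group leader carries per-cluster
 * counters in thread_group_info[] which the enqueue/dequeue hooks below
 * update under thread_group_info_lock; they are presumably consumed by the
 * MediaTek load-balancing extensions elsewhere in the scheduler.
 */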
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(p);
	p->sched_class->enqueue_task(rq, p, flags);
#ifdef CONFIG_MTK_SCHED_CMP_TGS
	sched_tg_enqueue(rq, p);
	tgs_log(rq, p);
#endif
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(p);
	p->sched_class->dequeue_task(rq, p, flags);
#ifdef CONFIG_MTK_SCHED_CMP_TGS
	sched_tg_dequeue(rq, p);
	tgs_log(rq, p);
#endif
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);

#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
	if (2 <= rq->nr_running) {
		if (1 == cpumask_weight(&p->cpus_allowed))
			mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_AFFINITY_STATE);
		else
			mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_N_TASK_STATE);
	} else if (1 == rq->nr_running) {
		mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_ONE_TASK_STATE);
	}
#endif
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);

#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
	if (1 == rq->nr_running)
		mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_ONE_TASK_STATE);
	else if (0 == rq->nr_running)
		mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_NO_TASK_STATE);
#endif
}
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 st;

		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		st = steal_ticks(steal);
		steal = st * TICK_NSEC;

		rq->prev_steal_time_rq += steal;

		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}
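/*
 * In short: rq->clock advances with sched_clock time, while rq->clock_task
 * additionally excludes time stolen by hardirq/softirq processing and/or a
 * hypervisor (when the respective accounting options are enabled), so tasks
 * are not charged for CPU time they never got to use.
 */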
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	/* struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; */
	struct sched_param param = { .sched_priority = RTPM_PRIO_CPU_CALLBACK };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}
/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);
		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio)
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}

static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);

void register_task_migration_notifier(struct notifier_block *n)
{
	atomic_notifier_chain_register(&task_migration_notifier, n);
}
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		struct task_migration_notifier tmn;

		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);

		tmn.task = p;
		tmn.from_cpu = task_cpu(p);
		tmn.to_cpu = new_cpu;

		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
	}

	__set_task_cpu(p, new_cpu);
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

static int migration_cpu_stop(void *data);
/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change.  If it changes, i.e. @p might have woken up,
 * then return zero.  When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count).  If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, on_rq;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		on_rq = p->on_rq;
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
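/*
 * Callers such as kthread_bind() and the ptrace attach path use
 * wait_task_inactive() to ensure the target is fully off the CPU (a non-zero
 * return is its context-switch count) before manipulating it.
 */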
/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
#endif
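/*
 * update_avg() is a cheap exponentially weighted moving average:
 * avg += (sample - avg) / 8, i.e. each new sample carries 1/8 weight. It is
 * used below to track rq->avg_idle across wakeups.
 */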
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = 1;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	trace_sched_wakeup(p, true);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

	if (rq->idle_stamp) {
		u64 delta = rq->clock - rq->idle_stamp;
		u64 max = 2*sysctl_sched_migration_cost;

		if (delta > max)
			rq->avg_idle = max;
		else
			update_avg(&rq->avg_idle, delta);
		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_rq) {
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}
#ifdef CONFIG_SMP
static void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;

	raw_spin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock(&rq->lock);
}

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

void scheduler_ipi(void)
{
	if (llist_empty(&this_rq()->wake_list)
			&& !tick_nohz_full_cpu(smp_processor_id())
			&& !got_nohz_idle_kick()) {
		mt_trace_ISR_start(IPI_RESCHEDULE);
		mt_trace_ISR_end(IPI_RESCHEDULE);
		return;
	}

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	mt_trace_ISR_start(IPI_RESCHEDULE);
	tick_nohz_full_check();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	mt_trace_ISR_end(IPI_RESCHEDULE);
	irq_exit();
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
		smp_send_reschedule(cpu);
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}
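/*
 * Wakeup queueing: when the waking CPU and the target CPU do not share a
 * last-level cache (and the TTWU_QUEUE feature is enabled), ttwu_queue()
 * pushes the task onto the remote rq->wake_list and sends an IPI instead of
 * taking the remote rq->lock; scheduler_ipi() then completes the activation
 * on the target CPU via sched_ttwu_pending().
 */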
/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until it's done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() in finish_lock_switch().
	 */
	smp_rmb();

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
	if (task_cpu(p) != cpu) {
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
		char strings[128] = "";
#endif
		wake_flags |= WF_MIGRATED;
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
		snprintf(strings, 128, "%d:%d:%s:wakeup:%d:%d:%s", task_cpu(current), current->pid, current->comm, cpu, p->pid, p->comm);
		trace_sched_lbprof_log(strings);
#endif
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return success;
}
/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (WARN_ON_ONCE(rq != this_rq()) ||
	    WARN_ON_ONCE(p == current))
		return;

	lockdep_assert_held(&rq->lock);

	if (!raw_spin_trylock(&p->pi_lock)) {
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	if (!p->on_rq)
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}
1717
50fa610a
DH
1718/**
1719 * wake_up_process - Wake up a specific process
1720 * @p: The process to be woken up.
1721 *
1722 * Attempt to wake up the nominated process and move it to the set of runnable
1723 * processes. Returns 1 if the process was woken up, 0 if it was already
1724 * running.
1725 *
1726 * It may be assumed that this function implies a write memory barrier before
1727 * changing the task state if and only if any tasks are woken up.
1728 */
7ad5b3a5 1729int wake_up_process(struct task_struct *p)
1da177e4 1730{
9067ac85
ON
1731 WARN_ON(task_is_stopped_or_traced(p));
1732 return try_to_wake_up(p, TASK_NORMAL, 0);
1da177e4 1733}
1da177e4
LT
1734EXPORT_SYMBOL(wake_up_process);
1735
7ad5b3a5 1736int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
1737{
1738 return try_to_wake_up(p, state, 0);
1739}
1740
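/*
 * Typical producer/consumer usage sketch (the queue type and field names
 * here are illustrative only):
 *
 *   spin_lock(&q->lock);
 *   list_add_tail(&item->list, &q->items);   // publish the condition
 *   spin_unlock(&q->lock);
 *   wake_up_process(q->consumer);            // consumer sleeps in TASK_INTERRUPTIBLE
 *
 * The smp_mb__before_spinlock() in try_to_wake_up() pairs with the barrier
 * in set_current_state() on the sleeping side, so the "set condition, then
 * wake" ordering is safe.
 */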
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
	p->on_rq			= 0;

	p->se.on_rq			= 0;
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.nr_migrations		= 0;
	p->se.vruntime			= 0;
	INIT_LIST_HEAD(&p->se.group_node);

/*
 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
 * removed when useful for applications beyond shares distribution (e.g.
 * load-balance).
 */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
	p->se.avg.runnable_avg_period = 0;
	p->se.avg.runnable_avg_sum = 0;
#ifdef CONFIG_SCHED_HMP
	/* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
#define LOAD_AVG_MAX 47742
	if (p->mm) {
		p->se.avg.hmp_last_up_migration = 0;
		p->se.avg.hmp_last_down_migration = 0;
		p->se.avg.load_avg_ratio = 1023;
		p->se.avg.load_avg_contrib =
				(1023 * scale_load_down(p->se.load.weight));
		p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
		p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
		p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
	}
#endif
#endif
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_NUMA_BALANCING
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
		p->mm->numa_next_scan = jiffies;
		p->mm->numa_next_reset = jiffies;
		p->mm->numa_scan_seq = 0;
	}

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
#endif /* CONFIG_NUMA_BALANCING */
}
#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_SCHED_DEBUG
void set_numabalancing_state(bool enabled)
{
	if (enabled)
		sched_feat_set("NUMA");
	else
		sched_feat_set("NO_NUMA");
}
#else
__read_mostly bool numabalancing_enabled;

void set_numabalancing_state(bool enabled)
{
	numabalancing_enabled = enabled;
}
#endif /* CONFIG_SCHED_DEBUG */
#endif /* CONFIG_NUMA_BALANCING */
1826/*
1827 * fork()/clone()-time setup:
1828 */
3e51e3ed 1829void sched_fork(struct task_struct *p)
dd41f596 1830{
0122ec5b 1831 unsigned long flags;
dd41f596
IM
1832 int cpu = get_cpu();
1833
1834 __sched_fork(p);
06b83b5f 1835 /*
0017d735 1836 * We mark the process as running here. This guarantees that
06b83b5f
PZ
1837 * nobody will actually run it, and a signal or other external
1838 * event cannot wake it up and insert it on the runqueue either.
1839 */
0017d735 1840 p->state = TASK_RUNNING;
dd41f596 1841
c350a04e
MG
1842 /*
1843 * Make sure we do not leak PI boosting priority to the child.
1844 */
1845 p->prio = current->normal_prio;
1846
b9dc29e7
MG
1847 /*
1848 * Revert to default priority/policy on fork if requested.
1849 */
1850 if (unlikely(p->sched_reset_on_fork)) {
c350a04e 1851 if (task_has_rt_policy(p)) {
b9dc29e7 1852 p->policy = SCHED_NORMAL;
6c697bdf 1853 p->static_prio = NICE_TO_PRIO(0);
c350a04e
MG
1854 p->rt_priority = 0;
1855 } else if (PRIO_TO_NICE(p->static_prio) < 0)
1856 p->static_prio = NICE_TO_PRIO(0);
1857
1858 p->prio = p->normal_prio = __normal_prio(p);
1859 set_load_weight(p);
6c697bdf 1860
b9dc29e7
MG
1861 /*
1862 * We don't need the reset flag anymore after the fork. It has
1863 * fulfilled its duty:
1864 */
1865 p->sched_reset_on_fork = 0;
1866 }
ca94c442 1867
2ddbf952
HS
1868 if (!rt_prio(p->prio))
1869 p->sched_class = &fair_sched_class;
b29739f9 1870
cd29fe6f
PZ
1871 if (p->sched_class->task_fork)
1872 p->sched_class->task_fork(p);
1873
86951599
PZ
1874 /*
1875 * The child is not yet in the pid-hash so no cgroup attach races,
 1876 * and the cgroup is pinned to this child because cgroup_fork()
 1877 * is run before sched_fork().
1878 *
1879 * Silence PROVE_RCU.
1880 */
0122ec5b 1881 raw_spin_lock_irqsave(&p->pi_lock, flags);
5f3edc1b 1882 set_task_cpu(p, cpu);
0122ec5b 1883 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 1884
52f17b6c 1885#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
dd41f596 1886 if (likely(sched_info_on()))
52f17b6c 1887 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 1888#endif
3ca7a440
PZ
1889#if defined(CONFIG_SMP)
1890 p->on_cpu = 0;
4866cde0 1891#endif
bdd4e85d 1892#ifdef CONFIG_PREEMPT_COUNT
4866cde0 1893 /* Want to start with kernel preemption disabled. */
a1261f54 1894 task_thread_info(p)->preempt_count = 1;
1da177e4 1895#endif
806c09a7 1896#ifdef CONFIG_SMP
917b627d 1897 plist_node_init(&p->pushable_tasks, MAX_PRIO);
806c09a7 1898#endif
917b627d 1899
476d139c 1900 put_cpu();
1da177e4
LT
1901}
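
/*
 * User-space sketch, not part of this file: exercising the
 * p->sched_reset_on_fork path handled above.  The parent requests SCHED_FIFO
 * together with SCHED_RESET_ON_FORK (0x40000000, available since 2.6.32), so
 * children created afterwards are dropped back to SCHED_NORMAL/nice 0 by
 * sched_fork().  Assumption: run with CAP_SYS_NICE so SCHED_FIFO is granted.
 */
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <sys/wait.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK	0x40000000
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
		perror("sched_setscheduler");

	if (fork() == 0) {
		/* child: sched_fork() reverted the policy to SCHED_OTHER (0) */
		printf("child policy : %d\n", sched_getscheduler(0));
		_exit(0);
	}
	/* parent still reports SCHED_FIFO (1), possibly ORed with the flag */
	printf("parent policy: %d\n", sched_getscheduler(0) & ~SCHED_RESET_ON_FORK);
	wait(NULL);
	return 0;
}
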
1902
1903/*
1904 * wake_up_new_task - wake up a newly created task for the first time.
1905 *
1906 * This function will do some initial scheduler statistics housekeeping
1907 * that must be done for every newly created context, then puts the task
1908 * on the runqueue and wakes it.
1909 */
3e51e3ed 1910void wake_up_new_task(struct task_struct *p)
1da177e4
LT
1911{
1912 unsigned long flags;
dd41f596 1913 struct rq *rq;
fabf318e 1914
ab2515c4 1915 raw_spin_lock_irqsave(&p->pi_lock, flags);
fabf318e
PZ
1916#ifdef CONFIG_SMP
1917 /*
1918 * Fork balancing, do it here and not earlier because:
1919 * - cpus_allowed can change in the fork path
1920 * - any previously selected cpu might disappear through hotplug
fabf318e 1921 */
ab2515c4 1922 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
0017d735
PZ
1923#endif
1924
6fa3eb70
S
1925 /* Initialize new task's runnable average */
1926 init_task_runnable_average(p);
ab2515c4 1927 rq = __task_rq_lock(p);
cd29fe6f 1928 activate_task(rq, p, 0);
fd2f4419 1929 p->on_rq = 1;
89363381 1930 trace_sched_wakeup_new(p, true);
a7558e01 1931 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 1932#ifdef CONFIG_SMP
efbbd05a
PZ
1933 if (p->sched_class->task_woken)
1934 p->sched_class->task_woken(rq, p);
9a897c5a 1935#endif
0122ec5b 1936 task_rq_unlock(rq, p, &flags);
1da177e4
LT
1937}
1938
e107be36
AK
1939#ifdef CONFIG_PREEMPT_NOTIFIERS
1940
1941/**
80dd99b3 1942 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 1943 * @notifier: notifier struct to register
e107be36
AK
1944 */
1945void preempt_notifier_register(struct preempt_notifier *notifier)
1946{
1947 hlist_add_head(&notifier->link, &current->preempt_notifiers);
1948}
1949EXPORT_SYMBOL_GPL(preempt_notifier_register);
1950
1951/**
1952 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 1953 * @notifier: notifier struct to unregister
e107be36
AK
1954 *
1955 * This is safe to call from within a preemption notifier.
1956 */
1957void preempt_notifier_unregister(struct preempt_notifier *notifier)
1958{
1959 hlist_del(&notifier->link);
1960}
1961EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1962
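/*
 * Illustrative sketch, not from this file: a minimal preempt-notifier user
 * (the best-known in-tree consumer is KVM's vcpu code).  Requires
 * CONFIG_PREEMPT_NOTIFIERS; the demo_* names are hypothetical.
 */
#include <linux/preempt.h>
#include <linux/sched.h>

static void demo_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* the registering task is being scheduled back in on @cpu */
}

static void demo_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	/* the registering task is being switched out in favour of @next */
}

static struct preempt_notifier_ops demo_ops = {
	.sched_in	= demo_sched_in,
	.sched_out	= demo_sched_out,
};

static struct preempt_notifier demo_notifier;

static void demo_hook_current(void)
{
	preempt_notifier_init(&demo_notifier, &demo_ops);
	preempt_notifier_register(&demo_notifier);	/* attaches to current */
}
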
1963static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1964{
1965 struct preempt_notifier *notifier;
e107be36 1966
b67bfe0d 1967 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
1968 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1969}
1970
1971static void
1972fire_sched_out_preempt_notifiers(struct task_struct *curr,
1973 struct task_struct *next)
1974{
1975 struct preempt_notifier *notifier;
e107be36 1976
b67bfe0d 1977 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
e107be36
AK
1978 notifier->ops->sched_out(notifier, next);
1979}
1980
6d6bc0ad 1981#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36
AK
1982
1983static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1984{
1985}
1986
1987static void
1988fire_sched_out_preempt_notifiers(struct task_struct *curr,
1989 struct task_struct *next)
1990{
1991}
1992
6d6bc0ad 1993#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 1994
4866cde0
NP
1995/**
1996 * prepare_task_switch - prepare to switch tasks
1997 * @rq: the runqueue preparing to switch
421cee29 1998 * @prev: the current task that is being switched out
4866cde0
NP
1999 * @next: the task we are going to switch to.
2000 *
2001 * This is called with the rq lock held and interrupts off. It must
2002 * be paired with a subsequent finish_task_switch after the context
2003 * switch.
2004 *
2005 * prepare_task_switch sets up locking and calls architecture specific
2006 * hooks.
2007 */
e107be36
AK
2008static inline void
2009prepare_task_switch(struct rq *rq, struct task_struct *prev,
2010 struct task_struct *next)
4866cde0 2011{
895dd92c 2012 trace_sched_switch(prev, next);
fe4b04fa
PZ
2013 sched_info_switch(prev, next);
2014 perf_event_task_sched_out(prev, next);
e107be36 2015 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2016 prepare_lock_switch(rq, next);
2017 prepare_arch_switch(next);
2018}
2019
1da177e4
LT
2020/**
2021 * finish_task_switch - clean up after a task-switch
344babaa 2022 * @rq: runqueue associated with task-switch
1da177e4
LT
2023 * @prev: the thread we just switched away from.
2024 *
4866cde0
NP
2025 * finish_task_switch must be called after the context switch, paired
2026 * with a prepare_task_switch call before the context switch.
2027 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2028 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2029 *
2030 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2031 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2032 * with the lock held can cause deadlocks; see schedule() for
2033 * details.)
2034 */
a9957449 2035static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1da177e4
LT
2036 __releases(rq->lock)
2037{
1da177e4 2038 struct mm_struct *mm = rq->prev_mm;
55a101f8 2039 long prev_state;
1da177e4
LT
2040
2041 rq->prev_mm = NULL;
2042
2043 /*
2044 * A task struct has one reference for the use as "current".
c394cc9f 2045 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2046 * schedule one last time. The schedule call will never return, and
2047 * the scheduled task must drop that reference.
c394cc9f 2048 * The test for TASK_DEAD must occur while the runqueue locks are
1da177e4
LT
2049 * still held, otherwise prev could be scheduled on another cpu, die
2050 * there before we look at prev->state, and then the reference would
2051 * be dropped twice.
2052 * Manfred Spraul <manfred@colorfullife.com>
2053 */
55a101f8 2054 prev_state = prev->state;
bf9fae9f 2055 vtime_task_switch(prev);
4866cde0 2056 finish_arch_switch(prev);
a8d757ef 2057 perf_event_task_sched_in(prev, current);
4866cde0 2058 finish_lock_switch(rq, prev);
01f23e16 2059 finish_arch_post_lock_switch();
e8fa1362 2060
e107be36 2061 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2062 if (mm)
2063 mmdrop(mm);
c394cc9f 2064 if (unlikely(prev_state == TASK_DEAD)) {
c6fd91f0 2065 /*
2066 * Remove function-return probe instances associated with this
2067 * task and put them back on the free list.
9761eea8 2068 */
c6fd91f0 2069 kprobe_flush_task(prev);
1da177e4 2070 put_task_struct(prev);
c6fd91f0 2071 }
99e5ada9
FW
2072
2073 tick_nohz_task_switch(current);
1da177e4
LT
2074}
2075
3f029d3c
GH
2076#ifdef CONFIG_SMP
2077
2078/* assumes rq->lock is held */
2079static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2080{
2081 if (prev->sched_class->pre_schedule)
2082 prev->sched_class->pre_schedule(rq, prev);
2083}
2084
2085/* rq->lock is NOT held, but preemption is disabled */
2086static inline void post_schedule(struct rq *rq)
2087{
2088 if (rq->post_schedule) {
2089 unsigned long flags;
2090
05fa785c 2091 raw_spin_lock_irqsave(&rq->lock, flags);
3f029d3c
GH
2092 if (rq->curr->sched_class->post_schedule)
2093 rq->curr->sched_class->post_schedule(rq);
05fa785c 2094 raw_spin_unlock_irqrestore(&rq->lock, flags);
3f029d3c
GH
2095
2096 rq->post_schedule = 0;
2097 }
2098}
2099
2100#else
da19ab51 2101
3f029d3c
GH
2102static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2103{
2104}
2105
2106static inline void post_schedule(struct rq *rq)
2107{
1da177e4
LT
2108}
2109
3f029d3c
GH
2110#endif
2111
1da177e4
LT
2112/**
2113 * schedule_tail - first thing a freshly forked thread must call.
2114 * @prev: the thread we just switched away from.
2115 */
36c8b586 2116asmlinkage void schedule_tail(struct task_struct *prev)
1da177e4
LT
2117 __releases(rq->lock)
2118{
70b97a7f
IM
2119 struct rq *rq = this_rq();
2120
4866cde0 2121 finish_task_switch(rq, prev);
da19ab51 2122
3f029d3c
GH
2123 /*
2124 * FIXME: do we need to worry about rq being invalidated by the
2125 * task_switch?
2126 */
2127 post_schedule(rq);
70b97a7f 2128
4866cde0
NP
2129#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2130 /* In this case, finish_task_switch does not reenable preemption */
2131 preempt_enable();
2132#endif
1da177e4 2133 if (current->set_child_tid)
b488893a 2134 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
2135}
2136
2137/*
2138 * context_switch - switch to the new MM and the new
2139 * thread's register state.
2140 */
dd41f596 2141static inline void
70b97a7f 2142context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 2143 struct task_struct *next)
1da177e4 2144{
dd41f596 2145 struct mm_struct *mm, *oldmm;
1da177e4 2146
e107be36 2147 prepare_task_switch(rq, prev, next);
fe4b04fa 2148
6fa3eb70
S
2149#ifdef CONFIG_MT65XX_TRACER
2150 if(get_mt65xx_mon_mode() == MODE_SCHED_SWITCH)
2151 trace_mt65xx_mon_sched_switch(prev, next);
2152#endif
dd41f596
IM
2153 mm = next->mm;
2154 oldmm = prev->active_mm;
9226d125
ZA
2155 /*
2156 * For paravirt, this is coupled with an exit in switch_to to
2157 * combine the page table reload and the switch backend into
2158 * one hypercall.
2159 */
224101ed 2160 arch_start_context_switch(prev);
9226d125 2161
31915ab4 2162 if (!mm) {
1da177e4
LT
2163 next->active_mm = oldmm;
2164 atomic_inc(&oldmm->mm_count);
2165 enter_lazy_tlb(oldmm, next);
2166 } else
2167 switch_mm(oldmm, mm, next);
2168
31915ab4 2169 if (!prev->mm) {
1da177e4 2170 prev->active_mm = NULL;
1da177e4
LT
2171 rq->prev_mm = oldmm;
2172 }
3a5f5e48
IM
2173 /*
 2174 * The runqueue lock will be released by the next
 2175 * task (which is an invalid locking op, but in the case
 2176 * of the scheduler it's an obvious special-case), so we
2177 * do an early lockdep release here:
2178 */
2179#ifndef __ARCH_WANT_UNLOCKED_CTXSW
8a25d5de 2180 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3a5f5e48 2181#endif
1da177e4 2182
91d1aa43 2183 context_tracking_task_switch(prev, next);
1da177e4
LT
2184 /* Here we just switch the register state and the stack. */
2185 switch_to(prev, next, prev);
2186
dd41f596
IM
2187 barrier();
2188 /*
2189 * this_rq must be evaluated again because prev may have moved
2190 * CPUs since it called schedule(), thus the 'rq' on its stack
2191 * frame will be invalid.
2192 */
2193 finish_task_switch(this_rq(), prev);
1da177e4
LT
2194}
2195
2196/*
1c3e8264 2197 * nr_running and nr_context_switches:
1da177e4
LT
2198 *
2199 * externally visible scheduler statistics: current number of runnable
1c3e8264 2200 * threads, total number of context switches performed since bootup.
1da177e4
LT
2201 */
2202unsigned long nr_running(void)
2203{
2204 unsigned long i, sum = 0;
2205
2206 for_each_online_cpu(i)
2207 sum += cpu_rq(i)->nr_running;
2208
2209 return sum;
f711f609 2210}
1da177e4 2211
1da177e4 2212unsigned long long nr_context_switches(void)
46cb4b7c 2213{
cc94abfc
SR
2214 int i;
2215 unsigned long long sum = 0;
46cb4b7c 2216
0a945022 2217 for_each_possible_cpu(i)
1da177e4 2218 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2219
1da177e4
LT
2220 return sum;
2221}
483b4ee6 2222
1da177e4
LT
2223unsigned long nr_iowait(void)
2224{
2225 unsigned long i, sum = 0;
483b4ee6 2226
0a945022 2227 for_each_possible_cpu(i)
1da177e4 2228 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2229
1da177e4
LT
2230 return sum;
2231}
483b4ee6 2232
8c215bd3 2233unsigned long nr_iowait_cpu(int cpu)
69d25870 2234{
8c215bd3 2235 struct rq *this = cpu_rq(cpu);
69d25870
AV
2236 return atomic_read(&this->nr_iowait);
2237}
46cb4b7c 2238
69d25870
AV
2239unsigned long this_cpu_load(void)
2240{
2241 struct rq *this = this_rq();
2242 return this->cpu_load[0];
2243}
e790fb0b 2244
6fa3eb70
S
2245unsigned long get_cpu_load(int cpu)
2246{
2247 struct rq *this = cpu_rq(cpu);
2248 return this->cpu_load[0];
2249}
2250EXPORT_SYMBOL(get_cpu_load);
46cb4b7c 2251
5167e8d5
PZ
2252/*
2253 * Global load-average calculations
2254 *
2255 * We take a distributed and async approach to calculating the global load-avg
2256 * in order to minimize overhead.
2257 *
2258 * The global load average is an exponentially decaying average of nr_running +
2259 * nr_uninterruptible.
2260 *
2261 * Once every LOAD_FREQ:
2262 *
2263 * nr_active = 0;
2264 * for_each_possible_cpu(cpu)
2265 * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
2266 *
2267 * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
2268 *
 2269 * For a number of reasons the above turns into the mess below:
2270 *
2271 * - for_each_possible_cpu() is prohibitively expensive on machines with
2272 * serious number of cpus, therefore we need to take a distributed approach
2273 * to calculating nr_active.
2274 *
2275 * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
2276 * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
2277 *
 2278 * So assuming nr_active := 0 when we start out -- true by definition, we
2279 * can simply take per-cpu deltas and fold those into a global accumulate
2280 * to obtain the same result. See calc_load_fold_active().
2281 *
2282 * Furthermore, in order to avoid synchronizing all per-cpu delta folding
2283 * across the machine, we assume 10 ticks is sufficient time for every
2284 * cpu to have completed this task.
2285 *
2286 * This places an upper-bound on the IRQ-off latency of the machine. Then
 2287 * again, being late doesn't lose the delta, it just wrecks the sample.
2288 *
2289 * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
2290 * this would add another cross-cpu cacheline miss and atomic operation
2291 * to the wakeup path. Instead we increment on whatever cpu the task ran
2292 * when it went into uninterruptible state and decrement on whatever cpu
2293 * did the wakeup. This means that only the sum of nr_uninterruptible over
2294 * all cpus yields the correct result.
2295 *
2296 * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
2297 */
2298
dce48a84
TG
2299/* Variables and functions for calc_load */
2300static atomic_long_t calc_load_tasks;
2301static unsigned long calc_load_update;
2302unsigned long avenrun[3];
5167e8d5
PZ
2303EXPORT_SYMBOL(avenrun); /* should be removed */
2304
2305/**
2306 * get_avenrun - get the load average array
2307 * @loads: pointer to dest load array
2308 * @offset: offset to add
2309 * @shift: shift count to shift the result left
2310 *
2311 * These values are estimates at best, so no need for locking.
2312 */
2313void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2314{
2315 loads[0] = (avenrun[0] + offset) << shift;
2316 loads[1] = (avenrun[1] + offset) << shift;
2317 loads[2] = (avenrun[2] + offset) << shift;
2318}
46cb4b7c 2319
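/*
 * Consumer-side sketch (mirroring fs/proc/loadavg.c, not part of this file):
 * avenrun[] is fixed point with FSHIFT (11) fractional bits, so the familiar
 * "0.08 1.23 ..." strings are produced roughly as below.  demo_print_loadavg
 * is a hypothetical helper.
 */
#define LOAD_INT(x)	((x) >> FSHIFT)
#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static void demo_print_loadavg(void)
{
	unsigned long avnrun[3];

	get_avenrun(avnrun, FIXED_1/200, 0);	/* +FIXED_1/200 rounds to 2 digits */

	printk("%lu.%02lu %lu.%02lu %lu.%02lu\n",
	       LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
	       LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
	       LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
}
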
74f5187a
PZ
2320static long calc_load_fold_active(struct rq *this_rq)
2321{
2322 long nr_active, delta = 0;
2323
2324 nr_active = this_rq->nr_running;
2325 nr_active += (long) this_rq->nr_uninterruptible;
2326
2327 if (nr_active != this_rq->calc_load_active) {
2328 delta = nr_active - this_rq->calc_load_active;
2329 this_rq->calc_load_active = nr_active;
2330 }
2331
2332 return delta;
2333}
2334
5167e8d5
PZ
2335/*
2336 * a1 = a0 * e + a * (1 - e)
2337 */
0f004f5a
PZ
2338static unsigned long
2339calc_load(unsigned long load, unsigned long exp, unsigned long active)
2340{
2341 load *= exp;
2342 load += active * (FIXED_1 - exp);
2343 load += 1UL << (FSHIFT - 1);
2344 return load >> FSHIFT;
2345}
2346
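/*
 * Stand-alone user-space sketch (not part of the kernel build) of the
 * fixed-point step above, using the constants from <linux/sched.h>:
 * FSHIFT = 11, FIXED_1 = 1 << 11, EXP_1 = 1884 (~ 1/e^(5s/60s)).  It shows the
 * 1-minute average converging towards a constant "3 runnable tasks" load.
 */
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884			/* 1/exp(5sec/1min) as fixed point */

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);	/* round to nearest */
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long avg = 0, active = 3 * FIXED_1;	/* 3 runnable tasks */
	int i;

	for (i = 1; i <= 24; i++) {	/* one sample per LOAD_FREQ (~5s) */
		avg = calc_load(avg, EXP_1, active);
		printf("after %2d samples: %lu.%02lu\n", i,
		       avg >> FSHIFT, ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}
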
3451d024 2347#ifdef CONFIG_NO_HZ_COMMON
74f5187a 2348/*
5167e8d5
PZ
2349 * Handle NO_HZ for the global load-average.
2350 *
2351 * Since the above described distributed algorithm to compute the global
2352 * load-average relies on per-cpu sampling from the tick, it is affected by
2353 * NO_HZ.
2354 *
2355 * The basic idea is to fold the nr_active delta into a global idle-delta upon
2356 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
2357 * when we read the global state.
2358 *
2359 * Obviously reality has to ruin such a delightfully simple scheme:
2360 *
2361 * - When we go NO_HZ idle during the window, we can negate our sample
2362 * contribution, causing under-accounting.
2363 *
2364 * We avoid this by keeping two idle-delta counters and flipping them
2365 * when the window starts, thus separating old and new NO_HZ load.
2366 *
2367 * The only trick is the slight shift in index flip for read vs write.
2368 *
2369 * 0s 5s 10s 15s
2370 * +10 +10 +10 +10
2371 * |-|-----------|-|-----------|-|-----------|-|
2372 * r:0 0 1 1 0 0 1 1 0
2373 * w:0 1 1 0 0 1 1 0 0
2374 *
2375 * This ensures we'll fold the old idle contribution in this window while
 2376 * accumulating the new one.
2377 *
2378 * - When we wake up from NO_HZ idle during the window, we push up our
2379 * contribution, since we effectively move our sample point to a known
2380 * busy state.
2381 *
2382 * This is solved by pushing the window forward, and thus skipping the
2383 * sample, for this cpu (effectively using the idle-delta for this cpu which
2384 * was in effect at the time the window opened). This also solves the issue
2385 * of having to deal with a cpu having been in NOHZ idle for multiple
2386 * LOAD_FREQ intervals.
74f5187a
PZ
2387 *
2388 * When making the ILB scale, we should try to pull this in as well.
2389 */
5167e8d5
PZ
2390static atomic_long_t calc_load_idle[2];
2391static int calc_load_idx;
74f5187a 2392
5167e8d5 2393static inline int calc_load_write_idx(void)
74f5187a 2394{
5167e8d5
PZ
2395 int idx = calc_load_idx;
2396
2397 /*
2398 * See calc_global_nohz(), if we observe the new index, we also
2399 * need to observe the new update time.
2400 */
2401 smp_rmb();
2402
2403 /*
2404 * If the folding window started, make sure we start writing in the
2405 * next idle-delta.
2406 */
2407 if (!time_before(jiffies, calc_load_update))
2408 idx++;
2409
2410 return idx & 1;
2411}
2412
2413static inline int calc_load_read_idx(void)
2414{
2415 return calc_load_idx & 1;
2416}
2417
2418void calc_load_enter_idle(void)
2419{
2420 struct rq *this_rq = this_rq();
74f5187a
PZ
2421 long delta;
2422
5167e8d5
PZ
2423 /*
2424 * We're going into NOHZ mode, if there's any pending delta, fold it
2425 * into the pending idle delta.
2426 */
74f5187a 2427 delta = calc_load_fold_active(this_rq);
5167e8d5
PZ
2428 if (delta) {
2429 int idx = calc_load_write_idx();
2430 atomic_long_add(delta, &calc_load_idle[idx]);
2431 }
74f5187a
PZ
2432}
2433
5167e8d5 2434void calc_load_exit_idle(void)
74f5187a 2435{
5167e8d5
PZ
2436 struct rq *this_rq = this_rq();
2437
2438 /*
2439 * If we're still before the sample window, we're done.
2440 */
2441 if (time_before(jiffies, this_rq->calc_load_update))
2442 return;
74f5187a
PZ
2443
2444 /*
5167e8d5
PZ
2445 * We woke inside or after the sample window, this means we're already
2446 * accounted through the nohz accounting, so skip the entire deal and
2447 * sync up for the next window.
74f5187a 2448 */
5167e8d5
PZ
2449 this_rq->calc_load_update = calc_load_update;
2450 if (time_before(jiffies, this_rq->calc_load_update + 10))
2451 this_rq->calc_load_update += LOAD_FREQ;
2452}
2453
2454static long calc_load_fold_idle(void)
2455{
2456 int idx = calc_load_read_idx();
2457 long delta = 0;
2458
2459 if (atomic_long_read(&calc_load_idle[idx]))
2460 delta = atomic_long_xchg(&calc_load_idle[idx], 0);
74f5187a
PZ
2461
2462 return delta;
2463}
0f004f5a
PZ
2464
2465/**
2466 * fixed_power_int - compute: x^n, in O(log n) time
2467 *
2468 * @x: base of the power
2469 * @frac_bits: fractional bits of @x
2470 * @n: power to raise @x to.
2471 *
2472 * By exploiting the relation between the definition of the natural power
2473 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2474 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2475 * (where: n_i \elem {0, 1}, the binary vector representing n),
2476 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2477 * of course trivially computable in O(log_2 n), the length of our binary
2478 * vector.
2479 */
2480static unsigned long
2481fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2482{
2483 unsigned long result = 1UL << frac_bits;
2484
2485 if (n) for (;;) {
2486 if (n & 1) {
2487 result *= x;
2488 result += 1UL << (frac_bits - 1);
2489 result >>= frac_bits;
2490 }
2491 n >>= 1;
2492 if (!n)
2493 break;
2494 x *= x;
2495 x += 1UL << (frac_bits - 1);
2496 x >>= frac_bits;
2497 }
2498
2499 return result;
2500}
2501
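/*
 * Stand-alone user-space sketch (assumption: not part of the kernel build)
 * checking that the square-and-multiply loop above agrees with naive repeated
 * multiplication, up to the rounding done at each fixed-point multiply.
 */
#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884

/* copy of fixed_power_int() above, for a quick user-space comparison */
static unsigned long fixed_power_int(unsigned long x, unsigned int frac_bits,
				     unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) for (;;) {
		if (n & 1) {
			result *= x;
			result += 1UL << (frac_bits - 1);
			result >>= frac_bits;
		}
		n >>= 1;
		if (!n)
			break;
		x *= x;
		x += 1UL << (frac_bits - 1);
		x >>= frac_bits;
	}
	return result;
}

int main(void)
{
	unsigned long naive = FIXED_1;
	unsigned int i, n = 13;

	for (i = 0; i < n; i++)	/* n plain multiplications with the same rounding */
		naive = (naive * EXP_1 + (FIXED_1 >> 1)) >> FSHIFT;

	printf("O(log n): %lu, naive: %lu\n",
	       fixed_power_int(EXP_1, FSHIFT, n), naive);
	return 0;
}
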
2502/*
2503 * a1 = a0 * e + a * (1 - e)
2504 *
2505 * a2 = a1 * e + a * (1 - e)
2506 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2507 * = a0 * e^2 + a * (1 - e) * (1 + e)
2508 *
2509 * a3 = a2 * e + a * (1 - e)
2510 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2511 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2512 *
2513 * ...
2514 *
2515 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
2516 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2517 * = a0 * e^n + a * (1 - e^n)
2518 *
2519 * [1] application of the geometric series:
2520 *
2521 * n 1 - x^(n+1)
2522 * S_n := \Sum x^i = -------------
2523 * i=0 1 - x
2524 */
2525static unsigned long
2526calc_load_n(unsigned long load, unsigned long exp,
2527 unsigned long active, unsigned int n)
2528{
2529
2530 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2531}
2532
2533/*
2534 * NO_HZ can leave us missing all per-cpu ticks calling
 2535 * calc_load_account_active(), but since an idle CPU folds its delta into
 2536 * calc_load_idle via calc_load_enter_idle(), all we need to do is fold
2537 * in the pending idle delta if our idle period crossed a load cycle boundary.
2538 *
2539 * Once we've updated the global active value, we need to apply the exponential
2540 * weights adjusted to the number of cycles missed.
2541 */
c308b56b 2542static void calc_global_nohz(void)
0f004f5a
PZ
2543{
2544 long delta, active, n;
2545
5167e8d5
PZ
2546 if (!time_before(jiffies, calc_load_update + 10)) {
2547 /*
2548 * Catch-up, fold however many we are behind still
2549 */
2550 delta = jiffies - calc_load_update - 10;
2551 n = 1 + (delta / LOAD_FREQ);
0f004f5a 2552
5167e8d5
PZ
2553 active = atomic_long_read(&calc_load_tasks);
2554 active = active > 0 ? active * FIXED_1 : 0;
0f004f5a 2555
5167e8d5
PZ
2556 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2557 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2558 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
0f004f5a 2559
5167e8d5
PZ
2560 calc_load_update += n * LOAD_FREQ;
2561 }
74f5187a 2562
5167e8d5
PZ
2563 /*
2564 * Flip the idle index...
2565 *
2566 * Make sure we first write the new time then flip the index, so that
2567 * calc_load_write_idx() will see the new time when it reads the new
2568 * index, this avoids a double flip messing things up.
2569 */
2570 smp_wmb();
2571 calc_load_idx++;
74f5187a 2572}
3451d024 2573#else /* !CONFIG_NO_HZ_COMMON */
0f004f5a 2574
5167e8d5
PZ
2575static inline long calc_load_fold_idle(void) { return 0; }
2576static inline void calc_global_nohz(void) { }
74f5187a 2577
3451d024 2578#endif /* CONFIG_NO_HZ_COMMON */
46cb4b7c 2579
46cb4b7c 2580/*
dce48a84
TG
2581 * calc_load - update the avenrun load estimates 10 ticks after the
2582 * CPUs have updated calc_load_tasks.
7835b98b 2583 */
0f004f5a 2584void calc_global_load(unsigned long ticks)
7835b98b 2585{
5167e8d5 2586 long active, delta;
1da177e4 2587
0f004f5a 2588 if (time_before(jiffies, calc_load_update + 10))
dce48a84 2589 return;
1da177e4 2590
5167e8d5
PZ
2591 /*
2592 * Fold the 'old' idle-delta to include all NO_HZ cpus.
2593 */
2594 delta = calc_load_fold_idle();
2595 if (delta)
2596 atomic_long_add(delta, &calc_load_tasks);
2597
dce48a84
TG
2598 active = atomic_long_read(&calc_load_tasks);
2599 active = active > 0 ? active * FIXED_1 : 0;
1da177e4 2600
dce48a84
TG
2601 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2602 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2603 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
dd41f596 2604
dce48a84 2605 calc_load_update += LOAD_FREQ;
c308b56b
PZ
2606
2607 /*
5167e8d5 2608 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
c308b56b
PZ
2609 */
2610 calc_global_nohz();
dce48a84 2611}
1da177e4 2612
dce48a84 2613/*
74f5187a
PZ
2614 * Called from update_cpu_load() to periodically update this CPU's
2615 * active count.
dce48a84
TG
2616 */
2617static void calc_load_account_active(struct rq *this_rq)
2618{
74f5187a 2619 long delta;
08c183f3 2620
74f5187a
PZ
2621 if (time_before(jiffies, this_rq->calc_load_update))
2622 return;
783609c6 2623
74f5187a 2624 delta = calc_load_fold_active(this_rq);
74f5187a 2625 if (delta)
dce48a84 2626 atomic_long_add(delta, &calc_load_tasks);
74f5187a
PZ
2627
2628 this_rq->calc_load_update += LOAD_FREQ;
46cb4b7c
SS
2629}
2630
5167e8d5
PZ
2631/*
2632 * End of global load-average stuff
2633 */
2634
fdf3e95d
VP
2635/*
2636 * The exact cpuload at various idx values, calculated at every tick would be
2637 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2638 *
2639 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
2640 * on nth tick when cpu may be busy, then we have:
2641 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 2642 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2643 *
2644 * decay_load_missed() below does efficient calculation of
2645 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2646 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
2647 *
2648 * The calculation is approximated on a 128 point scale.
2649 * degrade_zero_ticks is the number of ticks after which load at any
2650 * particular idx is approximated to be zero.
2651 * degrade_factor is a precomputed table, a row for each load idx.
2652 * Each column corresponds to degradation factor for a power of two ticks,
2653 * based on 128 point scale.
2654 * Example:
2655 * row 2, col 3 (=12) says that the degradation at load idx 2 after
2656 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2657 *
2658 * With this power of 2 load factors, we can degrade the load n times
2659 * by looking at 1 bits in n and doing as many mult/shift instead of
2660 * n mult/shifts needed by the exact degradation.
2661 */
2662#define DEGRADE_SHIFT 7
2663static const unsigned char
2664 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2665static const unsigned char
2666 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2667 {0, 0, 0, 0, 0, 0, 0, 0},
2668 {64, 32, 8, 0, 0, 0, 0, 0},
2669 {96, 72, 40, 12, 1, 0, 0},
2670 {112, 98, 75, 43, 15, 1, 0},
2671 {120, 112, 98, 76, 45, 16, 2} };
2672
2673/*
2674 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
2675 * would be when CPU is idle and so we just decay the old load without
2676 * adding any new load.
2677 */
2678static unsigned long
2679decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2680{
2681 int j = 0;
2682
2683 if (!missed_updates)
2684 return load;
2685
2686 if (missed_updates >= degrade_zero_ticks[idx])
2687 return 0;
2688
2689 if (idx == 1)
2690 return load >> missed_updates;
2691
2692 while (missed_updates) {
2693 if (missed_updates % 2)
2694 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2695
2696 missed_updates >>= 1;
2697 j++;
2698 }
2699 return load;
2700}
2701
46cb4b7c 2702/*
dd41f596 2703 * Update rq->cpu_load[] statistics. This function is usually called every
fdf3e95d
VP
2704 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2705 * every tick. We fix it up based on jiffies.
46cb4b7c 2706 */
556061b0
PZ
2707static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
2708 unsigned long pending_updates)
46cb4b7c 2709{
dd41f596 2710 int i, scale;
46cb4b7c 2711
dd41f596 2712 this_rq->nr_load_updates++;
46cb4b7c 2713
dd41f596 2714 /* Update our load: */
fdf3e95d
VP
2715 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2716 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
dd41f596 2717 unsigned long old_load, new_load;
7d1e6a9b 2718
dd41f596 2719 /* scale is effectively 1 << i now, and >> i divides by scale */
46cb4b7c 2720
dd41f596 2721 old_load = this_rq->cpu_load[i];
fdf3e95d 2722 old_load = decay_load_missed(old_load, pending_updates - 1, i);
dd41f596 2723 new_load = this_load;
a25707f3
IM
2724 /*
2725 * Round up the averaging division if load is increasing. This
2726 * prevents us from getting stuck on 9 if the load is 10, for
2727 * example.
2728 */
2729 if (new_load > old_load)
fdf3e95d
VP
2730 new_load += scale - 1;
2731
2732 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
dd41f596 2733 }
da2b71ed
SS
2734
2735 sched_avg_update(this_rq);
fdf3e95d
VP
2736}
2737
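/*
 * Stand-alone user-space sketch (not part of the kernel) of the per-index
 * average above, cpu_load[i] = (old * (2^i - 1) + new) >> i, showing why the
 * "new_load += scale - 1" round-up matters: without it the average stalls one
 * below a constant target -- the "stuck on 9 if the load is 10" case from the
 * comment (idx = 1 here).
 */
#include <stdio.h>

int main(void)
{
	unsigned long plain = 0, rounded = 0, target = 10;
	int i, idx = 1, scale = 1 << idx;

	for (i = 0; i < 20; i++) {
		plain   = (plain * (scale - 1) + target) >> idx;
		/* the kernel only applies the round-up when load is increasing */
		rounded = (rounded * (scale - 1) + target + scale - 1) >> idx;
	}
	printf("without round-up: %lu, with round-up: %lu\n", plain, rounded);
	return 0;
}
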
6fa3eb70
S
2738# ifdef CONFIG_SMP
2739/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
2740static inline unsigned long get_rq_runnable_load(struct rq *rq)
2741{
2742 return rq->cfs.runnable_load_avg;
2743}
2744# else
2745static inline unsigned long get_rq_runnable_load(struct rq *rq)
2746{
2747 return rq->load.weight;
2748}
2749# endif
2750
3451d024 2751#ifdef CONFIG_NO_HZ_COMMON
5aaa0b7a
PZ
2752/*
2753 * There is no sane way to deal with nohz on smp when using jiffies because the
2754 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
2755 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
2756 *
2757 * Therefore we cannot use the delta approach from the regular tick since that
2758 * would seriously skew the load calculation. However we'll make do for those
2759 * updates happening while idle (nohz_idle_balance) or coming out of idle
2760 * (tick_nohz_idle_exit).
2761 *
2762 * This means we might still be one tick off for nohz periods.
2763 */
2764
556061b0
PZ
2765/*
2766 * Called from nohz_idle_balance() to update the load ratings before doing the
2767 * idle balance.
2768 */
6fa3eb70 2769/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
556061b0
PZ
2770void update_idle_cpu_load(struct rq *this_rq)
2771{
5aaa0b7a 2772 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
556061b0
PZ
2773 unsigned long load = this_rq->load.weight;
2774 unsigned long pending_updates;
2775
2776 /*
5aaa0b7a 2777 * bail if there's load or we're actually up-to-date.
556061b0
PZ
2778 */
2779 if (load || curr_jiffies == this_rq->last_load_update_tick)
2780 return;
2781
2782 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2783 this_rq->last_load_update_tick = curr_jiffies;
2784
2785 __update_cpu_load(this_rq, load, pending_updates);
2786}
2787
5aaa0b7a
PZ
2788/*
2789 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
2790 */
6fa3eb70 2791/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
5aaa0b7a
PZ
2792void update_cpu_load_nohz(void)
2793{
2794 struct rq *this_rq = this_rq();
2795 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
2796 unsigned long pending_updates;
2797
2798 if (curr_jiffies == this_rq->last_load_update_tick)
2799 return;
2800
2801 raw_spin_lock(&this_rq->lock);
2802 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2803 if (pending_updates) {
2804 this_rq->last_load_update_tick = curr_jiffies;
2805 /*
2806 * We were idle, this means load 0, the current load might be
2807 * !0 due to remote wakeups and the sort.
2808 */
2809 __update_cpu_load(this_rq, 0, pending_updates);
2810 }
2811 raw_spin_unlock(&this_rq->lock);
2812}
3451d024 2813#endif /* CONFIG_NO_HZ_COMMON */
5aaa0b7a 2814
556061b0
PZ
2815/*
2816 * Called from scheduler_tick()
2817 */
6fa3eb70 2818/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
fdf3e95d
VP
2819static void update_cpu_load_active(struct rq *this_rq)
2820{
6fa3eb70 2821 unsigned long load = get_rq_runnable_load(this_rq);
556061b0 2822 /*
5aaa0b7a 2823 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
556061b0
PZ
2824 */
2825 this_rq->last_load_update_tick = jiffies;
6fa3eb70 2826 __update_cpu_load(this_rq, load, 1);
46cb4b7c 2827
74f5187a 2828 calc_load_account_active(this_rq);
46cb4b7c
SS
2829}
2830
dd41f596 2831#ifdef CONFIG_SMP
8a0be9ef 2832
46cb4b7c 2833/*
38022906
PZ
2834 * sched_exec - execve() is a valuable balancing opportunity, because at
2835 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 2836 */
38022906 2837void sched_exec(void)
46cb4b7c 2838{
38022906 2839 struct task_struct *p = current;
1da177e4 2840 unsigned long flags;
0017d735 2841 int dest_cpu;
46cb4b7c 2842
8f42ced9 2843 raw_spin_lock_irqsave(&p->pi_lock, flags);
7608dec2 2844 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
0017d735
PZ
2845 if (dest_cpu == smp_processor_id())
2846 goto unlock;
38022906 2847
8f42ced9 2848 if (likely(cpu_active(dest_cpu))) {
969c7921 2849 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 2850
8f42ced9
PZ
2851 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2852 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
2853 return;
2854 }
0017d735 2855unlock:
8f42ced9 2856 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 2857}
dd41f596 2858
1da177e4
LT
2859#endif
2860
1da177e4 2861DEFINE_PER_CPU(struct kernel_stat, kstat);
3292beb3 2862DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
1da177e4
LT
2863
2864EXPORT_PER_CPU_SYMBOL(kstat);
3292beb3 2865EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
1da177e4
LT
2866
2867/*
c5f8d995 2868 * Return any ns on the sched_clock that have not yet been accounted in
f06febc9 2869 * @p in case that task is currently running.
c5f8d995
HS
2870 *
2871 * Called with task_rq_lock() held on @rq.
1da177e4 2872 */
c5f8d995
HS
2873static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2874{
2875 u64 ns = 0;
2876
2877 if (task_current(rq, p)) {
2878 update_rq_clock(rq);
305e6835 2879 ns = rq->clock_task - p->se.exec_start;
c5f8d995
HS
2880 if ((s64)ns < 0)
2881 ns = 0;
2882 }
2883
2884 return ns;
2885}
2886
bb34d92f 2887unsigned long long task_delta_exec(struct task_struct *p)
1da177e4 2888{
1da177e4 2889 unsigned long flags;
41b86e9c 2890 struct rq *rq;
bb34d92f 2891 u64 ns = 0;
48f24c4d 2892
41b86e9c 2893 rq = task_rq_lock(p, &flags);
c5f8d995 2894 ns = do_task_delta_exec(p, rq);
0122ec5b 2895 task_rq_unlock(rq, p, &flags);
1508487e 2896
c5f8d995
HS
2897 return ns;
2898}
f06febc9 2899
c5f8d995
HS
2900/*
2901 * Return accounted runtime for the task.
2902 * In case the task is currently running, return the runtime plus current's
2903 * pending runtime that have not been accounted yet.
2904 */
2905unsigned long long task_sched_runtime(struct task_struct *p)
2906{
2907 unsigned long flags;
2908 struct rq *rq;
2909 u64 ns = 0;
2910
2911 rq = task_rq_lock(p, &flags);
2912 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
0122ec5b 2913 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
2914
2915 return ns;
2916}
48f24c4d 2917
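/*
 * User-space sketch (not part of this file): one well-known consumer of this
 * accounting.  The per-thread CPU clock read below is believed to be serviced
 * through task_sched_runtime() via the posix-cpu-timers code (CPUCLOCK_SCHED).
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	volatile unsigned long spin;

	for (spin = 0; spin < 50000000UL; spin++)
		;	/* burn some CPU so there is runtime to report */

	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
		printf("this thread ran for %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
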
7835b98b
CL
2918/*
2919 * This function gets called by the timer code, with HZ frequency.
2920 * We call it with interrupts disabled.
7835b98b
CL
2921 */
2922void scheduler_tick(void)
2923{
7835b98b
CL
2924 int cpu = smp_processor_id();
2925 struct rq *rq = cpu_rq(cpu);
dd41f596 2926 struct task_struct *curr = rq->curr;
3e51f33f
PZ
2927
2928 sched_clock_tick();
dd41f596 2929
05fa785c 2930 raw_spin_lock(&rq->lock);
3e51f33f 2931 update_rq_clock(rq);
fa85ae24 2932 curr->sched_class->task_tick(rq, curr, 0);
6fa3eb70
S
2933 update_cpu_load_active(rq);
2934#ifdef CONFIG_MT_RT_SCHED
2935 mt_check_rt_policy(rq);
2936#endif
05fa785c 2937 raw_spin_unlock(&rq->lock);
7835b98b 2938
e9d2b064 2939 perf_event_task_tick();
6fa3eb70
S
2940#ifdef CONFIG_MT_SCHED_MONITOR
2941 if(smp_processor_id() == 0) //only record by CPU#0
2942 mt_save_irq_counts();
2943#endif
e418e1c2 2944#ifdef CONFIG_SMP
6eb57e0d 2945 rq->idle_balance = idle_cpu(cpu);
dd41f596 2946 trigger_load_balance(rq, cpu);
e418e1c2 2947#endif
265f22a9 2948 rq_last_tick_reset(rq);
1da177e4
LT
2949}
2950
265f22a9
FW
2951#ifdef CONFIG_NO_HZ_FULL
2952/**
2953 * scheduler_tick_max_deferment
2954 *
2955 * Keep at least one tick per second when a single
2956 * active task is running because the scheduler doesn't
 2957 * yet completely support a full dynticks environment.
2958 *
2959 * This makes sure that uptime, CFS vruntime, load
2960 * balancing, etc... continue to move forward, even
2961 * with a very low granularity.
2962 */
2963u64 scheduler_tick_max_deferment(void)
2964{
2965 struct rq *rq = this_rq();
2966 unsigned long next, now = ACCESS_ONCE(jiffies);
2967
2968 next = rq->last_sched_tick + HZ;
2969
2970 if (time_before_eq(next, now))
2971 return 0;
2972
2973 return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
1da177e4 2974}
265f22a9 2975#endif
1da177e4 2976
132380a0 2977notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
2978{
2979 if (in_lock_functions(addr)) {
2980 addr = CALLER_ADDR2;
2981 if (in_lock_functions(addr))
2982 addr = CALLER_ADDR3;
2983 }
2984 return addr;
2985}
1da177e4 2986
7e49fcce
SR
2987#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2988 defined(CONFIG_PREEMPT_TRACER))
2989
43627582 2990void __kprobes add_preempt_count(int val)
1da177e4 2991{
6cd8a4bb 2992#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
2993 /*
2994 * Underflow?
2995 */
9a11b49a
IM
2996 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2997 return;
6cd8a4bb 2998#endif
1da177e4 2999 preempt_count() += val;
6cd8a4bb 3000#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3001 /*
3002 * Spinlock count overflowing soon?
3003 */
33859f7f
MOS
3004 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3005 PREEMPT_MASK - 10);
6cd8a4bb 3006#endif
6fa3eb70
S
3007 //if (preempt_count() == val)
3008 // trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3009 if (preempt_count() == (val & ~PREEMPT_ACTIVE)){
3010#ifdef CONFIG_PREEMPT_TRACER
3011 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3012#endif
3013#ifdef CONFIG_PREEMPT_MONITOR
3014 if(unlikely(__raw_get_cpu_var(mtsched_mon_enabled) & 0x1)){
3015 //current->t_add_prmpt = sched_clock();
3016 MT_trace_preempt_off();
3017 }
3018#endif
3019 }
1da177e4
LT
3020}
3021EXPORT_SYMBOL(add_preempt_count);
3022
43627582 3023void __kprobes sub_preempt_count(int val)
1da177e4 3024{
6cd8a4bb 3025#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3026 /*
3027 * Underflow?
3028 */
01e3eb82 3029 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 3030 return;
1da177e4
LT
3031 /*
3032 * Is the spinlock portion underflowing?
3033 */
9a11b49a
IM
3034 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3035 !(preempt_count() & PREEMPT_MASK)))
3036 return;
6cd8a4bb 3037#endif
9a11b49a 3038
6fa3eb70
S
3039 //if (preempt_count() == val)
3040 // trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3041 if (preempt_count() == (val & ~PREEMPT_ACTIVE)){
3042#ifdef CONFIG_PREEMPT_TRACER
3043 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3044#endif
3045#ifdef CONFIG_PREEMPT_MONITOR
3046 if(unlikely(__raw_get_cpu_var(mtsched_mon_enabled) & 0x1)){
3047 MT_trace_preempt_on();
3048 }
3049#endif
3050 }
1da177e4
LT
3051 preempt_count() -= val;
3052}
3053EXPORT_SYMBOL(sub_preempt_count);
3054
3055#endif
3056
3057/*
dd41f596 3058 * Print scheduling while atomic bug:
1da177e4 3059 */
dd41f596 3060static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 3061{
664dfa65
DJ
3062 if (oops_in_progress)
3063 return;
3064
3df0fc5b
PZ
3065 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3066 prev->comm, prev->pid, preempt_count());
838225b4 3067
dd41f596 3068 debug_show_held_locks(prev);
e21f5b15 3069 print_modules();
dd41f596
IM
3070 if (irqs_disabled())
3071 print_irqtrace_events(prev);
6135fc1e 3072 dump_stack();
373d4d09 3073 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
6fa3eb70 3074 BUG_ON(1);
dd41f596 3075}
1da177e4 3076
dd41f596
IM
3077/*
3078 * Various schedule()-time debugging checks and statistics:
3079 */
3080static inline void schedule_debug(struct task_struct *prev)
3081{
1da177e4 3082 /*
41a2d6cf 3083 * Test if we are atomic. Since do_exit() needs to call into
1da177e4
LT
3084 * schedule() atomically, we ignore that path for now.
3085 * Otherwise, whine if we are scheduling when we should not be.
3086 */
3f33a7ce 3087 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
dd41f596 3088 __schedule_bug(prev);
b3fbab05 3089 rcu_sleep_check();
dd41f596 3090
1da177e4
LT
3091 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3092
2d72376b 3093 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
3094}
3095
6cecd084 3096static void put_prev_task(struct rq *rq, struct task_struct *prev)
df1c99d4 3097{
61eadef6 3098 if (prev->on_rq || rq->skip_clock_update < 0)
a64692a3 3099 update_rq_clock(rq);
6cecd084 3100 prev->sched_class->put_prev_task(rq, prev);
df1c99d4
MG
3101}
3102
dd41f596
IM
3103/*
3104 * Pick up the highest-prio task:
3105 */
3106static inline struct task_struct *
b67802ea 3107pick_next_task(struct rq *rq)
dd41f596 3108{
5522d5d5 3109 const struct sched_class *class;
dd41f596 3110 struct task_struct *p;
1da177e4
LT
3111
3112 /*
dd41f596
IM
3113 * Optimization: we know that if all tasks are in
3114 * the fair class we can call that function directly:
1da177e4 3115 */
953bfcd1 3116 if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
fb8d4724 3117 p = fair_sched_class.pick_next_task(rq);
dd41f596
IM
3118 if (likely(p))
3119 return p;
1da177e4
LT
3120 }
3121
34f971f6 3122 for_each_class(class) {
fb8d4724 3123 p = class->pick_next_task(rq);
dd41f596
IM
3124 if (p)
3125 return p;
dd41f596 3126 }
34f971f6
PZ
3127
3128 BUG(); /* the idle class will always have a runnable task */
dd41f596 3129}
1da177e4 3130
dd41f596 3131/*
c259e01a 3132 * __schedule() is the main scheduler function.
edde96ea
PE
3133 *
3134 * The main means of driving the scheduler and thus entering this function are:
3135 *
3136 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
3137 *
3138 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
3139 * paths. For example, see arch/x86/entry_64.S.
3140 *
3141 * To drive preemption between tasks, the scheduler sets the flag in timer
3142 * interrupt handler scheduler_tick().
3143 *
3144 * 3. Wakeups don't really cause entry into schedule(). They add a
3145 * task to the run-queue and that's it.
3146 *
3147 * Now, if the new task added to the run-queue preempts the current
3148 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
3149 * called on the nearest possible occasion:
3150 *
3151 * - If the kernel is preemptible (CONFIG_PREEMPT=y):
3152 *
 3153 * - in syscall or exception context, at the next outermost
3154 * preempt_enable(). (this might be as soon as the wake_up()'s
3155 * spin_unlock()!)
3156 *
3157 * - in IRQ context, return from interrupt-handler to
3158 * preemptible context
3159 *
3160 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
3161 * then at the next:
3162 *
3163 * - cond_resched() call
3164 * - explicit schedule() call
3165 * - return from syscall or exception to user-space
3166 * - return from interrupt-handler to user-space
dd41f596 3167 */
c259e01a 3168static void __sched __schedule(void)
dd41f596
IM
3169{
3170 struct task_struct *prev, *next;
67ca7bde 3171 unsigned long *switch_count;
dd41f596 3172 struct rq *rq;
31656519 3173 int cpu;
dd41f596 3174
ff743345
PZ
3175need_resched:
3176 preempt_disable();
dd41f596
IM
3177 cpu = smp_processor_id();
3178 rq = cpu_rq(cpu);
25502a6c 3179 rcu_note_context_switch(cpu);
dd41f596 3180 prev = rq->curr;
dd41f596 3181
dd41f596 3182 schedule_debug(prev);
1da177e4 3183
31656519 3184 if (sched_feat(HRTICK))
f333fdc9 3185 hrtick_clear(rq);
6fa3eb70
S
3186#ifdef CONFIG_MT_SCHED_MONITOR
3187 __raw_get_cpu_var(MT_trace_in_sched) = 1;
3188#endif
8f4d37ec 3189
57f74b6e
ON
3190 /*
3191 * Make sure that signal_pending_state()->signal_pending() below
3192 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3193 * done by the caller to avoid the race with signal_wake_up().
3194 */
3195 smp_mb__before_spinlock();
05fa785c 3196 raw_spin_lock_irq(&rq->lock);
1da177e4 3197
246d86b5 3198 switch_count = &prev->nivcsw;
1da177e4 3199 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
21aa9af0 3200 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3201 prev->state = TASK_RUNNING;
21aa9af0 3202 } else {
2acca55e
PZ
3203 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3204 prev->on_rq = 0;
3205
21aa9af0 3206 /*
2acca55e
PZ
3207 * If a worker went to sleep, notify and ask workqueue
3208 * whether it wants to wake up a task to maintain
3209 * concurrency.
21aa9af0
TH
3210 */
3211 if (prev->flags & PF_WQ_WORKER) {
3212 struct task_struct *to_wakeup;
3213
3214 to_wakeup = wq_worker_sleeping(prev, cpu);
3215 if (to_wakeup)
3216 try_to_wake_up_local(to_wakeup);
3217 }
21aa9af0 3218 }
dd41f596 3219 switch_count = &prev->nvcsw;
1da177e4
LT
3220 }
3221
3f029d3c 3222 pre_schedule(rq, prev);
f65eda4f 3223
dd41f596 3224 if (unlikely(!rq->nr_running))
1da177e4 3225 idle_balance(cpu, rq);
1da177e4 3226
df1c99d4 3227 put_prev_task(rq, prev);
b67802ea 3228 next = pick_next_task(rq);
f26f9aff
MG
3229 clear_tsk_need_resched(prev);
3230 rq->skip_clock_update = 0;
1da177e4 3231
1da177e4 3232 if (likely(prev != next)) {
1da177e4
LT
3233 rq->nr_switches++;
3234 rq->curr = next;
3235 ++*switch_count;
3236
dd41f596 3237 context_switch(rq, prev, next); /* unlocks the rq */
8f4d37ec 3238 /*
246d86b5
ON
 3239 * The context switch has flipped the stack from under us
3240 * and restored the local variables which were saved when
3241 * this task called schedule() in the past. prev == current
3242 * is still correct, but it can be moved to another cpu/rq.
8f4d37ec
PZ
3243 */
3244 cpu = smp_processor_id();
3245 rq = cpu_rq(cpu);
1da177e4 3246 } else
05fa785c 3247 raw_spin_unlock_irq(&rq->lock);
1da177e4 3248
6fa3eb70
S
3249#ifdef CONFIG_MT_RT_SCHED
3250 mt_post_schedule(rq);
3251#endif
3252#ifdef CONFIG_MT_SCHED_MONITOR
3253 __raw_get_cpu_var(MT_trace_in_sched) = 0;
3254#endif
3f029d3c 3255 post_schedule(rq);
1da177e4 3256
ba74c144 3257 sched_preempt_enable_no_resched();
ff743345 3258 if (need_resched())
1da177e4
LT
3259 goto need_resched;
3260}
c259e01a 3261
9c40cef2
TG
3262static inline void sched_submit_work(struct task_struct *tsk)
3263{
3c7d5184 3264 if (!tsk->state || tsk_is_pi_blocked(tsk))
9c40cef2
TG
3265 return;
3266 /*
3267 * If we are going to sleep and we have plugged IO queued,
3268 * make sure to submit it to avoid deadlocks.
3269 */
3270 if (blk_needs_flush_plug(tsk))
3271 blk_schedule_flush_plug(tsk);
3272}
3273
6ebbe7a0 3274asmlinkage void __sched schedule(void)
c259e01a 3275{
9c40cef2
TG
3276 struct task_struct *tsk = current;
3277
3278 sched_submit_work(tsk);
c259e01a
TG
3279 __schedule();
3280}
1da177e4
LT
3281EXPORT_SYMBOL(schedule);
3282
91d1aa43 3283#ifdef CONFIG_CONTEXT_TRACKING
20ab65e3
FW
3284asmlinkage void __sched schedule_user(void)
3285{
3286 /*
3287 * If we come here after a random call to set_need_resched(),
3288 * or we have been woken up remotely but the IPI has not yet arrived,
3289 * we haven't yet exited the RCU idle mode. Do it here manually until
3290 * we find a better solution.
3291 */
91d1aa43 3292 user_exit();
20ab65e3 3293 schedule();
91d1aa43 3294 user_enter();
20ab65e3
FW
3295}
3296#endif
3297
c5491ea7
TG
3298/**
3299 * schedule_preempt_disabled - called with preemption disabled
3300 *
3301 * Returns with preemption disabled. Note: preempt_count must be 1
3302 */
3303void __sched schedule_preempt_disabled(void)
3304{
ba74c144 3305 sched_preempt_enable_no_resched();
c5491ea7
TG
3306 schedule();
3307 preempt_disable();
3308}
3309
1da177e4
LT
3310#ifdef CONFIG_PREEMPT
3311/*
2ed6e34f 3312 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 3313 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
3314 * occur there and call schedule directly.
3315 */
d1f74e20 3316asmlinkage void __sched notrace preempt_schedule(void)
1da177e4
LT
3317{
3318 struct thread_info *ti = current_thread_info();
6478d880 3319
1da177e4
LT
3320 /*
3321 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3322 * we do not want to preempt the current task. Just return..
1da177e4 3323 */
beed33a8 3324 if (likely(ti->preempt_count || irqs_disabled()))
1da177e4
LT
3325 return;
3326
3a5c359a 3327 do {
d1f74e20 3328 add_preempt_count_notrace(PREEMPT_ACTIVE);
c259e01a 3329 __schedule();
d1f74e20 3330 sub_preempt_count_notrace(PREEMPT_ACTIVE);
1da177e4 3331
3a5c359a
AK
3332 /*
3333 * Check again in case we missed a preemption opportunity
3334 * between schedule and now.
3335 */
3336 barrier();
5ed0cec0 3337 } while (need_resched());
1da177e4 3338}
1da177e4
LT
3339EXPORT_SYMBOL(preempt_schedule);
3340
3341/*
2ed6e34f 3342 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3343 * off of irq context.
3344 * Note, that this is called and return with irqs disabled. This will
3345 * protect us against recursive calling from irq.
3346 */
3347asmlinkage void __sched preempt_schedule_irq(void)
3348{
3349 struct thread_info *ti = current_thread_info();
b22366cd 3350 enum ctx_state prev_state;
6478d880 3351
2ed6e34f 3352 /* Catch callers which need to be fixed */
1da177e4
LT
3353 BUG_ON(ti->preempt_count || !irqs_disabled());
3354
b22366cd
FW
3355 prev_state = exception_enter();
3356
3a5c359a
AK
3357 do {
3358 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a 3359 local_irq_enable();
c259e01a 3360 __schedule();
3a5c359a 3361 local_irq_disable();
3a5c359a 3362 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 3363
3a5c359a
AK
3364 /*
3365 * Check again in case we missed a preemption opportunity
3366 * between schedule and now.
3367 */
3368 barrier();
5ed0cec0 3369 } while (need_resched());
b22366cd
FW
3370
3371 exception_exit(prev_state);
1da177e4
LT
3372}
3373
3374#endif /* CONFIG_PREEMPT */
3375
63859d4f 3376int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3377 void *key)
1da177e4 3378{
63859d4f 3379 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3380}
1da177e4
LT
3381EXPORT_SYMBOL(default_wake_function);
3382
3383/*
41a2d6cf
IM
3384 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3385 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1da177e4
LT
3386 * number) then we wake all the non-exclusive tasks and one exclusive task.
3387 *
3388 * There are circumstances in which we can try to wake a task which has already
41a2d6cf 3389 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1da177e4
LT
3390 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3391 */
78ddb08f 3392static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
63859d4f 3393 int nr_exclusive, int wake_flags, void *key)
1da177e4 3394{
2e45874c 3395 wait_queue_t *curr, *next;
1da177e4 3396
2e45874c 3397 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
48f24c4d
IM
3398 unsigned flags = curr->flags;
3399
63859d4f 3400 if (curr->func(curr, mode, wake_flags, key) &&
48f24c4d 3401 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1da177e4
LT
3402 break;
3403 }
3404}
3405
3406/**
3407 * __wake_up - wake up threads blocked on a waitqueue.
3408 * @q: the waitqueue
3409 * @mode: which threads
3410 * @nr_exclusive: how many wake-one or wake-many threads to wake up
67be2dd1 3411 * @key: is directly passed to the wakeup function
50fa610a
DH
3412 *
3413 * It may be assumed that this function implies a write memory barrier before
3414 * changing the task state if and only if any tasks are woken up.
1da177e4 3415 */
7ad5b3a5 3416void __wake_up(wait_queue_head_t *q, unsigned int mode,
95cdf3b7 3417 int nr_exclusive, void *key)
1da177e4
LT
3418{
3419 unsigned long flags;
3420
3421 spin_lock_irqsave(&q->lock, flags);
3422 __wake_up_common(q, mode, nr_exclusive, 0, key);
3423 spin_unlock_irqrestore(&q->lock, flags);
3424}
1da177e4
LT
3425EXPORT_SYMBOL(__wake_up);
3426
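/*
 * Illustrative sketch (not part of this file): the usual route into
 * __wake_up() from driver code, via wait_event_interruptible() and wake_up().
 * The demo_* names and the "data ready" condition are hypothetical.
 */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_data_ready;

static int demo_consumer(void)
{
	/* sleeps in TASK_INTERRUPTIBLE until the condition becomes true */
	if (wait_event_interruptible(demo_wq, demo_data_ready))
		return -ERESTARTSYS;	/* woken by a signal instead */
	demo_data_ready = 0;
	return 0;
}

static void demo_producer(void)
{
	demo_data_ready = 1;
	wake_up(&demo_wq);	/* ends up in __wake_up(..., TASK_NORMAL, 1, NULL) */
}
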
3427/*
3428 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3429 */
63b20011 3430void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
1da177e4 3431{
63b20011 3432 __wake_up_common(q, mode, nr, 0, NULL);
1da177e4 3433}
22c43c81 3434EXPORT_SYMBOL_GPL(__wake_up_locked);
1da177e4 3435
4ede816a
DL
3436void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
3437{
3438 __wake_up_common(q, mode, 1, 0, key);
3439}
bf294b41 3440EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4ede816a 3441
1da177e4 3442/**
4ede816a 3443 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1da177e4
LT
3444 * @q: the waitqueue
3445 * @mode: which threads
3446 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4ede816a 3447 * @key: opaque value to be passed to wakeup targets
1da177e4
LT
3448 *
 3449 * The sync wakeup differs in that the waker knows that it will schedule
3450 * away soon, so while the target thread will be woken up, it will not
3451 * be migrated to another CPU - ie. the two threads are 'synchronized'
3452 * with each other. This can prevent needless bouncing between CPUs.
3453 *
3454 * On UP it can prevent extra preemption.
50fa610a
DH
3455 *
3456 * It may be assumed that this function implies a write memory barrier before
3457 * changing the task state if and only if any tasks are woken up.
1da177e4 3458 */
4ede816a
DL
3459void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
3460 int nr_exclusive, void *key)
1da177e4
LT
3461{
3462 unsigned long flags;
7d478721 3463 int wake_flags = WF_SYNC;
1da177e4
LT
3464
3465 if (unlikely(!q))
3466 return;
3467
3468 if (unlikely(!nr_exclusive))
7d478721 3469 wake_flags = 0;
1da177e4
LT
3470
3471 spin_lock_irqsave(&q->lock, flags);
7d478721 3472 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
1da177e4
LT
3473 spin_unlock_irqrestore(&q->lock, flags);
3474}
4ede816a
DL
3475EXPORT_SYMBOL_GPL(__wake_up_sync_key);
3476
3477/*
3478 * __wake_up_sync - see __wake_up_sync_key()
3479 */
3480void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3481{
3482 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
3483}
1da177e4
LT
3484EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
3485
65eb3dc6
KD
3486/**
3487 * complete: - signals a single thread waiting on this completion
3488 * @x: holds the state of this particular completion
3489 *
3490 * This will wake up a single thread waiting on this completion. Threads will be
3491 * awakened in the same order in which they were queued.
3492 *
3493 * See also complete_all(), wait_for_completion() and related routines.
50fa610a
DH
3494 *
3495 * It may be assumed that this function implies a write memory barrier before
3496 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3497 */
b15136e9 3498void complete(struct completion *x)
1da177e4
LT
3499{
3500 unsigned long flags;
3501
3502 spin_lock_irqsave(&x->wait.lock, flags);
3503 x->done++;
d9514f6c 3504 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1da177e4
LT
3505 spin_unlock_irqrestore(&x->wait.lock, flags);
3506}
3507EXPORT_SYMBOL(complete);
3508
65eb3dc6
KD
3509/**
3510 * complete_all: - signals all threads waiting on this completion
3511 * @x: holds the state of this particular completion
3512 *
3513 * This will wake up all threads waiting on this particular completion event.
50fa610a
DH
3514 *
3515 * It may be assumed that this function implies a write memory barrier before
3516 * changing the task state if and only if any tasks are woken up.
65eb3dc6 3517 */
b15136e9 3518void complete_all(struct completion *x)
1da177e4
LT
3519{
3520 unsigned long flags;
3521
3522 spin_lock_irqsave(&x->wait.lock, flags);
3523 x->done += UINT_MAX/2;
d9514f6c 3524 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
1da177e4
LT
3525 spin_unlock_irqrestore(&x->wait.lock, flags);
3526}
3527EXPORT_SYMBOL(complete_all);
3528
8cbbe86d 3529static inline long __sched
686855f5
VD
3530do_wait_for_common(struct completion *x,
3531 long (*action)(long), long timeout, int state)
1da177e4 3532{
1da177e4
LT
3533 if (!x->done) {
3534 DECLARE_WAITQUEUE(wait, current);
3535
a93d2f17 3536 __add_wait_queue_tail_exclusive(&x->wait, &wait);
1da177e4 3537 do {
94d3d824 3538 if (signal_pending_state(state, current)) {
ea71a546
ON
3539 timeout = -ERESTARTSYS;
3540 break;
8cbbe86d
AK
3541 }
3542 __set_current_state(state);
1da177e4 3543 spin_unlock_irq(&x->wait.lock);
686855f5 3544 timeout = action(timeout);
1da177e4 3545 spin_lock_irq(&x->wait.lock);
ea71a546 3546 } while (!x->done && timeout);
1da177e4 3547 __remove_wait_queue(&x->wait, &wait);
ea71a546
ON
3548 if (!x->done)
3549 return timeout;
1da177e4
LT
3550 }
3551 x->done--;
ea71a546 3552 return timeout ?: 1;
1da177e4 3553}
1da177e4 3554
686855f5
VD
3555static inline long __sched
3556__wait_for_common(struct completion *x,
3557 long (*action)(long), long timeout, int state)
1da177e4 3558{
1da177e4
LT
3559 might_sleep();
3560
3561 spin_lock_irq(&x->wait.lock);
686855f5 3562 timeout = do_wait_for_common(x, action, timeout, state);
1da177e4 3563 spin_unlock_irq(&x->wait.lock);
8cbbe86d
AK
3564 return timeout;
3565}
1da177e4 3566
686855f5
VD
3567static long __sched
3568wait_for_common(struct completion *x, long timeout, int state)
3569{
3570 return __wait_for_common(x, schedule_timeout, timeout, state);
3571}
3572
3573static long __sched
3574wait_for_common_io(struct completion *x, long timeout, int state)
3575{
3576 return __wait_for_common(x, io_schedule_timeout, timeout, state);
3577}
3578
65eb3dc6
KD
3579/**
3580 * wait_for_completion: - waits for completion of a task
3581 * @x: holds the state of this particular completion
3582 *
3583 * This waits to be signaled for completion of a specific task. It is NOT
3584 * interruptible and there is no timeout.
3585 *
3586 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
3587 * and interrupt capability. Also see complete().
3588 */
b15136e9 3589void __sched wait_for_completion(struct completion *x)
8cbbe86d
AK
3590{
3591 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1da177e4 3592}
8cbbe86d 3593EXPORT_SYMBOL(wait_for_completion);
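/*
 * Illustrative sketch, not part of this file: the common completion pattern,
 * where one side waits with wait_for_completion() and the other signals with
 * complete(). my_done, my_waiter() and my_signaller() are hypothetical names.
 */
static DECLARE_COMPLETION(my_done);

static void my_waiter(void)
{
	/* Blocks uninterruptibly until my_signaller() has run. */
	wait_for_completion(&my_done);
}

static void my_signaller(void)
{
	/* Wakes exactly one waiter, in queue order. */
	complete(&my_done);
}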
1da177e4 3594
65eb3dc6
KD
3595/**
3596 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
3597 * @x: holds the state of this particular completion
3598 * @timeout: timeout value in jiffies
3599 *
3600 * This waits for either a completion of a specific task to be signaled or for a
3601 * specified timeout to expire. The timeout is in jiffies. It is not
3602 * interruptible.
c6dc7f05
BF
3603 *
3604 * The return value is 0 if timed out, and positive (at least 1, or number of
3605 * jiffies left till timeout) if completed.
65eb3dc6 3606 */
b15136e9 3607unsigned long __sched
8cbbe86d 3608wait_for_completion_timeout(struct completion *x, unsigned long timeout)
1da177e4 3609{
8cbbe86d 3610 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
1da177e4 3611}
8cbbe86d 3612EXPORT_SYMBOL(wait_for_completion_timeout);
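/*
 * Illustrative sketch, not part of this file: interpreting the return value
 * of wait_for_completion_timeout(). The completion pointer and the one
 * second budget (HZ jiffies) are hypothetical.
 */
static int my_wait_with_timeout(struct completion *my_done)
{
	unsigned long left = wait_for_completion_timeout(my_done, HZ);

	if (!left)
		return -ETIMEDOUT;	/* 0: the timeout expired first */

	return 0;			/* >0: jiffies that were still left */
}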
1da177e4 3613
686855f5
VD
3614/**
3615 * wait_for_completion_io: - waits for completion of a task
3616 * @x: holds the state of this particular completion
3617 *
3618 * This waits to be signaled for completion of a specific task. It is NOT
3619 * interruptible and there is no timeout. The caller is accounted as waiting
3620 * for IO.
3621 */
3622void __sched wait_for_completion_io(struct completion *x)
3623{
3624 wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
3625}
3626EXPORT_SYMBOL(wait_for_completion_io);
3627
3628/**
3629 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
3630 * @x: holds the state of this particular completion
3631 * @timeout: timeout value in jiffies
3632 *
3633 * This waits for either a completion of a specific task to be signaled or for a
3634 * specified timeout to expire. The timeout is in jiffies. It is not
3635 * interruptible. The caller is accounted as waiting for IO.
3636 *
3637 * The return value is 0 if timed out, and positive (at least 1, or number of
3638 * jiffies left till timeout) if completed.
3639 */
3640unsigned long __sched
3641wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
3642{
3643 return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
3644}
3645EXPORT_SYMBOL(wait_for_completion_io_timeout);
3646
65eb3dc6
KD
3647/**
3648 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
3649 * @x: holds the state of this particular completion
3650 *
3651 * This waits for completion of a specific task to be signaled. It is
3652 * interruptible.
c6dc7f05
BF
3653 *
3654 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3655 */
8cbbe86d 3656int __sched wait_for_completion_interruptible(struct completion *x)
0fec171c 3657{
51e97990
AK
3658 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3659 if (t == -ERESTARTSYS)
3660 return t;
3661 return 0;
0fec171c 3662}
8cbbe86d 3663EXPORT_SYMBOL(wait_for_completion_interruptible);
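/*
 * Illustrative sketch, not part of this file: an interruptible wait that
 * propagates a pending signal back to its caller. my_done is hypothetical.
 */
static int my_wait_interruptible(struct completion *my_done)
{
	int ret = wait_for_completion_interruptible(my_done);

	if (ret == -ERESTARTSYS)
		return ret;	/* interrupted by a signal */

	return 0;		/* the completion was signalled */
}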
1da177e4 3664
65eb3dc6
KD
3665/**
3666 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
3667 * @x: holds the state of this particular completion
3668 * @timeout: timeout value in jiffies
3669 *
3670 * This waits for either a completion of a specific task to be signaled or for a
3671 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
c6dc7f05
BF
3672 *
3673 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3674 * positive (at least 1, or number of jiffies left till timeout) if completed.
65eb3dc6 3675 */
6bf41237 3676long __sched
8cbbe86d
AK
3677wait_for_completion_interruptible_timeout(struct completion *x,
3678 unsigned long timeout)
0fec171c 3679{
8cbbe86d 3680 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
0fec171c 3681}
8cbbe86d 3682EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
1da177e4 3683
65eb3dc6
KD
3684/**
3685 * wait_for_completion_killable: - waits for completion of a task (killable)
3686 * @x: holds the state of this particular completion
3687 *
3688 * This waits to be signaled for completion of a specific task. It can be
3689 * interrupted by a kill signal.
c6dc7f05
BF
3690 *
3691 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
65eb3dc6 3692 */
009e577e
MW
3693int __sched wait_for_completion_killable(struct completion *x)
3694{
3695 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
3696 if (t == -ERESTARTSYS)
3697 return t;
3698 return 0;
3699}
3700EXPORT_SYMBOL(wait_for_completion_killable);
3701
0aa12fb4
SW
3702/**
3703 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
3704 * @x: holds the state of this particular completion
3705 * @timeout: timeout value in jiffies
3706 *
3707 * This waits for either a completion of a specific task to be
3708 * signaled or for a specified timeout to expire. It can be
3709 * interrupted by a kill signal. The timeout is in jiffies.
c6dc7f05
BF
3710 *
3711 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3712 * positive (at least 1, or number of jiffies left till timeout) if completed.
0aa12fb4 3713 */
6bf41237 3714long __sched
0aa12fb4
SW
3715wait_for_completion_killable_timeout(struct completion *x,
3716 unsigned long timeout)
3717{
3718 return wait_for_common(x, timeout, TASK_KILLABLE);
3719}
3720EXPORT_SYMBOL(wait_for_completion_killable_timeout);
3721
be4de352
DC
3722/**
3723 * try_wait_for_completion - try to decrement a completion without blocking
3724 * @x: completion structure
3725 *
3726 * Returns: 0 if a decrement cannot be done without blocking
3727 * 1 if a decrement succeeded.
3728 *
3729 * If a completion is being used as a counting completion,
3730 * attempt to decrement the counter without blocking. This
3731 * enables us to avoid waiting if the resource the completion
3732 * is protecting is not available.
3733 */
3734bool try_wait_for_completion(struct completion *x)
3735{
7539a3b3 3736 unsigned long flags;
be4de352
DC
3737 int ret = 1;
3738
7539a3b3 3739 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
3740 if (!x->done)
3741 ret = 0;
3742 else
3743 x->done--;
7539a3b3 3744 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
3745 return ret;
3746}
3747EXPORT_SYMBOL(try_wait_for_completion);
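/*
 * Illustrative sketch, not part of this file: using a completion as a
 * counter and trying to claim one unit without sleeping. my_units is a
 * hypothetical counting completion.
 */
static bool my_try_claim(struct completion *my_units)
{
	if (try_wait_for_completion(my_units))
		return true;	/* a unit was available and is now consumed */

	return false;		/* claiming would have blocked; caller falls back */
}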
3748
3749/**
3750 * completion_done - Test to see if a completion has any waiters
3751 * @x: completion structure
3752 *
3753 * Returns: 0 if there are waiters (wait_for_completion() in progress)
3754 * 1 if there are no waiters.
3755 *
3756 */
3757bool completion_done(struct completion *x)
3758{
7539a3b3 3759 unsigned long flags;
be4de352
DC
3760 int ret = 1;
3761
7539a3b3 3762 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
3763 if (!x->done)
3764 ret = 0;
7539a3b3 3765 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
3766 return ret;
3767}
3768EXPORT_SYMBOL(completion_done);
3769
8cbbe86d
AK
3770static long __sched
3771sleep_on_common(wait_queue_head_t *q, int state, long timeout)
1da177e4 3772{
0fec171c
IM
3773 unsigned long flags;
3774 wait_queue_t wait;
3775
3776 init_waitqueue_entry(&wait, current);
1da177e4 3777
8cbbe86d 3778 __set_current_state(state);
1da177e4 3779
8cbbe86d
AK
3780 spin_lock_irqsave(&q->lock, flags);
3781 __add_wait_queue(q, &wait);
3782 spin_unlock(&q->lock);
3783 timeout = schedule_timeout(timeout);
3784 spin_lock_irq(&q->lock);
3785 __remove_wait_queue(q, &wait);
3786 spin_unlock_irqrestore(&q->lock, flags);
3787
3788 return timeout;
3789}
3790
3791void __sched interruptible_sleep_on(wait_queue_head_t *q)
3792{
3793 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3794}
1da177e4
LT
3795EXPORT_SYMBOL(interruptible_sleep_on);
3796
0fec171c 3797long __sched
95cdf3b7 3798interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3799{
8cbbe86d 3800 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
1da177e4 3801}
1da177e4
LT
3802EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3803
0fec171c 3804void __sched sleep_on(wait_queue_head_t *q)
1da177e4 3805{
8cbbe86d 3806 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 3807}
1da177e4
LT
3808EXPORT_SYMBOL(sleep_on);
3809
0fec171c 3810long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 3811{
8cbbe86d 3812 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
1da177e4 3813}
1da177e4
LT
3814EXPORT_SYMBOL(sleep_on_timeout);
3815
b29739f9
IM
3816#ifdef CONFIG_RT_MUTEXES
3817
3818/*
3819 * rt_mutex_setprio - set the current priority of a task
3820 * @p: task
3821 * @prio: prio value (kernel-internal form)
3822 *
3823 * This function changes the 'effective' priority of a task. It does
3824 * not touch ->normal_prio like __setscheduler().
3825 *
3826 * Used by the rt_mutex code to implement priority inheritance logic.
3827 */
36c8b586 3828void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 3829{
83b699ed 3830 int oldprio, on_rq, running;
70b97a7f 3831 struct rq *rq;
83ab0aa0 3832 const struct sched_class *prev_class;
b29739f9
IM
3833
3834 BUG_ON(prio < 0 || prio > MAX_PRIO);
3835
0122ec5b 3836 rq = __task_rq_lock(p);
b29739f9 3837
1c4dd99b
TG
3838 /*
3839 * Idle task boosting is a nono in general. There is one
3840 * exception, when PREEMPT_RT and NOHZ is active:
3841 *
3842 * The idle task calls get_next_timer_interrupt() and holds
3843 * the timer wheel base->lock on the CPU and another CPU wants
3844 * to access the timer (probably to cancel it). We can safely
3845 * ignore the boosting request, as the idle CPU runs this code
3846 * with interrupts disabled and will complete the lock
3847 * protected section without being interrupted. So there is no
3848 * real need to boost.
3849 */
3850 if (unlikely(p == rq->idle)) {
3851 WARN_ON(p != rq->curr);
3852 WARN_ON(p->pi_blocked_on);
3853 goto out_unlock;
3854 }
3855
a8027073 3856 trace_sched_pi_setprio(p, prio);
d5f9f942 3857 oldprio = p->prio;
83ab0aa0 3858 prev_class = p->sched_class;
fd2f4419 3859 on_rq = p->on_rq;
051a1d1a 3860 running = task_current(rq, p);
0e1f3483 3861 if (on_rq)
69be72c1 3862 dequeue_task(rq, p, 0);
0e1f3483
HS
3863 if (running)
3864 p->sched_class->put_prev_task(rq, p);
dd41f596
IM
3865
3866 if (rt_prio(prio))
3867 p->sched_class = &rt_sched_class;
3868 else
3869 p->sched_class = &fair_sched_class;
3870
b29739f9
IM
3871 p->prio = prio;
3872
0e1f3483
HS
3873 if (running)
3874 p->sched_class->set_curr_task(rq);
da7a735e 3875 if (on_rq)
371fd7e7 3876 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
cb469845 3877
da7a735e 3878 check_class_changed(rq, p, prev_class, oldprio);
1c4dd99b 3879out_unlock:
0122ec5b 3880 __task_rq_unlock(rq);
b29739f9 3881}
b29739f9 3882#endif
6fa3eb70
S
3883
3884#ifdef CONFIG_MT_PRIO_TRACER
3885void set_user_nice_core(struct task_struct *p, long nice)
3886{
3887 int old_prio, delta, on_rq;
3888 unsigned long flags;
3889 struct rq *rq;
3890
3891 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3892 return;
3893 /*
3894 * We have to be careful, if called from sys_setpriority(),
3895 * the task might be in the middle of scheduling on another CPU.
3896 */
3897 rq = task_rq_lock(p, &flags);
3898 /*
3899 * The RT priorities are set via sched_setscheduler(), but we still
3900 * allow the 'normal' nice value to be set - but as expected
3901 * it won't have any effect on scheduling until the task is
3902 * SCHED_FIFO/SCHED_RR:
3903 */
3904 if (task_has_rt_policy(p)) {
3905 p->static_prio = NICE_TO_PRIO(nice);
3906 goto out_unlock;
3907 }
3908 on_rq = p->on_rq;
3909 if (on_rq)
3910 dequeue_task(rq, p, 0);
3911
3912 p->static_prio = NICE_TO_PRIO(nice);
3913 set_load_weight(p);
3914 old_prio = p->prio;
3915 p->prio = effective_prio(p);
3916 delta = p->prio - old_prio;
3917
3918 if (on_rq) {
3919 enqueue_task(rq, p, 0);
3920 /*
3921 * If the task increased its priority or is running and
3922 * lowered its priority, then reschedule its CPU:
3923 */
3924 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3925 resched_task(rq->curr);
3926 }
3927out_unlock:
3928 task_rq_unlock(rq, p, &flags);
3929}
3930
3931void set_user_nice(struct task_struct *p, long nice)
3932{
3933 set_user_nice_core(p, nice);
3934 /* setting nice implies to set a normal sched policy */
3935 update_prio_tracer(task_pid_nr(p), NICE_TO_PRIO(nice), 0, PTS_KRNL);
3936}
3937#else /* !CONFIG_MT_PRIO_TRACER */
36c8b586 3938void set_user_nice(struct task_struct *p, long nice)
1da177e4 3939{
dd41f596 3940 int old_prio, delta, on_rq;
1da177e4 3941 unsigned long flags;
70b97a7f 3942 struct rq *rq;
1da177e4
LT
3943
3944 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3945 return;
3946 /*
3947 * We have to be careful, if called from sys_setpriority(),
3948 * the task might be in the middle of scheduling on another CPU.
3949 */
3950 rq = task_rq_lock(p, &flags);
3951 /*
3952 * The RT priorities are set via sched_setscheduler(), but we still
3953 * allow the 'normal' nice value to be set - but as expected
3954 * it won't have any effect on scheduling until the task is
dd41f596 3955 * SCHED_FIFO/SCHED_RR:
1da177e4 3956 */
e05606d3 3957 if (task_has_rt_policy(p)) {
1da177e4
LT
3958 p->static_prio = NICE_TO_PRIO(nice);
3959 goto out_unlock;
3960 }
fd2f4419 3961 on_rq = p->on_rq;
c09595f6 3962 if (on_rq)
69be72c1 3963 dequeue_task(rq, p, 0);
1da177e4 3964
1da177e4 3965 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 3966 set_load_weight(p);
b29739f9
IM
3967 old_prio = p->prio;
3968 p->prio = effective_prio(p);
3969 delta = p->prio - old_prio;
1da177e4 3970
dd41f596 3971 if (on_rq) {
371fd7e7 3972 enqueue_task(rq, p, 0);
1da177e4 3973 /*
d5f9f942
AM
3974 * If the task increased its priority or is running and
3975 * lowered its priority, then reschedule its CPU:
1da177e4 3976 */
d5f9f942 3977 if (delta < 0 || (delta > 0 && task_running(rq, p)))
1da177e4
LT
3978 resched_task(rq->curr);
3979 }
3980out_unlock:
0122ec5b 3981 task_rq_unlock(rq, p, &flags);
1da177e4 3982}
6fa3eb70 3983#endif
1da177e4
LT
3984EXPORT_SYMBOL(set_user_nice);
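/*
 * Illustrative sketch, not part of this file: lowering the priority of a
 * kernel thread from kernel context. my_background_thread() and the nice
 * value 10 are hypothetical; <linux/kthread.h> is assumed to be available.
 */
static int my_background_thread(void *unused)
{
	set_user_nice(current, 10);	/* run below the default nice 0 */

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	return 0;
}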
3985
e43379f1
MM
3986/*
3987 * can_nice - check if a task can reduce its nice value
3988 * @p: task
3989 * @nice: nice value
3990 */
36c8b586 3991int can_nice(const struct task_struct *p, const int nice)
e43379f1 3992{
024f4747
MM
3993 /* convert nice value [19,-20] to rlimit style value [1,40] */
3994 int nice_rlim = 20 - nice;
48f24c4d 3995
78d7d407 3996 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
3997 capable(CAP_SYS_NICE));
3998}
3999
1da177e4
LT
4000#ifdef __ARCH_WANT_SYS_NICE
4001
4002/*
4003 * sys_nice - change the priority of the current process.
4004 * @increment: priority increment
4005 *
4006 * sys_setpriority is a more generic, but much slower function that
4007 * does similar things.
4008 */
5add95d4 4009SYSCALL_DEFINE1(nice, int, increment)
1da177e4 4010{
48f24c4d 4011 long nice, retval;
1da177e4
LT
4012
4013 /*
4014 * Setpriority might change our priority at the same moment.
4015 * We don't have to worry. Conceptually one call occurs first
4016 * and we have a single winner.
4017 */
e43379f1
MM
4018 if (increment < -40)
4019 increment = -40;
1da177e4
LT
4020 if (increment > 40)
4021 increment = 40;
4022
2b8f836f 4023 nice = TASK_NICE(current) + increment;
1da177e4
LT
4024 if (nice < -20)
4025 nice = -20;
4026 if (nice > 19)
4027 nice = 19;
4028
e43379f1
MM
4029 if (increment < 0 && !can_nice(current, nice))
4030 return -EPERM;
4031
1da177e4
LT
4032 retval = security_task_setnice(current, nice);
4033 if (retval)
4034 return retval;
6fa3eb70
S
4035#ifdef CONFIG_MT_PRIO_TRACER
4036 set_user_nice_syscall(current, nice);
4037#else
1da177e4 4038 set_user_nice(current, nice);
6fa3eb70 4039#endif
1da177e4
LT
4040 return 0;
4041}
4042
4043#endif
4044
4045/**
4046 * task_prio - return the priority value of a given task.
4047 * @p: the task in question.
4048 *
4049 * This is the priority value as seen by users in /proc.
4050 * RT tasks are offset by -200. Normal tasks are centered
4051 * around 0, value goes from -16 to +15.
4052 */
36c8b586 4053int task_prio(const struct task_struct *p)
1da177e4
LT
4054{
4055 return p->prio - MAX_RT_PRIO;
4056}
4057
4058/**
4059 * task_nice - return the nice value of a given task.
4060 * @p: the task in question.
4061 */
36c8b586 4062int task_nice(const struct task_struct *p)
1da177e4
LT
4063{
4064 return TASK_NICE(p);
4065}
150d8bed 4066EXPORT_SYMBOL(task_nice);
1da177e4
LT
4067
4068/**
4069 * idle_cpu - is a given cpu idle currently?
4070 * @cpu: the processor in question.
4071 */
4072int idle_cpu(int cpu)
4073{
908a3283
TG
4074 struct rq *rq = cpu_rq(cpu);
4075
4076 if (rq->curr != rq->idle)
4077 return 0;
4078
4079 if (rq->nr_running)
4080 return 0;
4081
4082#ifdef CONFIG_SMP
4083 if (!llist_empty(&rq->wake_list))
4084 return 0;
4085#endif
4086
4087 return 1;
1da177e4
LT
4088}
4089
1da177e4
LT
4090/**
4091 * idle_task - return the idle task for a given cpu.
4092 * @cpu: the processor in question.
4093 */
36c8b586 4094struct task_struct *idle_task(int cpu)
1da177e4
LT
4095{
4096 return cpu_rq(cpu)->idle;
4097}
4098
4099/**
4100 * find_process_by_pid - find a process with a matching PID value.
4101 * @pid: the pid in question.
4102 */
a9957449 4103static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 4104{
228ebcbe 4105 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
4106}
4107
6fa3eb70
S
4108extern struct cpumask hmp_slow_cpu_mask;
4109
1da177e4 4110/* Actually do priority change: must hold rq lock. */
dd41f596
IM
4111static void
4112__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
1da177e4 4113{
1da177e4
LT
4114 p->policy = policy;
4115 p->rt_priority = prio;
b29739f9
IM
4116 p->normal_prio = normal_prio(p);
4117 /* we are holding p->pi_lock already */
4118 p->prio = rt_mutex_getprio(p);
6fa3eb70 4119 if (rt_prio(p->prio)) {
ffd44db5 4120 p->sched_class = &rt_sched_class;
6fa3eb70 4121 }
ffd44db5
PZ
4122 else
4123 p->sched_class = &fair_sched_class;
2dd73a4f 4124 set_load_weight(p);
1da177e4
LT
4125}
4126
c69e8d9c
DH
4127/*
4128 * check the target process has a UID that matches the current process's
4129 */
4130static bool check_same_owner(struct task_struct *p)
4131{
4132 const struct cred *cred = current_cred(), *pcred;
4133 bool match;
4134
4135 rcu_read_lock();
4136 pcred = __task_cred(p);
9c806aa0
EB
4137 match = (uid_eq(cred->euid, pcred->euid) ||
4138 uid_eq(cred->euid, pcred->uid));
c69e8d9c
DH
4139 rcu_read_unlock();
4140 return match;
4141}
4142
6fa3eb70
S
4143static int check_mt_allow_rt(struct sched_param *param)
4144{
4145 int allow = 0;
4146 if(0 == MT_ALLOW_RT_PRIO_BIT){
4147 //this condition check will be removed
4148 return 1;
4149 }
4150
4151 if(param->sched_priority & MT_ALLOW_RT_PRIO_BIT){
4152 param->sched_priority &= ~MT_ALLOW_RT_PRIO_BIT;
4153 allow = 1;
4154 }
4155 return allow;
4156}
4157
961ccddd 4158static int __sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 4159 const struct sched_param *param, bool user)
1da177e4 4160{
83b699ed 4161 int retval, oldprio, oldpolicy = -1, on_rq, running;
1da177e4 4162 unsigned long flags;
83ab0aa0 4163 const struct sched_class *prev_class;
70b97a7f 4164 struct rq *rq;
ca94c442 4165 int reset_on_fork;
1da177e4 4166
66e5393a
SR
4167 /* may grab non-irq protected spin_locks */
4168 BUG_ON(in_interrupt());
1da177e4
LT
4169recheck:
4170 /* double check policy once rq lock held */
ca94c442
LP
4171 if (policy < 0) {
4172 reset_on_fork = p->sched_reset_on_fork;
1da177e4 4173 policy = oldpolicy = p->policy;
ca94c442
LP
4174 } else {
4175 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4176 policy &= ~SCHED_RESET_ON_FORK;
4177
4178 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4179 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4180 policy != SCHED_IDLE)
4181 return -EINVAL;
4182 }
4183
6fa3eb70
S
4184 if(rt_policy(policy)){
4185 if (!check_mt_allow_rt((struct sched_param *)param)){
4186 printk("[RT_MONITOR]WARNNING [%d:%s] SET NOT ALLOW RT Prio [%d] for proc [%d:%s]\n", current->pid, current->comm, param->sched_priority, p->pid, p->comm);
4187 //dump_stack();
4188 }
4189 }
4190
1da177e4
LT
4191 /*
4192 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
4193 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4194 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4
LT
4195 */
4196 if (param->sched_priority < 0 ||
95cdf3b7 4197 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
d46523ea 4198 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
1da177e4 4199 return -EINVAL;
e05606d3 4200 if (rt_policy(policy) != (param->sched_priority != 0))
1da177e4
LT
4201 return -EINVAL;
4202
37e4ab3f
OC
4203 /*
4204 * Allow unprivileged RT tasks to decrease priority:
4205 */
961ccddd 4206 if (user && !capable(CAP_SYS_NICE)) {
e05606d3 4207 if (rt_policy(policy)) {
a44702e8
ON
4208 unsigned long rlim_rtprio =
4209 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
4210
4211 /* can't set/change the rt policy */
4212 if (policy != p->policy && !rlim_rtprio)
4213 return -EPERM;
4214
4215 /* can't increase priority */
4216 if (param->sched_priority > p->rt_priority &&
4217 param->sched_priority > rlim_rtprio)
4218 return -EPERM;
4219 }
c02aa73b 4220
dd41f596 4221 /*
c02aa73b
DH
4222 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4223 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 4224 */
c02aa73b
DH
4225 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4226 if (!can_nice(p, TASK_NICE(p)))
4227 return -EPERM;
4228 }
5fe1d75f 4229
37e4ab3f 4230 /* can't change other user's priorities */
c69e8d9c 4231 if (!check_same_owner(p))
37e4ab3f 4232 return -EPERM;
ca94c442
LP
4233
4234 /* Normal users shall not reset the sched_reset_on_fork flag */
4235 if (p->sched_reset_on_fork && !reset_on_fork)
4236 return -EPERM;
37e4ab3f 4237 }
1da177e4 4238
725aad24 4239 if (user) {
b0ae1981 4240 retval = security_task_setscheduler(p);
725aad24
JF
4241 if (retval)
4242 return retval;
4243 }
4244
b29739f9
IM
4245 /*
4246 * make sure no PI-waiters arrive (or leave) while we are
4247 * changing the priority of the task:
0122ec5b 4248 *
25985edc 4249 * To be able to change p->policy safely, the appropriate
1da177e4
LT
4250 * runqueue lock must be held.
4251 */
0122ec5b 4252 rq = task_rq_lock(p, &flags);
dc61b1d6 4253
34f971f6
PZ
4254 /*
4255 * Changing the policy of the stop threads is a very bad idea
4256 */
4257 if (p == rq->stop) {
0122ec5b 4258 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
4259 return -EINVAL;
4260 }
4261
a51e9198
DF
4262 /*
4263 * If not changing anything there's no need to proceed further:
4264 */
4265 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
4266 param->sched_priority == p->rt_priority))) {
45afb173 4267 task_rq_unlock(rq, p, &flags);
a51e9198
DF
4268 return 0;
4269 }
4270
dc61b1d6
PZ
4271#ifdef CONFIG_RT_GROUP_SCHED
4272 if (user) {
4273 /*
4274 * Do not allow realtime tasks into groups that have no runtime
4275 * assigned.
4276 */
4277 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
4278 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4279 !task_group_is_autogroup(task_group(p))) {
0122ec5b 4280 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
4281 return -EPERM;
4282 }
4283 }
4284#endif
4285
1da177e4
LT
4286 /* recheck policy now with rq lock held */
4287 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4288 policy = oldpolicy = -1;
0122ec5b 4289 task_rq_unlock(rq, p, &flags);
1da177e4
LT
4290 goto recheck;
4291 }
fd2f4419 4292 on_rq = p->on_rq;
051a1d1a 4293 running = task_current(rq, p);
0e1f3483 4294 if (on_rq)
4ca9b72b 4295 dequeue_task(rq, p, 0);
0e1f3483
HS
4296 if (running)
4297 p->sched_class->put_prev_task(rq, p);
f6b53205 4298
ca94c442
LP
4299 p->sched_reset_on_fork = reset_on_fork;
4300
1da177e4 4301 oldprio = p->prio;
83ab0aa0 4302 prev_class = p->sched_class;
dd41f596 4303 __setscheduler(rq, p, policy, param->sched_priority);
f6b53205 4304
0e1f3483
HS
4305 if (running)
4306 p->sched_class->set_curr_task(rq);
da7a735e 4307 if (on_rq)
4ca9b72b 4308 enqueue_task(rq, p, 0);
cb469845 4309
da7a735e 4310 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 4311 task_rq_unlock(rq, p, &flags);
b29739f9 4312
95e02ca9
TG
4313 rt_mutex_adjust_pi(p);
4314
1da177e4
LT
4315 return 0;
4316}
961ccddd
RR
4317
4318/**
4319 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4320 * @p: the task in question.
4321 * @policy: new policy.
4322 * @param: structure containing the new RT priority.
4323 *
4324 * NOTE that the task may be already dead.
4325 */
6fa3eb70
S
4326#ifdef CONFIG_MT_PRIO_TRACER
4327int sched_setscheduler_core(struct task_struct *p, int policy,
4328 const struct sched_param *param)
4329{
4330 return __sched_setscheduler(p, policy, param, true);
4331}
4332
4333int sched_setscheduler(struct task_struct *p, int policy,
4334 const struct sched_param *param)
4335{
4336 int retval;
4337
4338 retval = sched_setscheduler_core(p, policy, param);
4339 if (!retval) {
4340 int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
4341 if (!rt_policy(policy))
4342 prio = __normal_prio(p);
4343 else
4344 prio = MAX_RT_PRIO-1 - prio;
4345 update_prio_tracer(task_pid_nr(p), prio, policy, PTS_KRNL);
4346 }
4347 return retval;
4348}
4349#else /* !CONFIG_MT_PRIO_TRACER */
961ccddd 4350int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 4351 const struct sched_param *param)
961ccddd
RR
4352{
4353 return __sched_setscheduler(p, policy, param, true);
4354}
6fa3eb70 4355#endif
1da177e4
LT
4356EXPORT_SYMBOL_GPL(sched_setscheduler);
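/*
 * Illustrative sketch, not part of this file: switching a task to SCHED_FIFO
 * from kernel context. The priority 50 is an arbitrary example; note that on
 * this tree check_mt_allow_rt() above logs a warning unless the caller also
 * sets MT_ALLOW_RT_PRIO_BIT in sched_priority.
 */
static int my_make_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 50 };

	return sched_setscheduler(p, SCHED_FIFO, &sp);
}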
4357
961ccddd
RR
4358/**
4359 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4360 * @p: the task in question.
4361 * @policy: new policy.
4362 * @param: structure containing the new RT priority.
4363 *
4364 * Just like sched_setscheduler, only don't bother checking if the
4365 * current context has permission. For example, this is needed in
4366 * stop_machine(): we create temporary high priority worker threads,
4367 * but our caller might not have that capability.
4368 */
6fa3eb70
S
4369#ifdef CONFIG_MT_PRIO_TRACER
4370int sched_setscheduler_nocheck_core(struct task_struct *p, int policy,
4371 const struct sched_param *param)
4372{
4373 return __sched_setscheduler(p, policy, param, false);
4374}
4375
4376
4377int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4378 const struct sched_param *param)
4379{
4380 int retval;
4381
4382 retval = sched_setscheduler_nocheck_core(p, policy, param);
4383 if (!retval) {
4384 int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
4385 if (!rt_policy(policy))
4386 prio = __normal_prio(p);
4387 else
4388 prio = MAX_RT_PRIO-1 - prio;
4389 update_prio_tracer(task_pid_nr(p), prio, policy, PTS_KRNL);
4390 }
4391 return retval;
4392}
4393#else /* !CONFIG_MT_PRIO_TRACER */
961ccddd 4394int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 4395 const struct sched_param *param)
961ccddd
RR
4396{
4397 return __sched_setscheduler(p, policy, param, false);
4398}
6fa3eb70 4399#endif
961ccddd 4400
95cdf3b7
IM
4401static int
4402do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4403{
1da177e4
LT
4404 struct sched_param lparam;
4405 struct task_struct *p;
36c8b586 4406 int retval;
1da177e4
LT
4407
4408 if (!param || pid < 0)
4409 return -EINVAL;
4410 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4411 return -EFAULT;
5fe1d75f
ON
4412
4413 rcu_read_lock();
4414 retval = -ESRCH;
1da177e4 4415 p = find_process_by_pid(pid);
6fa3eb70
S
4416#ifdef CONFIG_MT_PRIO_TRACER
4417 if (p != NULL)
4418 retval = sched_setscheduler_syscall(p, policy, &lparam);
4419#else
5fe1d75f
ON
4420 if (p != NULL)
4421 retval = sched_setscheduler(p, policy, &lparam);
6fa3eb70
S
4422#endif
4423
5fe1d75f 4424 rcu_read_unlock();
36c8b586 4425
1da177e4
LT
4426 return retval;
4427}
4428
4429/**
4430 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4431 * @pid: the pid in question.
4432 * @policy: new policy.
4433 * @param: structure containing the new RT priority.
4434 */
5add95d4
HC
4435SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4436 struct sched_param __user *, param)
1da177e4 4437{
c21761f1
JB
4438 /* negative values for policy are not valid */
4439 if (policy < 0)
4440 return -EINVAL;
4441
1da177e4
LT
4442 return do_sched_setscheduler(pid, policy, param);
4443}
4444
4445/**
4446 * sys_sched_setparam - set/change the RT priority of a thread
4447 * @pid: the pid in question.
4448 * @param: structure containing the new RT priority.
4449 */
5add95d4 4450SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4451{
4452 return do_sched_setscheduler(pid, -1, param);
4453}
4454
4455/**
4456 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4457 * @pid: the pid in question.
4458 */
5add95d4 4459SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4460{
36c8b586 4461 struct task_struct *p;
3a5c359a 4462 int retval;
1da177e4
LT
4463
4464 if (pid < 0)
3a5c359a 4465 return -EINVAL;
1da177e4
LT
4466
4467 retval = -ESRCH;
5fe85be0 4468 rcu_read_lock();
1da177e4
LT
4469 p = find_process_by_pid(pid);
4470 if (p) {
4471 retval = security_task_getscheduler(p);
4472 if (!retval)
ca94c442
LP
4473 retval = p->policy
4474 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4475 }
5fe85be0 4476 rcu_read_unlock();
1da177e4
LT
4477 return retval;
4478}
4479
4480/**
ca94c442 4481 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4482 * @pid: the pid in question.
4483 * @param: structure containing the RT priority.
4484 */
5add95d4 4485SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4486{
4487 struct sched_param lp;
36c8b586 4488 struct task_struct *p;
3a5c359a 4489 int retval;
1da177e4
LT
4490
4491 if (!param || pid < 0)
3a5c359a 4492 return -EINVAL;
1da177e4 4493
5fe85be0 4494 rcu_read_lock();
1da177e4
LT
4495 p = find_process_by_pid(pid);
4496 retval = -ESRCH;
4497 if (!p)
4498 goto out_unlock;
4499
4500 retval = security_task_getscheduler(p);
4501 if (retval)
4502 goto out_unlock;
4503
4504 lp.sched_priority = p->rt_priority;
5fe85be0 4505 rcu_read_unlock();
1da177e4
LT
4506
4507 /*
4508 * This one might sleep, we cannot do it with a spinlock held ...
4509 */
4510 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4511
1da177e4
LT
4512 return retval;
4513
4514out_unlock:
5fe85be0 4515 rcu_read_unlock();
1da177e4
LT
4516 return retval;
4517}
4518
96f874e2 4519long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4520{
5a16f3d3 4521 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4522 struct task_struct *p;
4523 int retval;
1da177e4 4524
95402b38 4525 get_online_cpus();
23f5d142 4526 rcu_read_lock();
1da177e4
LT
4527
4528 p = find_process_by_pid(pid);
4529 if (!p) {
23f5d142 4530 rcu_read_unlock();
95402b38 4531 put_online_cpus();
6fa3eb70 4532 printk(KERN_DEBUG "SCHED: setaffinity find process %d fail\n", pid);
1da177e4
LT
4533 return -ESRCH;
4534 }
4535
23f5d142 4536 /* Prevent p going away */
1da177e4 4537 get_task_struct(p);
23f5d142 4538 rcu_read_unlock();
1da177e4 4539
14a40ffc
TH
4540 if (p->flags & PF_NO_SETAFFINITY) {
4541 retval = -EINVAL;
6fa3eb70 4542 printk(KERN_DEBUG "SCHED: setaffinity flags PF_NO_SETAFFINITY fail\n");
14a40ffc
TH
4543 goto out_put_task;
4544 }
5a16f3d3
RR
4545 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4546 retval = -ENOMEM;
6fa3eb70 4547 printk(KERN_DEBUG "SCHED: setaffinity allo_cpumask_var for cpus_allowed fail\n");
5a16f3d3
RR
4548 goto out_put_task;
4549 }
4550 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4551 retval = -ENOMEM;
6fa3eb70 4552 printk(KERN_DEBUG "SCHED: setaffinity allo_cpumask_var for new_mask fail\n");
5a16f3d3
RR
4553 goto out_free_cpus_allowed;
4554 }
1da177e4 4555 retval = -EPERM;
4c44aaaf
EB
4556 if (!check_same_owner(p)) {
4557 rcu_read_lock();
4558 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4559 rcu_read_unlock();
6fa3eb70 4560 printk(KERN_DEBUG "SCHED: setaffinity check_same_owner and task_ns_capable fail\n");
4c44aaaf
EB
4561 goto out_unlock;
4562 }
4563 rcu_read_unlock();
4564 }
1da177e4 4565
b0ae1981 4566 retval = security_task_setscheduler(p);
6fa3eb70
S
4567 if (retval){
4568 printk(KERN_DEBUG "SCHED: setaffinity security_task_setscheduler fail, status: %d\n", retval);
e7834f8f 4569 goto out_unlock;
6fa3eb70 4570 }
e7834f8f 4571
5a16f3d3
RR
4572 cpuset_cpus_allowed(p, cpus_allowed);
4573 cpumask_and(new_mask, in_mask, cpus_allowed);
49246274 4574again:
5a16f3d3 4575 retval = set_cpus_allowed_ptr(p, new_mask);
6fa3eb70
S
4576 if (retval)
4577 printk(KERN_DEBUG "SCHED: set_cpus_allowed_ptr status %d\n", retval);
1da177e4 4578
8707d8b8 4579 if (!retval) {
5a16f3d3
RR
4580 cpuset_cpus_allowed(p, cpus_allowed);
4581 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4582 /*
4583 * We must have raced with a concurrent cpuset
4584 * update. Just reset the cpus_allowed to the
4585 * cpuset's cpus_allowed
4586 */
5a16f3d3 4587 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4588 goto again;
4589 }
4590 }
1da177e4 4591out_unlock:
5a16f3d3
RR
4592 free_cpumask_var(new_mask);
4593out_free_cpus_allowed:
4594 free_cpumask_var(cpus_allowed);
4595out_put_task:
1da177e4 4596 put_task_struct(p);
95402b38 4597 put_online_cpus();
6fa3eb70
S
4598 if (retval)
4599 printk(KERN_DEBUG "SCHED: setaffinity status %d\n", retval);
1da177e4
LT
4600 return retval;
4601}
4602
4603static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4604 struct cpumask *new_mask)
1da177e4 4605{
96f874e2
RR
4606 if (len < cpumask_size())
4607 cpumask_clear(new_mask);
4608 else if (len > cpumask_size())
4609 len = cpumask_size();
4610
1da177e4
LT
4611 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4612}
4613
4614/**
4615 * sys_sched_setaffinity - set the cpu affinity of a process
4616 * @pid: pid of the process
4617 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4618 * @user_mask_ptr: user-space pointer to the new cpu mask
4619 */
5add95d4
HC
4620SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4621 unsigned long __user *, user_mask_ptr)
1da177e4 4622{
5a16f3d3 4623 cpumask_var_t new_mask;
1da177e4
LT
4624 int retval;
4625
5a16f3d3
RR
4626 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4627 return -ENOMEM;
1da177e4 4628
5a16f3d3
RR
4629 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4630 if (retval == 0)
4631 retval = sched_setaffinity(pid, new_mask);
4632 free_cpumask_var(new_mask);
4633 return retval;
1da177e4
LT
4634}
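/*
 * Illustrative userspace sketch, not kernel code and not part of this file:
 * pinning the calling process to CPU 0 through the syscall above, using the
 * glibc wrapper (requires _GNU_SOURCE; a pid of 0 means the calling process).
 * Kept inside a comment because it cannot be built in kernel context.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	static int my_pin_to_cpu0(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		return sched_setaffinity(0, sizeof(set), &set);
 *	}
 */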
4635
96f874e2 4636long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4637{
36c8b586 4638 struct task_struct *p;
31605683 4639 unsigned long flags;
1da177e4 4640 int retval;
1da177e4 4641
95402b38 4642 get_online_cpus();
23f5d142 4643 rcu_read_lock();
1da177e4
LT
4644
4645 retval = -ESRCH;
4646 p = find_process_by_pid(pid);
6fa3eb70
S
4647 if (!p){
4648 printk(KERN_DEBUG "SCHED: getaffinity find process %d fail\n", pid);
1da177e4 4649 goto out_unlock;
6fa3eb70 4650 }
1da177e4 4651
e7834f8f 4652 retval = security_task_getscheduler(p);
6fa3eb70
S
4653 if (retval){
4654 printk(KERN_DEBUG "SCHED: getaffinity security_task_getscheduler fail, status: %d\n", retval);
e7834f8f 4655 goto out_unlock;
6fa3eb70 4656 }
e7834f8f 4657
013fdb80 4658 raw_spin_lock_irqsave(&p->pi_lock, flags);
96f874e2 4659 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
013fdb80 4660 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4661
4662out_unlock:
23f5d142 4663 rcu_read_unlock();
95402b38 4664 put_online_cpus();
1da177e4 4665
6fa3eb70
S
4666 if (retval){
4667 printk(KERN_DEBUG "SCHED: getaffinity status %d\n", retval);
4668 }
9531b62f 4669 return retval;
1da177e4
LT
4670}
4671
4672/**
4673 * sys_sched_getaffinity - get the cpu affinity of a process
4674 * @pid: pid of the process
4675 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4676 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4677 */
5add95d4
HC
4678SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4679 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4680{
4681 int ret;
f17c8607 4682 cpumask_var_t mask;
1da177e4 4683
84fba5ec 4684 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4685 return -EINVAL;
4686 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4687 return -EINVAL;
4688
f17c8607
RR
4689 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4690 return -ENOMEM;
1da177e4 4691
f17c8607
RR
4692 ret = sched_getaffinity(pid, mask);
4693 if (ret == 0) {
8bc037fb 4694 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
4695
4696 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
4697 ret = -EFAULT;
4698 else
cd3d8031 4699 ret = retlen;
f17c8607
RR
4700 }
4701 free_cpumask_var(mask);
1da177e4 4702
f17c8607 4703 return ret;
1da177e4
LT
4704}
4705
4706/**
4707 * sys_sched_yield - yield the current processor to other threads.
4708 *
dd41f596
IM
4709 * This function yields the current CPU to other tasks. If there are no
4710 * other threads running on this CPU then this function will return.
1da177e4 4711 */
5add95d4 4712SYSCALL_DEFINE0(sched_yield)
1da177e4 4713{
70b97a7f 4714 struct rq *rq = this_rq_lock();
1da177e4 4715
2d72376b 4716 schedstat_inc(rq, yld_count);
4530d7ab 4717 current->sched_class->yield_task(rq);
1da177e4
LT
4718
4719 /*
4720 * Since we are going to call schedule() anyway, there's
4721 * no need to preempt or enable interrupts:
4722 */
4723 __release(rq->lock);
8a25d5de 4724 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 4725 do_raw_spin_unlock(&rq->lock);
ba74c144 4726 sched_preempt_enable_no_resched();
1da177e4
LT
4727
4728 schedule();
4729
4730 return 0;
4731}
4732
d86ee480
PZ
4733static inline int should_resched(void)
4734{
4735 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4736}
4737
e7b38404 4738static void __cond_resched(void)
1da177e4 4739{
e7aaaa69 4740 add_preempt_count(PREEMPT_ACTIVE);
c259e01a 4741 __schedule();
e7aaaa69 4742 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4
LT
4743}
4744
02b67cc3 4745int __sched _cond_resched(void)
1da177e4 4746{
d86ee480 4747 if (should_resched()) {
1da177e4
LT
4748 __cond_resched();
4749 return 1;
4750 }
4751 return 0;
4752}
02b67cc3 4753EXPORT_SYMBOL(_cond_resched);
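/*
 * Illustrative sketch, not part of this file: the usual cond_resched()
 * pattern inside a long-running loop. my_process_all() and my_items are
 * hypothetical.
 */
static void my_process_all(unsigned long my_items)
{
	unsigned long i;

	for (i = 0; i < my_items; i++) {
		/* ... per-item work would go here ... */
		cond_resched();	/* yield voluntarily if a resched is pending */
	}
}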
1da177e4
LT
4754
4755/*
613afbf8 4756 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
4757 * call schedule, and on return reacquire the lock.
4758 *
41a2d6cf 4759 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
4760 * operations here to prevent schedule() from being called twice (once via
4761 * spin_unlock(), once by hand).
4762 */
613afbf8 4763int __cond_resched_lock(spinlock_t *lock)
1da177e4 4764{
d86ee480 4765 int resched = should_resched();
6df3cecb
JK
4766 int ret = 0;
4767
f607c668
PZ
4768 lockdep_assert_held(lock);
4769
95c354fe 4770 if (spin_needbreak(lock) || resched) {
1da177e4 4771 spin_unlock(lock);
d86ee480 4772 if (resched)
95c354fe
NP
4773 __cond_resched();
4774 else
4775 cpu_relax();
6df3cecb 4776 ret = 1;
1da177e4 4777 spin_lock(lock);
1da177e4 4778 }
6df3cecb 4779 return ret;
1da177e4 4780}
613afbf8 4781EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 4782
613afbf8 4783int __sched __cond_resched_softirq(void)
1da177e4
LT
4784{
4785 BUG_ON(!in_softirq());
4786
d86ee480 4787 if (should_resched()) {
98d82567 4788 local_bh_enable();
1da177e4
LT
4789 __cond_resched();
4790 local_bh_disable();
4791 return 1;
4792 }
4793 return 0;
4794}
613afbf8 4795EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 4796
1da177e4
LT
4797/**
4798 * yield - yield the current processor to other threads.
4799 *
8e3fabfd
PZ
4800 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4801 *
4802 * The scheduler is at all times free to pick the calling task as the most
4803 * eligible task to run; if removing the yield() call from your code breaks
4804 * it, it's already broken.
4805 *
4806 * Typical broken usage is:
4807 *
4808 * while (!event)
4809 * yield();
4810 *
4811 * where one assumes that yield() will let 'the other' process run that will
4812 * make event true. If the current task is a SCHED_FIFO task that will never
4813 * happen. Never use yield() as a progress guarantee!!
4814 *
4815 * If you want to use yield() to wait for something, use wait_event().
4816 * If you want to use yield() to be 'nice' for others, use cond_resched().
4817 * If you still want to use yield(), do not!
1da177e4
LT
4818 */
4819void __sched yield(void)
4820{
4821 set_current_state(TASK_RUNNING);
4822 sys_sched_yield();
4823}
1da177e4
LT
4824EXPORT_SYMBOL(yield);
4825
d95f4122
MG
4826/**
4827 * yield_to - yield the current processor to another thread in
4828 * your thread group, or accelerate that thread toward the
4829 * processor it's on.
16addf95
RD
4830 * @p: target task
4831 * @preempt: whether task preemption is allowed or not
d95f4122
MG
4832 *
4833 * It's the caller's job to ensure that the target task struct
4834 * can't go away on us before we can do any checks.
4835 *
7b270f60
PZ
4836 * Returns:
4837 * true (>0) if we indeed boosted the target task.
4838 * false (0) if we failed to boost the target.
4839 * -ESRCH if there's no task to yield to.
d95f4122
MG
4840 */
4841bool __sched yield_to(struct task_struct *p, bool preempt)
4842{
4843 struct task_struct *curr = current;
4844 struct rq *rq, *p_rq;
4845 unsigned long flags;
c3c18640 4846 int yielded = 0;
d95f4122
MG
4847
4848 local_irq_save(flags);
4849 rq = this_rq();
4850
4851again:
4852 p_rq = task_rq(p);
7b270f60
PZ
4853 /*
4854 * If we're the only runnable task on the rq and target rq also
4855 * has only one task, there's absolutely no point in yielding.
4856 */
4857 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4858 yielded = -ESRCH;
4859 goto out_irq;
4860 }
4861
d95f4122
MG
4862 double_rq_lock(rq, p_rq);
4863 while (task_rq(p) != p_rq) {
4864 double_rq_unlock(rq, p_rq);
4865 goto again;
4866 }
4867
4868 if (!curr->sched_class->yield_to_task)
7b270f60 4869 goto out_unlock;
d95f4122
MG
4870
4871 if (curr->sched_class != p->sched_class)
7b270f60 4872 goto out_unlock;
d95f4122
MG
4873
4874 if (task_running(p_rq, p) || p->state)
7b270f60 4875 goto out_unlock;
d95f4122
MG
4876
4877 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 4878 if (yielded) {
d95f4122 4879 schedstat_inc(rq, yld_count);
6d1cafd8
VP
4880 /*
4881 * Make p's CPU reschedule; pick_next_entity takes care of
4882 * fairness.
4883 */
4884 if (preempt && rq != p_rq)
4885 resched_task(p_rq->curr);
4886 }
d95f4122 4887
7b270f60 4888out_unlock:
d95f4122 4889 double_rq_unlock(rq, p_rq);
7b270f60 4890out_irq:
d95f4122
MG
4891 local_irq_restore(flags);
4892
7b270f60 4893 if (yielded > 0)
d95f4122
MG
4894 schedule();
4895
4896 return yielded;
4897}
4898EXPORT_SYMBOL_GPL(yield_to);
4899
1da177e4 4900/*
41a2d6cf 4901 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 4902 * that process accounting knows that this is a task in IO wait state.
1da177e4
LT
4903 */
4904void __sched io_schedule(void)
4905{
54d35f29 4906 struct rq *rq = raw_rq();
1da177e4 4907
0ff92245 4908 delayacct_blkio_start();
1da177e4 4909 atomic_inc(&rq->nr_iowait);
73c10101 4910 blk_flush_plug(current);
8f0dfc34 4911 current->in_iowait = 1;
1da177e4 4912 schedule();
8f0dfc34 4913 current->in_iowait = 0;
1da177e4 4914 atomic_dec(&rq->nr_iowait);
0ff92245 4915 delayacct_blkio_end();
1da177e4 4916}
1da177e4
LT
4917EXPORT_SYMBOL(io_schedule);
4918
4919long __sched io_schedule_timeout(long timeout)
4920{
54d35f29 4921 struct rq *rq = raw_rq();
1da177e4
LT
4922 long ret;
4923
0ff92245 4924 delayacct_blkio_start();
1da177e4 4925 atomic_inc(&rq->nr_iowait);
73c10101 4926 blk_flush_plug(current);
8f0dfc34 4927 current->in_iowait = 1;
1da177e4 4928 ret = schedule_timeout(timeout);
8f0dfc34 4929 current->in_iowait = 0;
1da177e4 4930 atomic_dec(&rq->nr_iowait);
0ff92245 4931 delayacct_blkio_end();
1da177e4
LT
4932 return ret;
4933}
4934
4935/**
4936 * sys_sched_get_priority_max - return maximum RT priority.
4937 * @policy: scheduling class.
4938 *
4939 * this syscall returns the maximum rt_priority that can be used
4940 * by a given scheduling class.
4941 */
5add95d4 4942SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
4943{
4944 int ret = -EINVAL;
4945
4946 switch (policy) {
4947 case SCHED_FIFO:
4948 case SCHED_RR:
4949 ret = MAX_USER_RT_PRIO-1;
4950 break;
4951 case SCHED_NORMAL:
b0a9499c 4952 case SCHED_BATCH:
dd41f596 4953 case SCHED_IDLE:
1da177e4
LT
4954 ret = 0;
4955 break;
4956 }
4957 return ret;
4958}
4959
4960/**
4961 * sys_sched_get_priority_min - return minimum RT priority.
4962 * @policy: scheduling class.
4963 *
4964 * this syscall returns the minimum rt_priority that can be used
4965 * by a given scheduling class.
4966 */
5add95d4 4967SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
4968{
4969 int ret = -EINVAL;
4970
4971 switch (policy) {
4972 case SCHED_FIFO:
4973 case SCHED_RR:
4974 ret = 1;
4975 break;
4976 case SCHED_NORMAL:
b0a9499c 4977 case SCHED_BATCH:
dd41f596 4978 case SCHED_IDLE:
1da177e4
LT
4979 ret = 0;
4980 }
4981 return ret;
4982}
4983
4984/**
4985 * sys_sched_rr_get_interval - return the default timeslice of a process.
4986 * @pid: pid of the process.
4987 * @interval: userspace pointer to the timeslice value.
4988 *
4989 * this syscall writes the default timeslice value of a given process
4990 * into the user-space timespec buffer. A value of '0' means infinity.
4991 */
17da2bd9 4992SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 4993 struct timespec __user *, interval)
1da177e4 4994{
36c8b586 4995 struct task_struct *p;
a4ec24b4 4996 unsigned int time_slice;
dba091b9
TG
4997 unsigned long flags;
4998 struct rq *rq;
3a5c359a 4999 int retval;
1da177e4 5000 struct timespec t;
1da177e4
LT
5001
5002 if (pid < 0)
3a5c359a 5003 return -EINVAL;
1da177e4
LT
5004
5005 retval = -ESRCH;
1a551ae7 5006 rcu_read_lock();
1da177e4
LT
5007 p = find_process_by_pid(pid);
5008 if (!p)
5009 goto out_unlock;
5010
5011 retval = security_task_getscheduler(p);
5012 if (retval)
5013 goto out_unlock;
5014
dba091b9
TG
5015 rq = task_rq_lock(p, &flags);
5016 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 5017 task_rq_unlock(rq, p, &flags);
a4ec24b4 5018
1a551ae7 5019 rcu_read_unlock();
a4ec24b4 5020 jiffies_to_timespec(time_slice, &t);
1da177e4 5021 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 5022 return retval;
3a5c359a 5023
1da177e4 5024out_unlock:
1a551ae7 5025 rcu_read_unlock();
1da177e4
LT
5026 return retval;
5027}
5028
7c731e0a 5029static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
6fa3eb70
S
5030#ifdef CONFIG_MT_DEBUG_MUTEXES
5031void mt_mutex_state(struct task_struct *p)
5032{
5033 struct task_struct *locker;
5034 if(p->blocked_on){
5035 locker = p->blocked_on->task_wait_on;
5036 if(find_task_by_vpid(locker->pid) != NULL){
5037 printk("Hint: wait on mutex, holder is [%d:%s:%ld]\n", locker->pid, locker->comm, locker->state);
5038 if(locker->state != TASK_RUNNING){
5039 printk("Mutex holder process[%d:%s] is not running now:\n", locker->pid, locker->comm);
5040 show_stack(locker, NULL);
5041 printk("----\n");
5042 }
5043 }else{
5044 printk("Hint: wait on mutex, but holder already released lock\n");
5045 }
5046 }
5047}
5048#endif
82a1fcb9 5049void sched_show_task(struct task_struct *p)
1da177e4 5050{
1da177e4 5051 unsigned long free = 0;
4e79752c 5052 int ppid;
36c8b586 5053 unsigned state;
1da177e4 5054
1da177e4 5055 state = p->state ? __ffs(p->state) + 1 : 0;
28d0686c 5056 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 5057 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 5058#if BITS_PER_LONG == 32
1da177e4 5059 if (state == TASK_RUNNING)
3df0fc5b 5060 printk(KERN_CONT " running ");
1da177e4 5061 else
3df0fc5b 5062 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
5063#else
5064 if (state == TASK_RUNNING)
3df0fc5b 5065 printk(KERN_CONT " running task ");
1da177e4 5066 else
3df0fc5b 5067 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
5068#endif
5069#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 5070 free = stack_not_used(p);
1da177e4 5071#endif
4e79752c
PM
5072 rcu_read_lock();
5073 ppid = task_pid_nr(rcu_dereference(p->real_parent));
5074 rcu_read_unlock();
3df0fc5b 5075 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4e79752c 5076 task_pid_nr(p), ppid,
aa47b7e0 5077 (unsigned long)task_thread_info(p)->flags);
1da177e4 5078
3d1cb205 5079 print_worker_info(KERN_INFO, p);
5fb5e6de 5080 show_stack(p, NULL);
6fa3eb70
S
5081#ifdef CONFIG_MT_DEBUG_MUTEXES
5082 mt_mutex_state(p);
5083#endif
1da177e4
LT
5084}
5085
e59e2ae2 5086void show_state_filter(unsigned long state_filter)
1da177e4 5087{
36c8b586 5088 struct task_struct *g, *p;
1da177e4 5089
4bd77321 5090#if BITS_PER_LONG == 32
3df0fc5b
PZ
5091 printk(KERN_INFO
5092 " task PC stack pid father\n");
1da177e4 5093#else
3df0fc5b
PZ
5094 printk(KERN_INFO
5095 " task PC stack pid father\n");
1da177e4 5096#endif
510f5acc 5097 rcu_read_lock();
1da177e4
LT
5098 do_each_thread(g, p) {
5099 /*
5100 * reset the NMI-timeout, listing all files on a slow
25985edc 5101 * console might take a lot of time:
1da177e4
LT
5102 */
5103 touch_nmi_watchdog();
39bc89fd 5104 if (!state_filter || (p->state & state_filter))
82a1fcb9 5105 sched_show_task(p);
1da177e4
LT
5106 } while_each_thread(g, p);
5107
04c9167f
JF
5108 touch_all_softlockup_watchdogs();
5109
dd41f596
IM
5110#ifdef CONFIG_SCHED_DEBUG
5111 sysrq_sched_debug_show();
5112#endif
510f5acc 5113 rcu_read_unlock();
e59e2ae2
IM
5114 /*
5115 * Only show locks if all tasks are dumped:
5116 */
93335a21 5117 if (!state_filter)
e59e2ae2 5118 debug_show_all_locks();
1da177e4
LT
5119}
5120
1df21055
IM
5121void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5122{
dd41f596 5123 idle->sched_class = &idle_sched_class;
1df21055
IM
5124}
5125
f340c0d1
IM
5126/**
5127 * init_idle - set up an idle thread for a given CPU
5128 * @idle: task in question
5129 * @cpu: cpu the idle task belongs to
5130 *
5131 * NOTE: this function does not set the idle thread's NEED_RESCHED
5132 * flag, to make booting more robust.
5133 */
5c1e1767 5134void __cpuinit init_idle(struct task_struct *idle, int cpu)
1da177e4 5135{
70b97a7f 5136 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
5137 unsigned long flags;
5138
05fa785c 5139 raw_spin_lock_irqsave(&rq->lock, flags);
5cbd54ef 5140
dd41f596 5141 __sched_fork(idle);
06b83b5f 5142 idle->state = TASK_RUNNING;
dd41f596
IM
5143 idle->se.exec_start = sched_clock();
5144
1e1b6c51 5145 do_set_cpus_allowed(idle, cpumask_of(cpu));
6506cf6c
PZ
5146 /*
5147 * We're having a chicken and egg problem, even though we are
5148 * holding rq->lock, the cpu isn't yet set to this cpu so the
5149 * lockdep check in task_group() will fail.
5150 *
5151 * Similar case to sched_fork(). / Alternatively we could
5152 * use task_rq_lock() here and obtain the other rq->lock.
5153 *
5154 * Silence PROVE_RCU
5155 */
5156 rcu_read_lock();
dd41f596 5157 __set_task_cpu(idle, cpu);
6506cf6c 5158 rcu_read_unlock();
1da177e4 5159
1da177e4 5160 rq->curr = rq->idle = idle;
3ca7a440
PZ
5161#if defined(CONFIG_SMP)
5162 idle->on_cpu = 1;
4866cde0 5163#endif
05fa785c 5164 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4
LT
5165
5166 /* Set the preempt count _outside_ the spinlocks! */
a1261f54 5167 task_thread_info(idle)->preempt_count = 0;
55cd5340 5168
dd41f596
IM
5169 /*
5170 * The idle tasks have their own, simple scheduling class:
5171 */
5172 idle->sched_class = &idle_sched_class;
868baf07 5173 ftrace_graph_init_idle_task(idle, cpu);
45eacc69 5174 vtime_init_idle(idle, cpu);
f1c6f1a7
CE
5175#if defined(CONFIG_SMP)
5176 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5177#endif
19978ca6
IM
5178}
5179
1da177e4 5180#ifdef CONFIG_SMP
1e1b6c51
KM
5181void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
5182{
5183 if (p->sched_class && p->sched_class->set_cpus_allowed)
5184 p->sched_class->set_cpus_allowed(p, new_mask);
4939602a
PZ
5185
5186 cpumask_copy(&p->cpus_allowed, new_mask);
29baa747 5187 p->nr_cpus_allowed = cpumask_weight(new_mask);
1e1b6c51
KM
5188}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		printk(KERN_DEBUG "SCHED: intersects new_mask: %lu, cpu_active_mask: %lu\n", new_mask->bits[0], cpu_active_mask->bits[0]);
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (p->on_rq) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
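/*
 * Illustrative usage sketch (not part of the original file; the helper
 * name is made up): a caller holding a valid reference to a kernel
 * thread and wanting it to run only on CPU 1 could do
 *
 *	static int pin_to_cpu1(struct task_struct *tsk)
 *	{
 *		return set_cpus_allowed_ptr(tsk, cpumask_of(1));
 *	}
 *
 * A return of -EINVAL means the requested mask contained no active CPU.
 */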

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 *
 * Returns non-zero if task was successfully migrated.
 */
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
	struct rq *rq_dest, *rq_src;
	int ret = 0;

	if (unlikely(!cpu_active(dest_cpu)))
		return ret;

	rq_src = cpu_rq(src_cpu);
	rq_dest = cpu_rq(dest_cpu);

	raw_spin_lock(&p->pi_lock);
	double_rq_lock(rq_src, rq_dest);
	/* Already moved. */
	if (task_cpu(p) != src_cpu)
		goto done;
	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		goto fail;

	/*
	 * If we're not on a rq, the next wake-up will ensure we're
	 * placed properly.
	 */
	if (p->on_rq) {
		dequeue_task(rq_src, p, 0);
		set_task_cpu(p, dest_cpu);
		enqueue_task(rq_dest, p, 0);
		check_preempt_curr(rq_dest, p, 0);
	}
done:
	ret = 1;
fail:
	double_rq_unlock(rq_src, rq_dest);
	raw_spin_unlock(&p->pi_lock);
	return ret;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
	local_irq_enable();
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	BUG_ON(cpu_online(smp_processor_id()));

	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
	mmdrop(mm);
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta
 * we might have. Assumes we're called after migrate_tasks() so that the
 * nr_active count is stable.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}

/*
 * Migrate all tasks from the rq, sleeping tasks will be migrated by
 * try_to_wake_up()->select_task_rq().
 *
 * Called with rq->lock held even though we're in stop_machine() and
 * there's no concurrency possible, we hold the required locks anyway
 * because of lock validation efforts.
 */
static void migrate_tasks(unsigned int dead_cpu)
{
	struct rq *rq = cpu_rq(dead_cpu);
	struct task_struct *next, *stop = rq->stop;
	int dest_cpu;

	/*
	 * Fudge the rq selection such that the below task selection loop
	 * doesn't get stuck on the currently eligible stop task.
	 *
	 * We're currently inside stop_machine() and the rq is either stuck
	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
	 * either way we should never end up calling schedule() until we're
	 * done here.
	 */
	rq->stop = NULL;
	/* MTK patch: prevent RT tasks from failing to migrate while RT-throttled */
	unthrottle_offline_rt_rqs(rq);

	for ( ; ; ) {
		/*
		 * There's this thread running, bail when that's the only
		 * remaining thread.
		 */
		if (rq->nr_running == 1)
			break;

		next = pick_next_task(rq);
		BUG_ON(!next);
		next->sched_class->put_prev_task(rq, next);

		/* Find suitable destination for @next, with force if needed. */
		dest_cpu = select_fallback_rq(dead_cpu, next);
		raw_spin_unlock(&rq->lock);

		__migrate_task(next, dead_cpu, dest_cpu);

		raw_spin_lock(&rq->lock);
	}

	rq->stop = stop;
}

#endif /* CONFIG_HOTPLUG_CPU */
5404
e692ab53
NP
5405#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5406
5407static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
5408 {
5409 .procname = "sched_domain",
c57baf1e 5410 .mode = 0555,
e0361851 5411 },
56992309 5412 {}
e692ab53
NP
5413};
5414
5415static struct ctl_table sd_ctl_root[] = {
e0361851
AD
5416 {
5417 .procname = "kernel",
c57baf1e 5418 .mode = 0555,
e0361851
AD
5419 .child = sd_ctl_dir,
5420 },
56992309 5421 {}
e692ab53
NP
5422};
5423
5424static struct ctl_table *sd_alloc_ctl_entry(int n)
5425{
5426 struct ctl_table *entry =
5cf9f062 5427 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 5428
e692ab53
NP
5429 return entry;
5430}
5431
6382bc90
MM
5432static void sd_free_ctl_entry(struct ctl_table **tablep)
5433{
cd790076 5434 struct ctl_table *entry;
6382bc90 5435
cd790076
MM
5436 /*
5437 * In the intermediate directories, both the child directory and
5438 * procname are dynamically allocated and could fail but the mode
41a2d6cf 5439 * will always be set. In the lowest directory the names are
cd790076
MM
5440 * static strings and all have proc handlers.
5441 */
5442 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
5443 if (entry->child)
5444 sd_free_ctl_entry(&entry->child);
cd790076
MM
5445 if (entry->proc_handler == NULL)
5446 kfree(entry->procname);
5447 }
6382bc90
MM
5448
5449 kfree(*tablep);
5450 *tablep = NULL;
5451}
5452
201c373e 5453static int min_load_idx = 0;
fd9b86d3 5454static int max_load_idx = CPU_LOAD_IDX_MAX-1;
201c373e 5455
e692ab53 5456static void
e0361851 5457set_table_entry(struct ctl_table *entry,
e692ab53 5458 const char *procname, void *data, int maxlen,
201c373e
NK
5459 umode_t mode, proc_handler *proc_handler,
5460 bool load_idx)
e692ab53 5461{
e692ab53
NP
5462 entry->procname = procname;
5463 entry->data = data;
5464 entry->maxlen = maxlen;
5465 entry->mode = mode;
5466 entry->proc_handler = proc_handler;
201c373e
NK
5467
5468 if (load_idx) {
5469 entry->extra1 = &min_load_idx;
5470 entry->extra2 = &max_load_idx;
5471 }
e692ab53
NP
5472}
5473
5474static struct ctl_table *
5475sd_alloc_ctl_domain_table(struct sched_domain *sd)
5476{
a5d8c348 5477 struct ctl_table *table = sd_alloc_ctl_entry(13);
e692ab53 5478
ad1cdc1d
MM
5479 if (table == NULL)
5480 return NULL;
5481
e0361851 5482 set_table_entry(&table[0], "min_interval", &sd->min_interval,
201c373e 5483 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5484 set_table_entry(&table[1], "max_interval", &sd->max_interval,
201c373e 5485 sizeof(long), 0644, proc_doulongvec_minmax, false);
e0361851 5486 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
201c373e 5487 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5488 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
201c373e 5489 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5490 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
201c373e 5491 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5492 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
201c373e 5493 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5494 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
201c373e 5495 sizeof(int), 0644, proc_dointvec_minmax, true);
e0361851 5496 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
201c373e 5497 sizeof(int), 0644, proc_dointvec_minmax, false);
e0361851 5498 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
201c373e 5499 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5500 set_table_entry(&table[9], "cache_nice_tries",
e692ab53 5501 &sd->cache_nice_tries,
201c373e 5502 sizeof(int), 0644, proc_dointvec_minmax, false);
ace8b3d6 5503 set_table_entry(&table[10], "flags", &sd->flags,
201c373e 5504 sizeof(int), 0644, proc_dointvec_minmax, false);
a5d8c348 5505 set_table_entry(&table[11], "name", sd->name,
201c373e 5506 CORENAME_MAX_SIZE, 0444, proc_dostring, false);
a5d8c348 5507 /* &table[12] is terminator */
e692ab53
NP
5508
5509 return table;
5510}
5511
9a4e7159 5512static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
5513{
5514 struct ctl_table *entry, *table;
5515 struct sched_domain *sd;
5516 int domain_num = 0, i;
5517 char buf[32];
5518
5519 for_each_domain(cpu, sd)
5520 domain_num++;
5521 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
5522 if (table == NULL)
5523 return NULL;
e692ab53
NP
5524
5525 i = 0;
5526 for_each_domain(cpu, sd) {
5527 snprintf(buf, 32, "domain%d", i);
e692ab53 5528 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5529 entry->mode = 0555;
e692ab53
NP
5530 entry->child = sd_alloc_ctl_domain_table(sd);
5531 entry++;
5532 i++;
5533 }
5534 return table;
5535}
5536
5537static struct ctl_table_header *sd_sysctl_header;
6382bc90 5538static void register_sched_domain_sysctl(void)
e692ab53 5539{
6ad4c188 5540 int i, cpu_num = num_possible_cpus();
e692ab53
NP
5541 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5542 char buf[32];
5543
7378547f
MM
5544 WARN_ON(sd_ctl_dir[0].child);
5545 sd_ctl_dir[0].child = entry;
5546
ad1cdc1d
MM
5547 if (entry == NULL)
5548 return;
5549
6ad4c188 5550 for_each_possible_cpu(i) {
e692ab53 5551 snprintf(buf, 32, "cpu%d", i);
e692ab53 5552 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5553 entry->mode = 0555;
e692ab53 5554 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 5555 entry++;
e692ab53 5556 }
7378547f
MM
5557
5558 WARN_ON(sd_sysctl_header);
e692ab53
NP
5559 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5560}
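/*
 * Illustrative layout sketch (not part of the original file): the table
 * registered above appears under /proc/sys/kernel/sched_domain/, e.g.
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/name
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/...
 *	/proc/sys/kernel/sched_domain/cpu1/...
 *
 * with one domainN directory per sched_domain level of each possible CPU;
 * the 0644 entries can be read and tuned at run time, "name" is read-only.
 */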
6382bc90 5561
7378547f 5562/* may be called multiple times per register */
6382bc90
MM
5563static void unregister_sched_domain_sysctl(void)
5564{
7378547f
MM
5565 if (sd_sysctl_header)
5566 unregister_sysctl_table(sd_sysctl_header);
6382bc90 5567 sd_sysctl_header = NULL;
7378547f
MM
5568 if (sd_ctl_dir[0].child)
5569 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5570}
e692ab53 5571#else
6382bc90
MM
5572static void register_sched_domain_sysctl(void)
5573{
5574}
5575static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5576{
5577}
5578#endif
5579
1f11eb6a
GH
5580static void set_rq_online(struct rq *rq)
5581{
5582 if (!rq->online) {
5583 const struct sched_class *class;
5584
c6c4927b 5585 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5586 rq->online = 1;
5587
5588 for_each_class(class) {
5589 if (class->rq_online)
5590 class->rq_online(rq);
5591 }
5592 }
5593}
5594
5595static void set_rq_offline(struct rq *rq)
5596{
5597 if (rq->online) {
5598 const struct sched_class *class;
5599
5600 for_each_class(class) {
5601 if (class->rq_offline)
5602 class->rq_offline(rq);
5603 }
5604
c6c4927b 5605 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5606 rq->online = 0;
5607 }
5608}
5609
1da177e4
LT
5610/*
5611 * migration_call - callback that gets triggered when a CPU is added.
5612 * Here we can start up the necessary migration thread for the new CPU.
5613 */
48f24c4d
IM
5614static int __cpuinit
5615migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5616{
48f24c4d 5617 int cpu = (long)hcpu;
1da177e4 5618 unsigned long flags;
969c7921 5619 struct rq *rq = cpu_rq(cpu);
1da177e4 5620
48c5ccae 5621 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 5622
1da177e4 5623 case CPU_UP_PREPARE:
a468d389 5624 rq->calc_load_update = calc_load_update;
1da177e4 5625 break;
48f24c4d 5626
1da177e4 5627 case CPU_ONLINE:
1f94ef59 5628 /* Update our root-domain */
05fa785c 5629 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5630 if (rq->rd) {
c6c4927b 5631 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5632 set_rq_online(rq);
1f94ef59 5633 }
05fa785c 5634 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5635 break;
48f24c4d 5636
1da177e4 5637#ifdef CONFIG_HOTPLUG_CPU
08f503b0 5638 case CPU_DYING:
317f3941 5639 sched_ttwu_pending();
57d885fe 5640 /* Update our root-domain */
05fa785c 5641 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5642 if (rq->rd) {
c6c4927b 5643 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5644 set_rq_offline(rq);
57d885fe 5645 }
48c5ccae
PZ
5646 migrate_tasks(cpu);
5647 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 5648 raw_spin_unlock_irqrestore(&rq->lock, flags);
5d180232 5649 break;
48c5ccae 5650
5d180232 5651 case CPU_DEAD:
f319da0c 5652 calc_load_migrate(rq);
57d885fe 5653 break;
1da177e4
LT
5654#endif
5655 }
49c022e6
PZ
5656
5657 update_max_interval();
5658
1da177e4
LT
5659 return NOTIFY_OK;
5660}
5661
f38b0820
PM
5662/*
5663 * Register at high priority so that task migration (migrate_all_tasks)
5664 * happens before everything else. This has to be lower priority than
cdd6c482 5665 * the notifier in the perf_event subsystem, though.
1da177e4 5666 */
26c2143b 5667static struct notifier_block __cpuinitdata migration_notifier = {
1da177e4 5668 .notifier_call = migration_call,
50a323b7 5669 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5670};
5671
3a101d05
TH
5672static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5673 unsigned long action, void *hcpu)
5674{
5675 switch (action & ~CPU_TASKS_FROZEN) {
3a101d05
TH
5676 case CPU_DOWN_FAILED:
5677 set_cpu_active((long)hcpu, true);
5678 return NOTIFY_OK;
5679 default:
5680 return NOTIFY_DONE;
5681 }
5682}
5683
5684static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5685 unsigned long action, void *hcpu)
5686{
5687 switch (action & ~CPU_TASKS_FROZEN) {
5688 case CPU_DOWN_PREPARE:
5689 set_cpu_active((long)hcpu, false);
5690 return NOTIFY_OK;
5691 default:
5692 return NOTIFY_DONE;
5693 }
5694}
5695
7babe8db 5696static int __init migration_init(void)
1da177e4
LT
5697{
5698 void *cpu = (void *)(long)smp_processor_id();
07dccf33 5699 int err;
48f24c4d 5700
3a101d05 5701 /* Initialize migration for the boot CPU */
07dccf33
AM
5702 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5703 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
5704 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5705 register_cpu_notifier(&migration_notifier);
7babe8db 5706
3a101d05
TH
5707 /* Register cpu active notifiers */
5708 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5709 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5710
a004cd42 5711 return 0;
1da177e4 5712}
7babe8db 5713early_initcall(migration_init);
1da177e4
LT
5714#endif
5715
5716#ifdef CONFIG_SMP
476f3534 5717
4cb98839
PZ
5718static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5719
3e9830dc 5720#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 5721
d039ac60 5722static __read_mostly int sched_debug_enabled;
f6630114 5723
d039ac60 5724static int __init sched_debug_setup(char *str)
f6630114 5725{
d039ac60 5726 sched_debug_enabled = 1;
f6630114
MT
5727
5728 return 0;
5729}
d039ac60
PZ
5730early_param("sched_debug", sched_debug_setup);
5731
5732static inline bool sched_debug(void)
5733{
5734 return sched_debug_enabled;
5735}
f6630114 5736
7c16ec58 5737static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 5738 struct cpumask *groupmask)
1da177e4 5739{
4dcf6aff 5740 struct sched_group *group = sd->groups;
434d53b0 5741 char str[256];
1da177e4 5742
968ea6d8 5743 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
96f874e2 5744 cpumask_clear(groupmask);
4dcf6aff
IM
5745
5746 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5747
5748 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 5749 printk("does not load-balance\n");
4dcf6aff 5750 if (sd->parent)
3df0fc5b
PZ
5751 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5752 " has parent");
4dcf6aff 5753 return -1;
41c7ce9a
NP
5754 }
5755
3df0fc5b 5756 printk(KERN_CONT "span %s level %s\n", str, sd->name);
4dcf6aff 5757
758b2cdc 5758 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
5759 printk(KERN_ERR "ERROR: domain->span does not contain "
5760 "CPU%d\n", cpu);
4dcf6aff 5761 }
758b2cdc 5762 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
5763 printk(KERN_ERR "ERROR: domain->groups does not contain"
5764 " CPU%d\n", cpu);
4dcf6aff 5765 }
1da177e4 5766
4dcf6aff 5767 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 5768 do {
4dcf6aff 5769 if (!group) {
3df0fc5b
PZ
5770 printk("\n");
5771 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
5772 break;
5773 }
5774
c3decf0d
PZ
5775 /*
5776 * Even though we initialize ->power to something semi-sane,
5777 * we leave power_orig unset. This allows us to detect if
5778 * domain iteration is still funny without causing /0 traps.
5779 */
5780 if (!group->sgp->power_orig) {
3df0fc5b
PZ
5781 printk(KERN_CONT "\n");
5782 printk(KERN_ERR "ERROR: domain->cpu_power not "
5783 "set\n");
4dcf6aff
IM
5784 break;
5785 }
1da177e4 5786
758b2cdc 5787 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
5788 printk(KERN_CONT "\n");
5789 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
5790 break;
5791 }
1da177e4 5792
cb83b629
PZ
5793 if (!(sd->flags & SD_OVERLAP) &&
5794 cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
5795 printk(KERN_CONT "\n");
5796 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
5797 break;
5798 }
1da177e4 5799
758b2cdc 5800 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 5801
968ea6d8 5802 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
381512cf 5803
3df0fc5b 5804 printk(KERN_CONT " %s", str);
9c3f75cb 5805 if (group->sgp->power != SCHED_POWER_SCALE) {
3df0fc5b 5806 printk(KERN_CONT " (cpu_power = %d)",
9c3f75cb 5807 group->sgp->power);
381512cf 5808 }
1da177e4 5809
4dcf6aff
IM
5810 group = group->next;
5811 } while (group != sd->groups);
3df0fc5b 5812 printk(KERN_CONT "\n");
1da177e4 5813
758b2cdc 5814 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 5815 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 5816
758b2cdc
RR
5817 if (sd->parent &&
5818 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
5819 printk(KERN_ERR "ERROR: parent span is not a superset "
5820 "of domain->span\n");
4dcf6aff
IM
5821 return 0;
5822}
1da177e4 5823
4dcf6aff
IM
5824static void sched_domain_debug(struct sched_domain *sd, int cpu)
5825{
5826 int level = 0;
1da177e4 5827
d039ac60 5828 if (!sched_debug_enabled)
f6630114
MT
5829 return;
5830
4dcf6aff
IM
5831 if (!sd) {
5832 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5833 return;
5834 }
1da177e4 5835
4dcf6aff
IM
5836 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5837
5838 for (;;) {
4cb98839 5839 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 5840 break;
1da177e4
LT
5841 level++;
5842 sd = sd->parent;
33859f7f 5843 if (!sd)
4dcf6aff
IM
5844 break;
5845 }
1da177e4 5846}
6d6bc0ad 5847#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 5848# define sched_domain_debug(sd, cpu) do { } while (0)
d039ac60
PZ
5849static inline bool sched_debug(void)
5850{
5851 return false;
5852}
6d6bc0ad 5853#endif /* CONFIG_SCHED_DEBUG */
1da177e4 5854
1a20ff27 5855static int sd_degenerate(struct sched_domain *sd)
245af2c7 5856{
758b2cdc 5857 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
5858 return 1;
5859
5860 /* Following flags need at least 2 groups */
5861 if (sd->flags & (SD_LOAD_BALANCE |
5862 SD_BALANCE_NEWIDLE |
5863 SD_BALANCE_FORK |
89c4710e
SS
5864 SD_BALANCE_EXEC |
5865 SD_SHARE_CPUPOWER |
5866 SD_SHARE_PKG_RESOURCES)) {
245af2c7
SS
5867 if (sd->groups != sd->groups->next)
5868 return 0;
5869 }
5870
5871 /* Following flags don't use groups */
c88d5910 5872 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
5873 return 0;
5874
5875 return 1;
5876}
5877
48f24c4d
IM
5878static int
5879sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
5880{
5881 unsigned long cflags = sd->flags, pflags = parent->flags;
5882
5883 if (sd_degenerate(parent))
5884 return 1;
5885
758b2cdc 5886 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
5887 return 0;
5888
245af2c7
SS
5889 /* Flags needing groups don't count if only 1 group in parent */
5890 if (parent->groups == parent->groups->next) {
5891 pflags &= ~(SD_LOAD_BALANCE |
5892 SD_BALANCE_NEWIDLE |
5893 SD_BALANCE_FORK |
89c4710e
SS
5894 SD_BALANCE_EXEC |
5895 SD_SHARE_CPUPOWER |
5896 SD_SHARE_PKG_RESOURCES);
5436499e
KC
5897 if (nr_node_ids == 1)
5898 pflags &= ~SD_SERIALIZE;
245af2c7
SS
5899 }
5900 if (~cflags & pflags)
5901 return 0;
5902
5903 return 1;
5904}
5905
dce840a0 5906static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 5907{
dce840a0 5908 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 5909
68e74568 5910 cpupri_cleanup(&rd->cpupri);
c6c4927b
RR
5911 free_cpumask_var(rd->rto_mask);
5912 free_cpumask_var(rd->online);
5913 free_cpumask_var(rd->span);
5914 kfree(rd);
5915}
5916
57d885fe
GH
5917static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5918{
a0490fa3 5919 struct root_domain *old_rd = NULL;
57d885fe 5920 unsigned long flags;
57d885fe 5921
05fa785c 5922 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
5923
5924 if (rq->rd) {
a0490fa3 5925 old_rd = rq->rd;
57d885fe 5926
c6c4927b 5927 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 5928 set_rq_offline(rq);
57d885fe 5929
c6c4927b 5930 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 5931
a0490fa3
IM
5932 /*
5933 * If we dont want to free the old_rt yet then
5934 * set old_rd to NULL to skip the freeing later
5935 * in this function:
5936 */
5937 if (!atomic_dec_and_test(&old_rd->refcount))
5938 old_rd = NULL;
57d885fe
GH
5939 }
5940
5941 atomic_inc(&rd->refcount);
5942 rq->rd = rd;
5943
c6c4927b 5944 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 5945 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 5946 set_rq_online(rq);
57d885fe 5947
05fa785c 5948 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
5949
5950 if (old_rd)
dce840a0 5951 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
5952}
5953
68c38fc3 5954static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
5955{
5956 memset(rd, 0, sizeof(*rd));
5957
68c38fc3 5958 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 5959 goto out;
68c38fc3 5960 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 5961 goto free_span;
68c38fc3 5962 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
c6c4927b 5963 goto free_online;
6e0534f2 5964
68c38fc3 5965 if (cpupri_init(&rd->cpupri) != 0)
68e74568 5966 goto free_rto_mask;
c6c4927b 5967 return 0;
6e0534f2 5968
68e74568
RR
5969free_rto_mask:
5970 free_cpumask_var(rd->rto_mask);
c6c4927b
RR
5971free_online:
5972 free_cpumask_var(rd->online);
5973free_span:
5974 free_cpumask_var(rd->span);
0c910d28 5975out:
c6c4927b 5976 return -ENOMEM;
57d885fe
GH
5977}
5978
029632fb
PZ
5979/*
5980 * By default the system creates a single root-domain with all cpus as
5981 * members (mimicking the global state we have today).
5982 */
5983struct root_domain def_root_domain;
5984
57d885fe
GH
5985static void init_defrootdomain(void)
5986{
68c38fc3 5987 init_rootdomain(&def_root_domain);
c6c4927b 5988
57d885fe
GH
5989 atomic_set(&def_root_domain.refcount, 1);
5990}
5991
dc938520 5992static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
5993{
5994 struct root_domain *rd;
5995
5996 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5997 if (!rd)
5998 return NULL;
5999
68c38fc3 6000 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
6001 kfree(rd);
6002 return NULL;
6003 }
57d885fe
GH
6004
6005 return rd;
6006}
6007
e3589f6c
PZ
6008static void free_sched_groups(struct sched_group *sg, int free_sgp)
6009{
6010 struct sched_group *tmp, *first;
6011
6012 if (!sg)
6013 return;
6014
6015 first = sg;
6016 do {
6017 tmp = sg->next;
6018
6019 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
6020 kfree(sg->sgp);
6021
6022 kfree(sg);
6023 sg = tmp;
6024 } while (sg != first);
6025}
6026
dce840a0
PZ
6027static void free_sched_domain(struct rcu_head *rcu)
6028{
6029 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
e3589f6c
PZ
6030
6031 /*
6032 * If its an overlapping domain it has private groups, iterate and
6033 * nuke them all.
6034 */
6035 if (sd->flags & SD_OVERLAP) {
6036 free_sched_groups(sd->groups, 1);
6037 } else if (atomic_dec_and_test(&sd->groups->ref)) {
9c3f75cb 6038 kfree(sd->groups->sgp);
dce840a0 6039 kfree(sd->groups);
9c3f75cb 6040 }
dce840a0
PZ
6041 kfree(sd);
6042}
6043
6044static void destroy_sched_domain(struct sched_domain *sd, int cpu)
6045{
6046 call_rcu(&sd->rcu, free_sched_domain);
6047}
6048
6049static void destroy_sched_domains(struct sched_domain *sd, int cpu)
6050{
6051 for (; sd; sd = sd->parent)
6052 destroy_sched_domain(sd, cpu);
6053}
6054
/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_id);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd;
	int id = cpu;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd)
		id = cpumask_first(sched_domain_span(sd));

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_id, cpu) = id;
}
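/*
 * Illustrative consumer sketch (not part of this hunk): the per-cpu LLC
 * id cached above is what lets the wake-up path answer "do these two
 * CPUs share a cache?" without walking the domain tree, roughly as
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 *	}
 */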
6079
1da177e4 6080/*
0eab9146 6081 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
6082 * hold the hotplug lock.
6083 */
0eab9146
IM
6084static void
6085cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 6086{
70b97a7f 6087 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
6088 struct sched_domain *tmp;
6089
6090 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 6091 for (tmp = sd; tmp; ) {
245af2c7
SS
6092 struct sched_domain *parent = tmp->parent;
6093 if (!parent)
6094 break;
f29c9b1c 6095
1a848870 6096 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 6097 tmp->parent = parent->parent;
1a848870
SS
6098 if (parent->parent)
6099 parent->parent->child = tmp;
dce840a0 6100 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
6101 } else
6102 tmp = tmp->parent;
245af2c7
SS
6103 }
6104
1a848870 6105 if (sd && sd_degenerate(sd)) {
dce840a0 6106 tmp = sd;
245af2c7 6107 sd = sd->parent;
dce840a0 6108 destroy_sched_domain(tmp, cpu);
1a848870
SS
6109 if (sd)
6110 sd->child = NULL;
6111 }
1da177e4 6112
4cb98839 6113 sched_domain_debug(sd, cpu);
1da177e4 6114
57d885fe 6115 rq_attach_root(rq, rd);
dce840a0 6116 tmp = rq->sd;
674311d5 6117 rcu_assign_pointer(rq->sd, sd);
dce840a0 6118 destroy_sched_domains(tmp, cpu);
518cd623 6119
6fa3eb70
S
6120#if defined (CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined (CONFIG_HMP_PACK_SMALL_TASK)
6121 update_packing_domain(cpu);
6122#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK || CONFIG_HMP_PACK_SMALL_TASK */
518cd623 6123 update_top_cache_domain(cpu);
1da177e4
LT
6124}
6125
6126/* cpus with isolated domains */
dcc30a35 6127static cpumask_var_t cpu_isolated_map;
1da177e4
LT
6128
6129/* Setup the mask of cpus configured for isolated domains */
6130static int __init isolated_cpu_setup(char *str)
6131{
bdddd296 6132 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 6133 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
6134 return 1;
6135}
6136
8927f494 6137__setup("isolcpus=", isolated_cpu_setup);
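/*
 * Illustrative example (not part of the original file): booting with
 *
 *	isolcpus=2,3
 *
 * parses "2,3" into cpu_isolated_map, and init_sched_domains() below
 * masks those CPUs out of the default sched domain, so regular load
 * balancing leaves them alone; work only lands there when placed
 * explicitly, e.g. via sched_setaffinity()/set_cpus_allowed_ptr().
 */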
1da177e4 6138
d3081f52
PZ
6139static const struct cpumask *cpu_cpu_mask(int cpu)
6140{
6141 return cpumask_of_node(cpu_to_node(cpu));
6142}
6143
dce840a0
PZ
6144struct sd_data {
6145 struct sched_domain **__percpu sd;
6146 struct sched_group **__percpu sg;
9c3f75cb 6147 struct sched_group_power **__percpu sgp;
dce840a0
PZ
6148};
6149
49a02c51 6150struct s_data {
21d42ccf 6151 struct sched_domain ** __percpu sd;
49a02c51
AH
6152 struct root_domain *rd;
6153};
6154
2109b99e 6155enum s_alloc {
2109b99e 6156 sa_rootdomain,
21d42ccf 6157 sa_sd,
dce840a0 6158 sa_sd_storage,
2109b99e
AH
6159 sa_none,
6160};
6161
54ab4ff4
PZ
6162struct sched_domain_topology_level;
6163
6164typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
eb7a74e6
PZ
6165typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
6166
e3589f6c
PZ
6167#define SDTL_OVERLAP 0x01
6168
eb7a74e6 6169struct sched_domain_topology_level {
2c402dc3
PZ
6170 sched_domain_init_f init;
6171 sched_domain_mask_f mask;
e3589f6c 6172 int flags;
cb83b629 6173 int numa_level;
54ab4ff4 6174 struct sd_data data;
eb7a74e6
PZ
6175};
6176
c1174876
PZ
6177/*
6178 * Build an iteration mask that can exclude certain CPUs from the upwards
6179 * domain traversal.
6180 *
6181 * Asymmetric node setups can result in situations where the domain tree is of
6182 * unequal depth, make sure to skip domains that already cover the entire
6183 * range.
6184 *
6185 * In that case build_sched_domains() will have terminated the iteration early
6186 * and our sibling sd spans will be empty. Domains should always include the
6187 * cpu they're built on, so check that.
6188 *
6189 */
6190static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6191{
6192 const struct cpumask *span = sched_domain_span(sd);
6193 struct sd_data *sdd = sd->private;
6194 struct sched_domain *sibling;
6195 int i;
6196
6197 for_each_cpu(i, span) {
6198 sibling = *per_cpu_ptr(sdd->sd, i);
6199 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6200 continue;
6201
6202 cpumask_set_cpu(i, sched_group_mask(sg));
6203 }
6204}
6205
6206/*
6207 * Return the canonical balance cpu for this group, this is the first cpu
6208 * of this group that's also in the iteration mask.
6209 */
6210int group_balance_cpu(struct sched_group *sg)
6211{
6212 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6213}
6214
e3589f6c
PZ
6215static int
6216build_overlap_sched_groups(struct sched_domain *sd, int cpu)
6217{
6218 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
6219 const struct cpumask *span = sched_domain_span(sd);
6220 struct cpumask *covered = sched_domains_tmpmask;
6221 struct sd_data *sdd = sd->private;
6222 struct sched_domain *child;
6223 int i;
6224
6225 cpumask_clear(covered);
6226
6227 for_each_cpu(i, span) {
6228 struct cpumask *sg_span;
6229
6230 if (cpumask_test_cpu(i, covered))
6231 continue;
6232
c1174876
PZ
6233 child = *per_cpu_ptr(sdd->sd, i);
6234
6235 /* See the comment near build_group_mask(). */
6236 if (!cpumask_test_cpu(i, sched_domain_span(child)))
6237 continue;
6238
e3589f6c 6239 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
4d78a223 6240 GFP_KERNEL, cpu_to_node(cpu));
e3589f6c
PZ
6241
6242 if (!sg)
6243 goto fail;
6244
6245 sg_span = sched_group_cpus(sg);
e3589f6c
PZ
6246 if (child->child) {
6247 child = child->child;
6248 cpumask_copy(sg_span, sched_domain_span(child));
6249 } else
6250 cpumask_set_cpu(i, sg_span);
6251
6252 cpumask_or(covered, covered, sg_span);
6253
74a5ce20 6254 sg->sgp = *per_cpu_ptr(sdd->sgp, i);
c1174876
PZ
6255 if (atomic_inc_return(&sg->sgp->ref) == 1)
6256 build_group_mask(sd, sg);
6257
c3decf0d
PZ
6258 /*
6259 * Initialize sgp->power such that even if we mess up the
6260 * domains and no possible iteration will get us here, we won't
6261 * die on a /0 trap.
6262 */
6263 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
e3589f6c 6264
c1174876
PZ
6265 /*
6266 * Make sure the first group of this domain contains the
6267 * canonical balance cpu. Otherwise the sched_domain iteration
6268 * breaks. See update_sg_lb_stats().
6269 */
74a5ce20 6270 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
c1174876 6271 group_balance_cpu(sg) == cpu)
e3589f6c
PZ
6272 groups = sg;
6273
6274 if (!first)
6275 first = sg;
6276 if (last)
6277 last->next = sg;
6278 last = sg;
6279 last->next = first;
6280 }
6281 sd->groups = groups;
6282
6283 return 0;
6284
6285fail:
6286 free_sched_groups(first, 0);
6287
6288 return -ENOMEM;
6289}
6290
dce840a0 6291static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 6292{
dce840a0
PZ
6293 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6294 struct sched_domain *child = sd->child;
1da177e4 6295
dce840a0
PZ
6296 if (child)
6297 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 6298
9c3f75cb 6299 if (sg) {
dce840a0 6300 *sg = *per_cpu_ptr(sdd->sg, cpu);
9c3f75cb 6301 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
e3589f6c 6302 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
9c3f75cb 6303 }
dce840a0
PZ
6304
6305 return cpu;
1e9f28fa 6306}
1e9f28fa 6307
01a08546 6308/*
dce840a0
PZ
6309 * build_sched_groups will build a circular linked list of the groups
6310 * covered by the given span, and will set each group's ->cpumask correctly,
6311 * and ->cpu_power to 0.
e3589f6c
PZ
6312 *
6313 * Assumes the sched_domain tree is fully constructed
01a08546 6314 */
e3589f6c
PZ
6315static int
6316build_sched_groups(struct sched_domain *sd, int cpu)
1da177e4 6317{
dce840a0
PZ
6318 struct sched_group *first = NULL, *last = NULL;
6319 struct sd_data *sdd = sd->private;
6320 const struct cpumask *span = sched_domain_span(sd);
f96225fd 6321 struct cpumask *covered;
dce840a0 6322 int i;
9c1cfda2 6323
e3589f6c
PZ
6324 get_group(cpu, sdd, &sd->groups);
6325 atomic_inc(&sd->groups->ref);
6326
6327 if (cpu != cpumask_first(sched_domain_span(sd)))
6328 return 0;
6329
f96225fd
PZ
6330 lockdep_assert_held(&sched_domains_mutex);
6331 covered = sched_domains_tmpmask;
6332
dce840a0 6333 cpumask_clear(covered);
6711cab4 6334
dce840a0
PZ
6335 for_each_cpu(i, span) {
6336 struct sched_group *sg;
6337 int group = get_group(i, sdd, &sg);
6338 int j;
6711cab4 6339
dce840a0
PZ
6340 if (cpumask_test_cpu(i, covered))
6341 continue;
6711cab4 6342
dce840a0 6343 cpumask_clear(sched_group_cpus(sg));
9c3f75cb 6344 sg->sgp->power = 0;
c1174876 6345 cpumask_setall(sched_group_mask(sg));
0601a88d 6346
dce840a0
PZ
6347 for_each_cpu(j, span) {
6348 if (get_group(j, sdd, NULL) != group)
6349 continue;
0601a88d 6350
dce840a0
PZ
6351 cpumask_set_cpu(j, covered);
6352 cpumask_set_cpu(j, sched_group_cpus(sg));
6353 }
0601a88d 6354
dce840a0
PZ
6355 if (!first)
6356 first = sg;
6357 if (last)
6358 last->next = sg;
6359 last = sg;
6360 }
6361 last->next = first;
e3589f6c
PZ
6362
6363 return 0;
0601a88d 6364}
51888ca2 6365
89c4710e
SS
6366/*
6367 * Initialize sched groups cpu_power.
6368 *
6369 * cpu_power indicates the capacity of sched group, which is used while
6370 * distributing the load between different sched groups in a sched domain.
6371 * Typically cpu_power for all the groups in a sched domain will be same unless
6372 * there are asymmetries in the topology. If there are asymmetries, group
6373 * having more cpu_power will pickup more load compared to the group having
6374 * less cpu_power.
89c4710e
SS
6375 */
6376static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6377{
e3589f6c 6378 struct sched_group *sg = sd->groups;
89c4710e 6379
e3589f6c
PZ
6380 WARN_ON(!sd || !sg);
6381
6382 do {
6383 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6384 sg = sg->next;
6385 } while (sg != sd->groups);
89c4710e 6386
c1174876 6387 if (cpu != group_balance_cpu(sg))
e3589f6c 6388 return;
aae6d3dd 6389
d274cb30 6390 update_group_power(sd, cpu);
69e1e811 6391 atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
89c4710e
SS
6392}
6393
029632fb
PZ
6394int __weak arch_sd_sibling_asym_packing(void)
6395{
6396 return 0*SD_ASYM_PACKING;
89c4710e
SS
6397}
6398
6fa3eb70
S
6399#if defined (CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined (CONFIG_HMP_PACK_SMALL_TASK)
6400int __weak arch_sd_share_power_line(void)
6401{
6402 return 0*SD_SHARE_POWERLINE;
6403}
6404#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK || CONFIG_HMP_PACK_SMALL_TASK */
7c16ec58
MT
6405/*
6406 * Initializers for schedule domains
6407 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6408 */
6409
a5d8c348
IM
6410#ifdef CONFIG_SCHED_DEBUG
6411# define SD_INIT_NAME(sd, type) sd->name = #type
6412#else
6413# define SD_INIT_NAME(sd, type) do { } while (0)
6414#endif
6415
54ab4ff4
PZ
6416#define SD_INIT_FUNC(type) \
6417static noinline struct sched_domain * \
6418sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
6419{ \
6420 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
6421 *sd = SD_##type##_INIT; \
54ab4ff4
PZ
6422 SD_INIT_NAME(sd, type); \
6423 sd->private = &tl->data; \
6424 return sd; \
7c16ec58
MT
6425}
6426
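/*
 * Illustrative expansion (not part of the original file): with
 * CONFIG_SCHED_MC enabled, SD_INIT_FUNC(MC) below expands to roughly
 *
 *	static noinline struct sched_domain *
 *	sd_init_MC(struct sched_domain_topology_level *tl, int cpu)
 *	{
 *		struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
 *		*sd = SD_MC_INIT;
 *		SD_INIT_NAME(sd, MC);
 *		sd->private = &tl->data;
 *		return sd;
 *	}
 *
 * i.e. one per-level initializer that only differs in which SD_*_INIT
 * template it copies in.
 */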
6427SD_INIT_FUNC(CPU)
7c16ec58
MT
6428#ifdef CONFIG_SCHED_SMT
6429 SD_INIT_FUNC(SIBLING)
6430#endif
6431#ifdef CONFIG_SCHED_MC
6432 SD_INIT_FUNC(MC)
6433#endif
01a08546
HC
6434#ifdef CONFIG_SCHED_BOOK
6435 SD_INIT_FUNC(BOOK)
6436#endif
7c16ec58 6437
1d3504fc 6438static int default_relax_domain_level = -1;
60495e77 6439int sched_domain_level_max;
1d3504fc
HS
6440
6441static int __init setup_relax_domain_level(char *str)
6442{
a841f8ce
DS
6443 if (kstrtoint(str, 0, &default_relax_domain_level))
6444 pr_warn("Unable to set relax_domain_level\n");
30e0e178 6445
1d3504fc
HS
6446 return 1;
6447}
6448__setup("relax_domain_level=", setup_relax_domain_level);
6449
6450static void set_domain_attribute(struct sched_domain *sd,
6451 struct sched_domain_attr *attr)
6452{
6453 int request;
6454
6455 if (!attr || attr->relax_domain_level < 0) {
6456 if (default_relax_domain_level < 0)
6457 return;
6458 else
6459 request = default_relax_domain_level;
6460 } else
6461 request = attr->relax_domain_level;
6462 if (request < sd->level) {
6463 /* turn off idle balance on this domain */
c88d5910 6464 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6465 } else {
6466 /* turn on idle balance on this domain */
c88d5910 6467 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6468 }
6469}
6470
54ab4ff4
PZ
6471static void __sdt_free(const struct cpumask *cpu_map);
6472static int __sdt_alloc(const struct cpumask *cpu_map);
6473
2109b99e
AH
6474static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6475 const struct cpumask *cpu_map)
6476{
6477 switch (what) {
2109b99e 6478 case sa_rootdomain:
822ff793
PZ
6479 if (!atomic_read(&d->rd->refcount))
6480 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
6481 case sa_sd:
6482 free_percpu(d->sd); /* fall through */
dce840a0 6483 case sa_sd_storage:
54ab4ff4 6484 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
6485 case sa_none:
6486 break;
6487 }
6488}
3404c8d9 6489
2109b99e
AH
6490static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6491 const struct cpumask *cpu_map)
6492{
dce840a0
PZ
6493 memset(d, 0, sizeof(*d));
6494
54ab4ff4
PZ
6495 if (__sdt_alloc(cpu_map))
6496 return sa_sd_storage;
dce840a0
PZ
6497 d->sd = alloc_percpu(struct sched_domain *);
6498 if (!d->sd)
6499 return sa_sd_storage;
2109b99e 6500 d->rd = alloc_rootdomain();
dce840a0 6501 if (!d->rd)
21d42ccf 6502 return sa_sd;
2109b99e
AH
6503 return sa_rootdomain;
6504}
57d885fe 6505
dce840a0
PZ
6506/*
6507 * NULL the sd_data elements we've used to build the sched_domain and
6508 * sched_group structure so that the subsequent __free_domain_allocs()
6509 * will not free the data we're using.
6510 */
6511static void claim_allocations(int cpu, struct sched_domain *sd)
6512{
6513 struct sd_data *sdd = sd->private;
dce840a0
PZ
6514
6515 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6516 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6517
e3589f6c 6518 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
dce840a0 6519 *per_cpu_ptr(sdd->sg, cpu) = NULL;
e3589f6c
PZ
6520
6521 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
9c3f75cb 6522 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
dce840a0
PZ
6523}
6524
2c402dc3
PZ
6525#ifdef CONFIG_SCHED_SMT
6526static const struct cpumask *cpu_smt_mask(int cpu)
7f4588f3 6527{
2c402dc3 6528 return topology_thread_cpumask(cpu);
3bd65a80 6529}
2c402dc3 6530#endif
7f4588f3 6531
d069b916
PZ
6532/*
6533 * Topology list, bottom-up.
6534 */
2c402dc3 6535static struct sched_domain_topology_level default_topology[] = {
d069b916
PZ
6536#ifdef CONFIG_SCHED_SMT
6537 { sd_init_SIBLING, cpu_smt_mask, },
01a08546 6538#endif
1e9f28fa 6539#ifdef CONFIG_SCHED_MC
2c402dc3 6540 { sd_init_MC, cpu_coregroup_mask, },
1e9f28fa 6541#endif
d069b916
PZ
6542#ifdef CONFIG_SCHED_BOOK
6543 { sd_init_BOOK, cpu_book_mask, },
6544#endif
6545 { sd_init_CPU, cpu_cpu_mask, },
eb7a74e6
PZ
6546 { NULL, },
6547};
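/*
 * Illustrative example (not part of the original file): on a multi-core
 * SMT system with CONFIG_SCHED_SMT and CONFIG_SCHED_MC enabled, the list
 * above makes build_sched_domains() stack, bottom-up,
 *
 *	SIBLING (hardware threads of a core)
 *	  -> MC (cores sharing a package/cache)
 *	    -> CPU (all CPUs of the node)
 *
 * and sched_init_numa() may append further NUMA levels on top of that.
 */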
6548
6549static struct sched_domain_topology_level *sched_domain_topology = default_topology;
6550
cb83b629
PZ
6551#ifdef CONFIG_NUMA
6552
6553static int sched_domains_numa_levels;
cb83b629
PZ
6554static int *sched_domains_numa_distance;
6555static struct cpumask ***sched_domains_numa_masks;
6556static int sched_domains_curr_level;
6557
cb83b629
PZ
6558static inline int sd_local_flags(int level)
6559{
10717dcd 6560 if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
cb83b629
PZ
6561 return 0;
6562
6563 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
6564}
6565
6566static struct sched_domain *
6567sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
6568{
6569 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
6570 int level = tl->numa_level;
6571 int sd_weight = cpumask_weight(
6572 sched_domains_numa_masks[level][cpu_to_node(cpu)]);
6573
6574 *sd = (struct sched_domain){
6575 .min_interval = sd_weight,
6576 .max_interval = 2*sd_weight,
6577 .busy_factor = 32,
870a0bb5 6578 .imbalance_pct = 125,
cb83b629
PZ
6579 .cache_nice_tries = 2,
6580 .busy_idx = 3,
6581 .idle_idx = 2,
6582 .newidle_idx = 0,
6583 .wake_idx = 0,
6584 .forkexec_idx = 0,
6585
6586 .flags = 1*SD_LOAD_BALANCE
6587 | 1*SD_BALANCE_NEWIDLE
6588 | 0*SD_BALANCE_EXEC
6589 | 0*SD_BALANCE_FORK
6590 | 0*SD_BALANCE_WAKE
6591 | 0*SD_WAKE_AFFINE
cb83b629 6592 | 0*SD_SHARE_CPUPOWER
cb83b629
PZ
6593 | 0*SD_SHARE_PKG_RESOURCES
6594 | 1*SD_SERIALIZE
6595 | 0*SD_PREFER_SIBLING
6596 | sd_local_flags(level)
6597 ,
6598 .last_balance = jiffies,
6599 .balance_interval = sd_weight,
6600 };
6601 SD_INIT_NAME(sd, NUMA);
6602 sd->private = &tl->data;
6603
6604 /*
6605 * Ugly hack to pass state to sd_numa_mask()...
6606 */
6607 sched_domains_curr_level = tl->numa_level;
6608
6609 return sd;
6610}
6611
6612static const struct cpumask *sd_numa_mask(int cpu)
6613{
6614 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6615}
6616
d039ac60
PZ
6617static void sched_numa_warn(const char *str)
6618{
6619 static int done = false;
6620 int i,j;
6621
6622 if (done)
6623 return;
6624
6625 done = true;
6626
6627 printk(KERN_WARNING "ERROR: %s\n\n", str);
6628
6629 for (i = 0; i < nr_node_ids; i++) {
6630 printk(KERN_WARNING " ");
6631 for (j = 0; j < nr_node_ids; j++)
6632 printk(KERN_CONT "%02d ", node_distance(i,j));
6633 printk(KERN_CONT "\n");
6634 }
6635 printk(KERN_WARNING "\n");
6636}
6637
6638static bool find_numa_distance(int distance)
6639{
6640 int i;
6641
6642 if (distance == node_distance(0, 0))
6643 return true;
6644
6645 for (i = 0; i < sched_domains_numa_levels; i++) {
6646 if (sched_domains_numa_distance[i] == distance)
6647 return true;
6648 }
6649
6650 return false;
6651}
6652
cb83b629
PZ
6653static void sched_init_numa(void)
6654{
6655 int next_distance, curr_distance = node_distance(0, 0);
6656 struct sched_domain_topology_level *tl;
6657 int level = 0;
6658 int i, j, k;
6659
cb83b629
PZ
6660 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6661 if (!sched_domains_numa_distance)
6662 return;
6663
6664 /*
6665 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6666 * unique distances in the node_distance() table.
6667 *
6668 * Assumes node_distance(0,j) includes all distances in
6669 * node_distance(i,j) in order to avoid cubic time.
cb83b629
PZ
6670 */
6671 next_distance = curr_distance;
6672 for (i = 0; i < nr_node_ids; i++) {
6673 for (j = 0; j < nr_node_ids; j++) {
d039ac60
PZ
6674 for (k = 0; k < nr_node_ids; k++) {
6675 int distance = node_distance(i, k);
6676
6677 if (distance > curr_distance &&
6678 (distance < next_distance ||
6679 next_distance == curr_distance))
6680 next_distance = distance;
6681
6682 /*
6683 * While not a strong assumption it would be nice to know
6684 * about cases where if node A is connected to B, B is not
6685 * equally connected to A.
6686 */
6687 if (sched_debug() && node_distance(k, i) != distance)
6688 sched_numa_warn("Node-distance not symmetric");
6689
6690 if (sched_debug() && i && !find_numa_distance(distance))
6691 sched_numa_warn("Node-0 not representative");
6692 }
6693 if (next_distance != curr_distance) {
6694 sched_domains_numa_distance[level++] = next_distance;
6695 sched_domains_numa_levels = level;
6696 curr_distance = next_distance;
6697 } else break;
cb83b629 6698 }
d039ac60
PZ
6699
6700 /*
6701 * In case of sched_debug() we verify the above assumption.
6702 */
6703 if (!sched_debug())
6704 break;
cb83b629
PZ
6705 }
6706 /*
6707 * 'level' contains the number of unique distances, excluding the
6708 * identity distance node_distance(i,i).
6709 *
28b4a521 6710 * The sched_domains_numa_distance[] array includes the actual distance
cb83b629
PZ
6711 * numbers.
6712 */
6713
	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain fewer than 'level' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

cb83b629
PZ
6725 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6726 if (!sched_domains_numa_masks)
6727 return;
6728
6729 /*
6730 * Now for each level, construct a mask per node which contains all
6731 * cpus of nodes that are that many hops away from us.
6732 */
6733 for (i = 0; i < level; i++) {
6734 sched_domains_numa_masks[i] =
6735 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6736 if (!sched_domains_numa_masks[i])
6737 return;
6738
6739 for (j = 0; j < nr_node_ids; j++) {
2ea45800 6740 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
cb83b629
PZ
6741 if (!mask)
6742 return;
6743
6744 sched_domains_numa_masks[i][j] = mask;
6745
6746 for (k = 0; k < nr_node_ids; k++) {
dd7d8634 6747 if (node_distance(j, k) > sched_domains_numa_distance[i])
cb83b629
PZ
6748 continue;
6749
6750 cpumask_or(mask, mask, cpumask_of_node(k));
6751 }
6752 }
6753 }
6754
6755 tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
6756 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6757 if (!tl)
6758 return;
6759
6760 /*
6761 * Copy the default topology bits..
6762 */
6763 for (i = 0; default_topology[i].init; i++)
6764 tl[i] = default_topology[i];
6765
6766 /*
6767 * .. and append 'j' levels of NUMA goodness.
6768 */
6769 for (j = 0; j < level; i++, j++) {
6770 tl[i] = (struct sched_domain_topology_level){
6771 .init = sd_numa_init,
6772 .mask = sd_numa_mask,
6773 .flags = SDTL_OVERLAP,
6774 .numa_level = j,
6775 };
6776 }
6777
6778 sched_domain_topology = tl;
5f7865f3
TC
6779
6780 sched_domains_numa_levels = level;
cb83b629 6781}
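/*
 * Illustrative worked example (not part of the original file): on a
 * 4-node box whose node_distance() table is
 *
 *	10 20 20 30
 *	20 10 20 20
 *	20 20 10 20
 *	30 20 20 10
 *
 * the deduplicating pass above finds the unique non-local distances
 * {20, 30}, so sched_domains_numa_distance[] = {20, 30} and level = 2;
 * two NUMA topology levels are then appended, one grouping nodes within
 * distance 20 and one spanning everything within distance 30.
 */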
301a5cba
TC
6782
6783static void sched_domains_numa_masks_set(int cpu)
6784{
6785 int i, j;
6786 int node = cpu_to_node(cpu);
6787
6788 for (i = 0; i < sched_domains_numa_levels; i++) {
6789 for (j = 0; j < nr_node_ids; j++) {
6790 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6791 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6792 }
6793 }
6794}
6795
6796static void sched_domains_numa_masks_clear(int cpu)
6797{
6798 int i, j;
6799 for (i = 0; i < sched_domains_numa_levels; i++) {
6800 for (j = 0; j < nr_node_ids; j++)
6801 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6802 }
6803}
6804
6805/*
6806 * Update sched_domains_numa_masks[level][node] array when new cpus
6807 * are onlined.
6808 */
6809static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6810 unsigned long action,
6811 void *hcpu)
6812{
6813 int cpu = (long)hcpu;
6814
6815 switch (action & ~CPU_TASKS_FROZEN) {
6816 case CPU_ONLINE:
6817 sched_domains_numa_masks_set(cpu);
6818 break;
6819
6820 case CPU_DEAD:
6821 sched_domains_numa_masks_clear(cpu);
6822 break;
6823
6824 default:
6825 return NOTIFY_DONE;
6826 }
6827
6828 return NOTIFY_OK;
cb83b629
PZ
6829}
6830#else
6831static inline void sched_init_numa(void)
6832{
6833}
301a5cba
TC
6834
6835static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6836 unsigned long action,
6837 void *hcpu)
6838{
6839 return 0;
6840}
cb83b629
PZ
6841#endif /* CONFIG_NUMA */
6842
54ab4ff4
PZ
6843static int __sdt_alloc(const struct cpumask *cpu_map)
6844{
6845 struct sched_domain_topology_level *tl;
6846 int j;
6847
6848 for (tl = sched_domain_topology; tl->init; tl++) {
6849 struct sd_data *sdd = &tl->data;
6850
6851 sdd->sd = alloc_percpu(struct sched_domain *);
6852 if (!sdd->sd)
6853 return -ENOMEM;
6854
6855 sdd->sg = alloc_percpu(struct sched_group *);
6856 if (!sdd->sg)
6857 return -ENOMEM;
6858
9c3f75cb
PZ
6859 sdd->sgp = alloc_percpu(struct sched_group_power *);
6860 if (!sdd->sgp)
6861 return -ENOMEM;
6862
54ab4ff4
PZ
6863 for_each_cpu(j, cpu_map) {
6864 struct sched_domain *sd;
6865 struct sched_group *sg;
9c3f75cb 6866 struct sched_group_power *sgp;
54ab4ff4
PZ
6867
6868 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6869 GFP_KERNEL, cpu_to_node(j));
6870 if (!sd)
6871 return -ENOMEM;
6872
6873 *per_cpu_ptr(sdd->sd, j) = sd;
6874
6875 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6876 GFP_KERNEL, cpu_to_node(j));
6877 if (!sg)
6878 return -ENOMEM;
6879
30b4e9eb
IM
6880 sg->next = sg;
6881
54ab4ff4 6882 *per_cpu_ptr(sdd->sg, j) = sg;
9c3f75cb 6883
c1174876 6884 sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
9c3f75cb
PZ
6885 GFP_KERNEL, cpu_to_node(j));
6886 if (!sgp)
6887 return -ENOMEM;
6888
6889 *per_cpu_ptr(sdd->sgp, j) = sgp;
54ab4ff4
PZ
6890 }
6891 }
6892
6893 return 0;
6894}
6895
6896static void __sdt_free(const struct cpumask *cpu_map)
6897{
6898 struct sched_domain_topology_level *tl;
6899 int j;
6900
6901 for (tl = sched_domain_topology; tl->init; tl++) {
6902 struct sd_data *sdd = &tl->data;
6903
6904 for_each_cpu(j, cpu_map) {
fb2cf2c6 6905 struct sched_domain *sd;
6906
6907 if (sdd->sd) {
6908 sd = *per_cpu_ptr(sdd->sd, j);
6909 if (sd && (sd->flags & SD_OVERLAP))
6910 free_sched_groups(sd->groups, 0);
6911 kfree(*per_cpu_ptr(sdd->sd, j));
6912 }
6913
6914 if (sdd->sg)
6915 kfree(*per_cpu_ptr(sdd->sg, j));
6916 if (sdd->sgp)
6917 kfree(*per_cpu_ptr(sdd->sgp, j));
54ab4ff4
PZ
6918 }
6919 free_percpu(sdd->sd);
fb2cf2c6 6920 sdd->sd = NULL;
54ab4ff4 6921 free_percpu(sdd->sg);
fb2cf2c6 6922 sdd->sg = NULL;
9c3f75cb 6923 free_percpu(sdd->sgp);
fb2cf2c6 6924 sdd->sgp = NULL;
54ab4ff4
PZ
6925 }
6926}
6927
2c402dc3
PZ
6928struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6929 struct s_data *d, const struct cpumask *cpu_map,
d069b916 6930 struct sched_domain_attr *attr, struct sched_domain *child,
2c402dc3
PZ
6931 int cpu)
6932{
54ab4ff4 6933 struct sched_domain *sd = tl->init(tl, cpu);
2c402dc3 6934 if (!sd)
d069b916 6935 return child;
2c402dc3 6936
2c402dc3 6937 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
6938 if (child) {
6939 sd->level = child->level + 1;
6940 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 6941 child->parent = sd;
60495e77 6942 }
d069b916 6943 sd->child = child;
a841f8ce 6944 set_domain_attribute(sd, attr);
2c402dc3
PZ
6945
6946 return sd;
6947}
6948
2109b99e
AH
6949/*
6950 * Build sched domains for a given set of cpus and attach the sched domains
6951 * to the individual cpus
6952 */
dce840a0
PZ
6953static int build_sched_domains(const struct cpumask *cpu_map,
6954 struct sched_domain_attr *attr)
2109b99e
AH
6955{
6956 enum s_alloc alloc_state = sa_none;
dce840a0 6957 struct sched_domain *sd;
2109b99e 6958 struct s_data d;
822ff793 6959 int i, ret = -ENOMEM;
9c1cfda2 6960
2109b99e
AH
6961 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6962 if (alloc_state != sa_rootdomain)
6963 goto error;
9c1cfda2 6964
dce840a0 6965 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 6966 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
6967 struct sched_domain_topology_level *tl;
6968
3bd65a80 6969 sd = NULL;
e3589f6c 6970 for (tl = sched_domain_topology; tl->init; tl++) {
2c402dc3 6971 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
e3589f6c
PZ
6972 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6973 sd->flags |= SD_OVERLAP;
d110235d
PZ
6974 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6975 break;
e3589f6c 6976 }
d274cb30 6977
d069b916
PZ
6978 while (sd->child)
6979 sd = sd->child;
6980
21d42ccf 6981 *per_cpu_ptr(d.sd, i) = sd;
dce840a0
PZ
6982 }
6983
6984 /* Build the groups for the domains */
6985 for_each_cpu(i, cpu_map) {
6986 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6987 sd->span_weight = cpumask_weight(sched_domain_span(sd));
e3589f6c
PZ
6988 if (sd->flags & SD_OVERLAP) {
6989 if (build_overlap_sched_groups(sd, i))
6990 goto error;
6991 } else {
6992 if (build_sched_groups(sd, i))
6993 goto error;
6994 }
1cf51902 6995 }
a06dadbe 6996 }
9c1cfda2 6997
1da177e4 6998 /* Calculate CPU power for physical packages and nodes */
a9c9a9b6
PZ
6999 for (i = nr_cpumask_bits-1; i >= 0; i--) {
7000 if (!cpumask_test_cpu(i, cpu_map))
7001 continue;
9c1cfda2 7002
dce840a0
PZ
7003 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7004 claim_allocations(i, sd);
cd4ea6ae 7005 init_sched_groups_power(i, sd);
dce840a0 7006 }
f712c0c7 7007 }
9c1cfda2 7008
1da177e4 7009 /* Attach the domains */
dce840a0 7010 rcu_read_lock();
abcd083a 7011 for_each_cpu(i, cpu_map) {
21d42ccf 7012 sd = *per_cpu_ptr(d.sd, i);
49a02c51 7013 cpu_attach_domain(sd, d.rd, i);
1da177e4 7014 }
dce840a0 7015 rcu_read_unlock();
51888ca2 7016
822ff793 7017 ret = 0;
51888ca2 7018error:
2109b99e 7019 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 7020 return ret;
1da177e4 7021}
029190c5 7022
acc3f5d7 7023static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 7024static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
7025static struct sched_domain_attr *dattr_cur;
 7026				/* attributes of custom domains in 'doms_cur' */
029190c5
PJ
7027
7028/*
7029 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
 7030 * cpumask) fails, then fall back to a single sched domain,
7031 * as determined by the single cpumask fallback_doms.
029190c5 7032 */
4212823f 7033static cpumask_var_t fallback_doms;
029190c5 7034
ee79d1bd
HC
7035/*
7036 * arch_update_cpu_topology lets virtualized architectures update the
7037 * cpu core maps. It is supposed to return 1 if the topology changed
7038 * or 0 if it stayed the same.
7039 */
7040int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 7041{
ee79d1bd 7042 return 0;
22e52b07
HC
7043}
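/*
 * Editor's illustrative sketch, not part of the original file: the weak
 * definition above is only a default.  An architecture (or a hypervisor-
 * aware platform) can supply a strong definition that re-reads its core
 * maps and reports whether anything moved.  The "example_topology_dirty"
 * flag below is an assumption used purely for illustration, so the
 * sketch is compiled out.
 */
#if 0
static int example_topology_dirty;

int arch_update_cpu_topology(void)
{
	/* Re-read core/package maps from firmware or the hypervisor here. */
	if (!example_topology_dirty)
		return 0;	/* unchanged: keep the current domains */

	example_topology_dirty = 0;
	return 1;		/* changed: ask the scheduler to rebuild */
}
#endif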
7044
acc3f5d7
RR
7045cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7046{
7047 int i;
7048 cpumask_var_t *doms;
7049
7050 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7051 if (!doms)
7052 return NULL;
7053 for (i = 0; i < ndoms; i++) {
7054 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7055 free_sched_domains(doms, i);
7056 return NULL;
7057 }
7058 }
7059 return doms;
7060}
7061
7062void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7063{
7064 unsigned int i;
7065 for (i = 0; i < ndoms; i++)
7066 free_cpumask_var(doms[i]);
7067 kfree(doms);
7068}
7069
1a20ff27 7070/*
41a2d6cf 7071 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
7072 * For now this just excludes isolated cpus, but could be used to
7073 * exclude other special cases in the future.
1a20ff27 7074 */
c4a8849a 7075static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 7076{
7378547f
MM
7077 int err;
7078
22e52b07 7079 arch_update_cpu_topology();
029190c5 7080 ndoms_cur = 1;
acc3f5d7 7081 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 7082 if (!doms_cur)
acc3f5d7
RR
7083 doms_cur = &fallback_doms;
7084 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dce840a0 7085 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 7086 register_sched_domain_sysctl();
7378547f
MM
7087
7088 return err;
1a20ff27
DG
7089}
7090
1a20ff27
DG
7091/*
7092 * Detach sched domains from a group of cpus specified in cpu_map
7093 * These cpus will now be attached to the NULL domain
7094 */
96f874e2 7095static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
7096{
7097 int i;
7098
dce840a0 7099 rcu_read_lock();
abcd083a 7100 for_each_cpu(i, cpu_map)
57d885fe 7101 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 7102 rcu_read_unlock();
1a20ff27
DG
7103}
7104
1d3504fc
HS
7105/* handle null as "default" */
7106static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7107 struct sched_domain_attr *new, int idx_new)
7108{
7109 struct sched_domain_attr tmp;
7110
7111 /* fast path */
7112 if (!new && !cur)
7113 return 1;
7114
7115 tmp = SD_ATTR_INIT;
7116 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7117 new ? (new + idx_new) : &tmp,
7118 sizeof(struct sched_domain_attr));
7119}
7120
029190c5
PJ
7121/*
7122 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 7123 * cpumasks in the array doms_new[]. This compares
029190c5
PJ
7124 * doms_new[] to the current sched domain partitioning, doms_cur[].
7125 * It destroys each deleted domain and builds each new domain.
7126 *
acc3f5d7 7127 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
 7128 * The masks don't intersect (don't overlap). We should set up one
7129 * sched domain for each mask. CPUs not in any of the cpumasks will
7130 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
7131 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7132 * it as it is.
7133 *
acc3f5d7
RR
 7134 * The passed-in 'doms_new' should be allocated using
 7135 * alloc_sched_domains. This routine takes ownership of it and will
 7136 * free_sched_domains it when done with it. If the caller failed the
 7137 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 7138 * and partition_sched_domains() will fall back to the single partition
 7139 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 7140 *
96f874e2 7141 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
7142 * ndoms_new == 0 is a special case for destroying existing domains,
7143 * and it will not create the default domain.
dfb512ec 7144 *
029190c5
PJ
7145 * Call with hotplug lock held
7146 */
acc3f5d7 7147void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 7148 struct sched_domain_attr *dattr_new)
029190c5 7149{
dfb512ec 7150 int i, j, n;
d65bd5ec 7151 int new_topology;
029190c5 7152
712555ee 7153 mutex_lock(&sched_domains_mutex);
a1835615 7154
7378547f
MM
7155 /* always unregister in case we don't destroy any domains */
7156 unregister_sched_domain_sysctl();
7157
d65bd5ec
HC
7158 /* Let architecture update cpu core mappings. */
7159 new_topology = arch_update_cpu_topology();
7160
dfb512ec 7161 n = doms_new ? ndoms_new : 0;
029190c5
PJ
7162
7163 /* Destroy deleted domains */
7164 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 7165 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7166 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 7167 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
7168 goto match1;
7169 }
7170 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 7171 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
7172match1:
7173 ;
7174 }
7175
e761b772
MK
7176 if (doms_new == NULL) {
7177 ndoms_cur = 0;
acc3f5d7 7178 doms_new = &fallback_doms;
6ad4c188 7179 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 7180 WARN_ON_ONCE(dattr_new);
e761b772
MK
7181 }
7182
029190c5
PJ
7183 /* Build new domains */
7184 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 7185 for (j = 0; j < ndoms_cur && !new_topology; j++) {
acc3f5d7 7186 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 7187 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
7188 goto match2;
7189 }
7190 /* no match - add a new doms_new */
dce840a0 7191 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
7192match2:
7193 ;
7194 }
7195
7196 /* Remember the new sched domains */
acc3f5d7
RR
7197 if (doms_cur != &fallback_doms)
7198 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 7199 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 7200 doms_cur = doms_new;
1d3504fc 7201 dattr_cur = dattr_new;
029190c5 7202 ndoms_cur = ndoms_new;
7378547f
MM
7203
7204 register_sched_domain_sysctl();
a1835615 7205
712555ee 7206 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
7207}
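/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * caller of the API above, rebuilding the partitioning as two
 * non-overlapping domains.  "example_repartition", "maskA" and "maskB"
 * are assumptions for illustration (the real callers are the cpuset code
 * and the hotplug notifiers below), so the sketch is compiled out.
 */
#if 0
static void example_repartition(const struct cpumask *maskA,
				const struct cpumask *maskB)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	get_online_cpus();		/* hotplug lock, as required above */
	if (!doms) {
		/* allocation failed: NULL + ndoms_new == 1 => fallback_doms */
		partition_sched_domains(1, NULL, NULL);
	} else {
		cpumask_copy(doms[0], maskA);
		cpumask_copy(doms[1], maskB);
		/* takes ownership of 'doms' and frees it on the next call */
		partition_sched_domains(2, doms, NULL);
	}
	put_online_cpus();
}
#endif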
7208
d35be8ba
SB
7209static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7210
1da177e4 7211/*
3a101d05
TH
7212 * Update cpusets according to cpu_active mask. If cpusets are
7213 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7214 * around partition_sched_domains().
d35be8ba
SB
7215 *
7216 * If we come here as part of a suspend/resume, don't touch cpusets because we
7217 * want to restore it back to its original state upon resume anyway.
1da177e4 7218 */
0b2e918a
TH
7219static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7220 void *hcpu)
e761b772 7221{
d35be8ba
SB
7222 switch (action) {
7223 case CPU_ONLINE_FROZEN:
7224 case CPU_DOWN_FAILED_FROZEN:
7225
7226 /*
 7227 * num_cpus_frozen tracks how many CPUs are involved in the suspend/
 7228 * resume sequence. As long as this is not the last online
7229 * operation in the resume sequence, just build a single sched
7230 * domain, ignoring cpusets.
7231 */
7232 num_cpus_frozen--;
7233 if (likely(num_cpus_frozen)) {
7234 partition_sched_domains(1, NULL, NULL);
7235 break;
7236 }
7237
7238 /*
7239 * This is the last CPU online operation. So fall through and
7240 * restore the original sched domains by considering the
7241 * cpuset configurations.
7242 */
7243
e761b772 7244 case CPU_ONLINE:
6ad4c188 7245 case CPU_DOWN_FAILED:
7ddf96b0 7246 cpuset_update_active_cpus(true);
d35be8ba 7247 break;
3a101d05
TH
7248 default:
7249 return NOTIFY_DONE;
7250 }
d35be8ba 7251 return NOTIFY_OK;
3a101d05 7252}
e761b772 7253
0b2e918a
TH
7254static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7255 void *hcpu)
3a101d05 7256{
d35be8ba 7257 switch (action) {
3a101d05 7258 case CPU_DOWN_PREPARE:
7ddf96b0 7259 cpuset_update_active_cpus(false);
d35be8ba
SB
7260 break;
7261 case CPU_DOWN_PREPARE_FROZEN:
7262 num_cpus_frozen++;
7263 partition_sched_domains(1, NULL, NULL);
7264 break;
e761b772
MK
7265 default:
7266 return NOTIFY_DONE;
7267 }
d35be8ba 7268 return NOTIFY_OK;
e761b772 7269}
e761b772 7270
1da177e4
LT
7271void __init sched_init_smp(void)
7272{
dcc30a35
RR
7273 cpumask_var_t non_isolated_cpus;
7274
7275 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 7276 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 7277
cb83b629
PZ
7278 sched_init_numa();
7279
95402b38 7280 get_online_cpus();
712555ee 7281 mutex_lock(&sched_domains_mutex);
c4a8849a 7282 init_sched_domains(cpu_active_mask);
dcc30a35
RR
7283 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7284 if (cpumask_empty(non_isolated_cpus))
7285 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 7286 mutex_unlock(&sched_domains_mutex);
95402b38 7287 put_online_cpus();
e761b772 7288
301a5cba 7289 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
3a101d05
TH
7290 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7291 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772
MK
7292
7293 /* RT runtime code needs to handle some hotplug events */
7294 hotcpu_notifier(update_runtime, 0);
7295
b328ca18 7296 init_hrtick();
5c1e1767
NP
7297
7298 /* Move init over to a non-isolated CPU */
dcc30a35 7299 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 7300 BUG();
19978ca6 7301 sched_init_granularity();
dcc30a35 7302 free_cpumask_var(non_isolated_cpus);
4212823f 7303
0e3900e6 7304 init_sched_rt_class();
1da177e4
LT
7305}
7306#else
7307void __init sched_init_smp(void)
7308{
19978ca6 7309 sched_init_granularity();
1da177e4
LT
7310}
7311#endif /* CONFIG_SMP */
7312
cd1bb94b
AB
7313const_debug unsigned int sysctl_timer_migration = 1;
7314
1da177e4
LT
7315int in_sched_functions(unsigned long addr)
7316{
1da177e4
LT
7317 return in_lock_functions(addr) ||
7318 (addr >= (unsigned long)__sched_text_start
7319 && addr < (unsigned long)__sched_text_end);
7320}
7321
029632fb 7322#ifdef CONFIG_CGROUP_SCHED
27b4b931
LZ
7323/*
7324 * Default task group.
 7325 * Every task in the system belongs to this group at bootup.
7326 */
029632fb 7327struct task_group root_task_group;
35cf4e50 7328LIST_HEAD(task_groups);
052f1dc7 7329#endif
6f505b16 7330
e6252c3e 7331DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
6f505b16 7332
1da177e4
LT
7333void __init sched_init(void)
7334{
dd41f596 7335 int i, j;
434d53b0
MT
7336 unsigned long alloc_size = 0, ptr;
7337
7338#ifdef CONFIG_FAIR_GROUP_SCHED
7339 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7340#endif
7341#ifdef CONFIG_RT_GROUP_SCHED
7342 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6 7343#endif
df7c8e84 7344#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 7345 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0 7346#endif
434d53b0 7347 if (alloc_size) {
36b7b6d4 7348 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
7349
7350#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7351 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
7352 ptr += nr_cpu_ids * sizeof(void **);
7353
07e06b01 7354 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 7355 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7356
6d6bc0ad 7357#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 7358#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7359 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
7360 ptr += nr_cpu_ids * sizeof(void **);
7361
07e06b01 7362 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7363 ptr += nr_cpu_ids * sizeof(void **);
7364
6d6bc0ad 7365#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
7366#ifdef CONFIG_CPUMASK_OFFSTACK
7367 for_each_possible_cpu(i) {
e6252c3e 7368 per_cpu(load_balance_mask, i) = (void *)ptr;
df7c8e84
RR
7369 ptr += cpumask_size();
7370 }
7371#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 7372 }
dd41f596 7373
57d885fe
GH
7374#ifdef CONFIG_SMP
7375 init_defrootdomain();
7376#endif
7377
d0b27fa7
PZ
7378 init_rt_bandwidth(&def_rt_bandwidth,
7379 global_rt_period(), global_rt_runtime());
7380
7381#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7382 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 7383 global_rt_period(), global_rt_runtime());
6d6bc0ad 7384#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7385
7c941438 7386#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
7387 list_add(&root_task_group.list, &task_groups);
7388 INIT_LIST_HEAD(&root_task_group.children);
f4d6f6c2 7389 INIT_LIST_HEAD(&root_task_group.siblings);
5091faa4 7390 autogroup_init(&init_task);
54c707e9 7391
7c941438 7392#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7393
0a945022 7394 for_each_possible_cpu(i) {
70b97a7f 7395 struct rq *rq;
1da177e4
LT
7396
7397 rq = cpu_rq(i);
05fa785c 7398 raw_spin_lock_init(&rq->lock);
7897986b 7399 rq->nr_running = 0;
dce48a84
TG
7400 rq->calc_load_active = 0;
7401 rq->calc_load_update = jiffies + LOAD_FREQ;
6fa3eb70
S
7402#ifdef CONFIG_PROVE_LOCKING
7403 rq->cpu = i;
7404#endif
acb5a9ba 7405 init_cfs_rq(&rq->cfs);
6f505b16 7406 init_rt_rq(&rq->rt, rq);
dd41f596 7407#ifdef CONFIG_FAIR_GROUP_SCHED
029632fb 7408 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6f505b16 7409 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 7410 /*
07e06b01 7411 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
7412 *
7413 * In case of task-groups formed thr' the cgroup filesystem, it
7414 * gets 100% of the cpu resources in the system. This overall
7415 * system cpu resource is divided among the tasks of
07e06b01 7416 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
7417 * based on each entity's (task or task-group's) weight
7418 * (se->load.weight).
7419 *
07e06b01 7420 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
7421 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7422 * then A0's share of the cpu resource is:
7423 *
0d905bca 7424 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 7425 *
07e06b01
YZ
7426 * We achieve this by letting root_task_group's tasks sit
7427 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
354d60c2 7428 */
ab84d31e 7429 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
07e06b01 7430 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
7431#endif /* CONFIG_FAIR_GROUP_SCHED */
7432
7433 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7434#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 7435 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
07e06b01 7436 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 7437#endif
1da177e4 7438
dd41f596
IM
7439 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7440 rq->cpu_load[j] = 0;
fdf3e95d
VP
7441
7442 rq->last_load_update_tick = jiffies;
7443
1da177e4 7444#ifdef CONFIG_SMP
41c7ce9a 7445 rq->sd = NULL;
57d885fe 7446 rq->rd = NULL;
1399fa78 7447 rq->cpu_power = SCHED_POWER_SCALE;
3f029d3c 7448 rq->post_schedule = 0;
1da177e4 7449 rq->active_balance = 0;
dd41f596 7450 rq->next_balance = jiffies;
1da177e4 7451 rq->push_cpu = 0;
0a2966b4 7452 rq->cpu = i;
1f11eb6a 7453 rq->online = 0;
eae0c9df
MG
7454 rq->idle_stamp = 0;
7455 rq->avg_idle = 2*sysctl_sched_migration_cost;
367456c7
PZ
7456
7457 INIT_LIST_HEAD(&rq->cfs_tasks);
7458
dc938520 7459 rq_attach_root(rq, &def_root_domain);
3451d024 7460#ifdef CONFIG_NO_HZ_COMMON
1c792db7 7461 rq->nohz_flags = 0;
83cd4fe2 7462#endif
265f22a9
FW
7463#ifdef CONFIG_NO_HZ_FULL
7464 rq->last_sched_tick = 0;
7465#endif
1da177e4 7466#endif
8f4d37ec 7467 init_rq_hrtick(rq);
1da177e4 7468 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7469 }
7470
2dd73a4f 7471 set_load_weight(&init_task);
b50f60ce 7472
e107be36
AK
7473#ifdef CONFIG_PREEMPT_NOTIFIERS
7474 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7475#endif
7476
b50f60ce 7477#ifdef CONFIG_RT_MUTEXES
732375c6 7478 plist_head_init(&init_task.pi_waiters);
b50f60ce
HC
7479#endif
7480
1da177e4
LT
7481 /*
7482 * The boot idle thread does lazy MMU switching as well:
7483 */
7484 atomic_inc(&init_mm.mm_count);
7485 enter_lazy_tlb(&init_mm, current);
7486
7487 /*
7488 * Make us the idle thread. Technically, schedule() should not be
7489 * called from this thread, however somewhere below it might be,
7490 * but because we are the idle thread, we just pick up running again
7491 * when this runqueue becomes "idle".
7492 */
7493 init_idle(current, smp_processor_id());
dce48a84
TG
7494
7495 calc_load_update = jiffies + LOAD_FREQ;
7496
dd41f596
IM
7497 /*
7498 * During early bootup we pretend to be a normal task:
7499 */
7500 current->sched_class = &fair_sched_class;
6892b75e 7501
bf4d83f6 7502#ifdef CONFIG_SMP
4cb98839 7503 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
bdddd296
RR
7504 /* May be allocated at isolcpus cmdline parse time */
7505 if (cpu_isolated_map == NULL)
7506 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
29d5e047 7507 idle_thread_set_boot_cpu();
029632fb
PZ
7508#endif
7509 init_sched_fair_class();
6a7b3dc3 7510
6892b75e 7511 scheduler_running = 1;
1da177e4
LT
7512}
7513
d902db1e 7514#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
e4aafea2
FW
7515static inline int preempt_count_equals(int preempt_offset)
7516{
234da7bc 7517 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 7518
4ba8216c 7519 return (nested == preempt_offset);
e4aafea2
FW
7520}
7521
6fa3eb70
S
7522static int __might_sleep_init_called;
7523int __init __might_sleep_init(void)
7524{
7525 __might_sleep_init_called = 1;
7526 return 0;
7527}
7528early_initcall(__might_sleep_init);
7529
d894837f 7530void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7531{
1da177e4
LT
7532 static unsigned long prev_jiffy; /* ratelimiting */
7533
b3fbab05 7534 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
e4aafea2 7535 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6fa3eb70
S
7536 oops_in_progress)
7537 return;
7538 if (system_state != SYSTEM_RUNNING &&
7539 (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
aef745fc
IM
7540 return;
7541 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7542 return;
7543 prev_jiffy = jiffies;
7544
3df0fc5b
PZ
7545 printk(KERN_ERR
7546 "BUG: sleeping function called from invalid context at %s:%d\n",
7547 file, line);
7548 printk(KERN_ERR
7549 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7550 in_atomic(), irqs_disabled(),
7551 current->pid, current->comm);
aef745fc
IM
7552
7553 debug_show_held_locks(current);
7554 if (irqs_disabled())
7555 print_irqtrace_events(current);
7556 dump_stack();
1da177e4
LT
7557}
7558EXPORT_SYMBOL(__might_sleep);
7559#endif
7560
7561#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
7562static void normalize_task(struct rq *rq, struct task_struct *p)
7563{
da7a735e
PZ
7564 const struct sched_class *prev_class = p->sched_class;
7565 int old_prio = p->prio;
3a5e4dc1 7566 int on_rq;
3e51f33f 7567
fd2f4419 7568 on_rq = p->on_rq;
3a5e4dc1 7569 if (on_rq)
4ca9b72b 7570 dequeue_task(rq, p, 0);
3a5e4dc1
AK
7571 __setscheduler(rq, p, SCHED_NORMAL, 0);
7572 if (on_rq) {
4ca9b72b 7573 enqueue_task(rq, p, 0);
3a5e4dc1
AK
7574 resched_task(rq->curr);
7575 }
da7a735e
PZ
7576
7577 check_class_changed(rq, p, prev_class, old_prio);
3a5e4dc1
AK
7578}
7579
1da177e4
LT
7580void normalize_rt_tasks(void)
7581{
a0f98a1c 7582 struct task_struct *g, *p;
1da177e4 7583 unsigned long flags;
70b97a7f 7584 struct rq *rq;
1da177e4 7585
4cf5d77a 7586 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 7587 do_each_thread(g, p) {
178be793
IM
7588 /*
7589 * Only normalize user tasks:
7590 */
7591 if (!p->mm)
7592 continue;
7593
6cfb0d5d 7594 p->se.exec_start = 0;
6cfb0d5d 7595#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7596 p->se.statistics.wait_start = 0;
7597 p->se.statistics.sleep_start = 0;
7598 p->se.statistics.block_start = 0;
6cfb0d5d 7599#endif
dd41f596
IM
7600
7601 if (!rt_task(p)) {
7602 /*
7603 * Renice negative nice level userspace
7604 * tasks back to 0:
7605 */
7606 if (TASK_NICE(p) < 0 && p->mm)
7607 set_user_nice(p, 0);
1da177e4 7608 continue;
dd41f596 7609 }
1da177e4 7610
1d615482 7611 raw_spin_lock(&p->pi_lock);
b29739f9 7612 rq = __task_rq_lock(p);
1da177e4 7613
178be793 7614 normalize_task(rq, p);
3a5e4dc1 7615
b29739f9 7616 __task_rq_unlock(rq);
1d615482 7617 raw_spin_unlock(&p->pi_lock);
a0f98a1c
IM
7618 } while_each_thread(g, p);
7619
4cf5d77a 7620 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
7621}
7622
7623#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7624
67fc4e0c 7625#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7626/*
67fc4e0c 7627 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7628 *
7629 * They can only be called when the whole system has been
7630 * stopped - every CPU needs to be quiescent, and no scheduling
7631 * activity can take place. Using them for anything else would
7632 * be a serious bug, and as a result, they aren't even visible
7633 * under any other configuration.
7634 */
7635
7636/**
7637 * curr_task - return the current task for a given cpu.
7638 * @cpu: the processor in question.
7639 *
7640 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7641 */
36c8b586 7642struct task_struct *curr_task(int cpu)
1df5c10a
LT
7643{
7644 return cpu_curr(cpu);
7645}
7646
67fc4e0c
JW
7647#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7648
7649#ifdef CONFIG_IA64
1df5c10a
LT
7650/**
7651 * set_curr_task - set the current task for a given cpu.
7652 * @cpu: the processor in question.
7653 * @p: the task pointer to set.
7654 *
7655 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
7656 * are serviced on a separate stack. It allows the architecture to switch the
7657 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
7658 * must be called with all CPU's synchronized, and interrupts disabled, the
7659 * and caller must save the original value of the current task (see
7660 * curr_task() above) and restore that value before reenabling interrupts and
7661 * re-starting the system.
7662 *
7663 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7664 */
36c8b586 7665void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
7666{
7667 cpu_curr(cpu) = p;
7668}
7669
7670#endif
29f59db3 7671
7c941438 7672#ifdef CONFIG_CGROUP_SCHED
029632fb
PZ
7673/* task_group_lock serializes the addition/removal of task groups */
7674static DEFINE_SPINLOCK(task_group_lock);
7675
bccbe08a
PZ
7676static void free_sched_group(struct task_group *tg)
7677{
7678 free_fair_sched_group(tg);
7679 free_rt_sched_group(tg);
e9aa1dd1 7680 autogroup_free(tg);
bccbe08a
PZ
7681 kfree(tg);
7682}
7683
7684/* allocate runqueue etc for a new task group */
ec7dc8ac 7685struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
7686{
7687 struct task_group *tg;
bccbe08a
PZ
7688
7689 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7690 if (!tg)
7691 return ERR_PTR(-ENOMEM);
7692
ec7dc8ac 7693 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
7694 goto err;
7695
ec7dc8ac 7696 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
7697 goto err;
7698
ace783b9
LZ
7699 return tg;
7700
7701err:
7702 free_sched_group(tg);
7703 return ERR_PTR(-ENOMEM);
7704}
7705
7706void sched_online_group(struct task_group *tg, struct task_group *parent)
7707{
7708 unsigned long flags;
7709
8ed36996 7710 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7711 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
7712
7713 WARN_ON(!parent); /* root should already exist */
7714
7715 tg->parent = parent;
f473aa5e 7716 INIT_LIST_HEAD(&tg->children);
09f2724a 7717 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 7718 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7719}
7720
9b5b7751 7721/* rcu callback to free various structures associated with a task group */
6f505b16 7722static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 7723{
29f59db3 7724 /* now it should be safe to free those cfs_rqs */
6f505b16 7725 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
7726}
7727
9b5b7751 7728/* Destroy runqueue etc associated with a task group */
4cf86d77 7729void sched_destroy_group(struct task_group *tg)
ace783b9
LZ
7730{
7731 /* wait for possible concurrent references to cfs_rqs complete */
7732 call_rcu(&tg->rcu, free_sched_group_rcu);
7733}
7734
7735void sched_offline_group(struct task_group *tg)
29f59db3 7736{
8ed36996 7737 unsigned long flags;
9b5b7751 7738 int i;
29f59db3 7739
3d4b47b4
PZ
7740 /* end participation in shares distribution */
7741 for_each_possible_cpu(i)
bccbe08a 7742 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
7743
7744 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 7745 list_del_rcu(&tg->list);
f473aa5e 7746 list_del_rcu(&tg->siblings);
8ed36996 7747 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3
SV
7748}
7749
9b5b7751 7750/* Change a task's runqueue when it moves between groups.
3a252015
IM
7751 * The caller of this function should have put the task in its new group
7752 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7753 * reflect its new group.
9b5b7751
SV
7754 */
7755void sched_move_task(struct task_struct *tsk)
29f59db3 7756{
8323f26c 7757 struct task_group *tg;
29f59db3
SV
7758 int on_rq, running;
7759 unsigned long flags;
7760 struct rq *rq;
7761
7762 rq = task_rq_lock(tsk, &flags);
7763
051a1d1a 7764 running = task_current(rq, tsk);
fd2f4419 7765 on_rq = tsk->on_rq;
29f59db3 7766
0e1f3483 7767 if (on_rq)
29f59db3 7768 dequeue_task(rq, tsk, 0);
0e1f3483
HS
7769 if (unlikely(running))
7770 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 7771
8323f26c
PZ
7772 tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
7773 lockdep_is_held(&tsk->sighand->siglock)),
7774 struct task_group, css);
7775 tg = autogroup_task_group(tsk, tg);
7776 tsk->sched_task_group = tg;
7777
810b3817 7778#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02
PZ
7779 if (tsk->sched_class->task_move_group)
7780 tsk->sched_class->task_move_group(tsk, on_rq);
7781 else
810b3817 7782#endif
b2b5ce02 7783 set_task_rq(tsk, task_cpu(tsk));
810b3817 7784
0e1f3483
HS
7785 if (unlikely(running))
7786 tsk->sched_class->set_curr_task(rq);
7787 if (on_rq)
371fd7e7 7788 enqueue_task(rq, tsk, 0);
29f59db3 7789
0122ec5b 7790 task_rq_unlock(rq, tsk, &flags);
29f59db3 7791}
7c941438 7792#endif /* CONFIG_CGROUP_SCHED */
29f59db3 7793
a790de99 7794#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
9f0c1e56
PZ
7795static unsigned long to_ratio(u64 period, u64 runtime)
7796{
7797 if (runtime == RUNTIME_INF)
9a7e0b18 7798 return 1ULL << 20;
9f0c1e56 7799
9a7e0b18 7800 return div64_u64(runtime << 20, period);
9f0c1e56 7801}
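/*
 * Editor's worked example, not part of the original file: to_ratio()
 * returns a 20-bit fixed-point fraction, so "1.0" is 1 << 20 = 1048576.
 * With the default RT settings (period 1 s, runtime 0.95 s, both in ns):
 *
 *   to_ratio(1000000000, 950000000) = (950000000 << 20) / 1000000000
 *                                   ~= 0.95 * 2^20 = 996147
 *
 * i.e. about 95% of the period, matching the default
 * sysctl_sched_rt_runtime of 950000 us.
 */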
a790de99
PT
7802#endif
7803
7804#ifdef CONFIG_RT_GROUP_SCHED
7805/*
7806 * Ensure that the real time constraints are schedulable.
7807 */
7808static DEFINE_MUTEX(rt_constraints_mutex);
9f0c1e56 7809
9a7e0b18
PZ
7810/* Must be called with tasklist_lock held */
7811static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 7812{
9a7e0b18 7813 struct task_struct *g, *p;
b40b2e8e 7814
9a7e0b18 7815 do_each_thread(g, p) {
029632fb 7816 if (rt_task(p) && task_rq(p)->rt.tg == tg)
9a7e0b18
PZ
7817 return 1;
7818 } while_each_thread(g, p);
b40b2e8e 7819
9a7e0b18
PZ
7820 return 0;
7821}
b40b2e8e 7822
9a7e0b18
PZ
7823struct rt_schedulable_data {
7824 struct task_group *tg;
7825 u64 rt_period;
7826 u64 rt_runtime;
7827};
b40b2e8e 7828
a790de99 7829static int tg_rt_schedulable(struct task_group *tg, void *data)
9a7e0b18
PZ
7830{
7831 struct rt_schedulable_data *d = data;
7832 struct task_group *child;
7833 unsigned long total, sum = 0;
7834 u64 period, runtime;
b40b2e8e 7835
9a7e0b18
PZ
7836 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7837 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 7838
9a7e0b18
PZ
7839 if (tg == d->tg) {
7840 period = d->rt_period;
7841 runtime = d->rt_runtime;
b40b2e8e 7842 }
b40b2e8e 7843
4653f803
PZ
7844 /*
7845 * Cannot have more runtime than the period.
7846 */
7847 if (runtime > period && runtime != RUNTIME_INF)
7848 return -EINVAL;
6f505b16 7849
4653f803
PZ
7850 /*
7851 * Ensure we don't starve existing RT tasks.
7852 */
9a7e0b18
PZ
7853 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7854 return -EBUSY;
6f505b16 7855
9a7e0b18 7856 total = to_ratio(period, runtime);
6f505b16 7857
4653f803
PZ
7858 /*
7859 * Nobody can have more than the global setting allows.
7860 */
7861 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7862 return -EINVAL;
6f505b16 7863
4653f803
PZ
7864 /*
7865 * The sum of our children's runtime should not exceed our own.
7866 */
9a7e0b18
PZ
7867 list_for_each_entry_rcu(child, &tg->children, siblings) {
7868 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7869 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 7870
9a7e0b18
PZ
7871 if (child == d->tg) {
7872 period = d->rt_period;
7873 runtime = d->rt_runtime;
7874 }
6f505b16 7875
9a7e0b18 7876 sum += to_ratio(period, runtime);
9f0c1e56 7877 }
6f505b16 7878
9a7e0b18
PZ
7879 if (sum > total)
7880 return -EINVAL;
7881
7882 return 0;
6f505b16
PZ
7883}
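/*
 * Editor's worked example, not part of the original file (numbers are
 * assumptions chosen to illustrate the walk above): if a group has
 * rt_period = 1 s and rt_runtime = 0.5 s, its budget is
 * total = to_ratio(1s, 0.5s) ~= 0.5 << 20.  Two children requesting
 * 0.3 s and 0.25 s in the same 1 s period sum to ~0.55 << 20, which is
 * larger than 'total', so tg_rt_schedulable() returns -EINVAL for the
 * change that would create this configuration.
 */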
7884
9a7e0b18 7885static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 7886{
8277434e
PT
7887 int ret;
7888
9a7e0b18
PZ
7889 struct rt_schedulable_data data = {
7890 .tg = tg,
7891 .rt_period = period,
7892 .rt_runtime = runtime,
7893 };
7894
8277434e
PT
7895 rcu_read_lock();
7896 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7897 rcu_read_unlock();
7898
7899 return ret;
521f1a24
DG
7900}
7901
ab84d31e 7902static int tg_set_rt_bandwidth(struct task_group *tg,
d0b27fa7 7903 u64 rt_period, u64 rt_runtime)
6f505b16 7904{
ac086bc2 7905 int i, err = 0;
9f0c1e56 7906
9f0c1e56 7907 mutex_lock(&rt_constraints_mutex);
521f1a24 7908 read_lock(&tasklist_lock);
9a7e0b18
PZ
7909 err = __rt_schedulable(tg, rt_period, rt_runtime);
7910 if (err)
9f0c1e56 7911 goto unlock;
ac086bc2 7912
0986b11b 7913 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
7914 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7915 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
7916
7917 for_each_possible_cpu(i) {
7918 struct rt_rq *rt_rq = tg->rt_rq[i];
7919
0986b11b 7920 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 7921 rt_rq->rt_runtime = rt_runtime;
0986b11b 7922 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 7923 }
0986b11b 7924 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 7925unlock:
521f1a24 7926 read_unlock(&tasklist_lock);
9f0c1e56
PZ
7927 mutex_unlock(&rt_constraints_mutex);
7928
7929 return err;
6f505b16
PZ
7930}
7931
25cc7da7 7932static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
d0b27fa7
PZ
7933{
7934 u64 rt_runtime, rt_period;
7935
7936 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7937 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7938 if (rt_runtime_us < 0)
7939 rt_runtime = RUNTIME_INF;
7940
ab84d31e 7941 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7942}
7943
25cc7da7 7944static long sched_group_rt_runtime(struct task_group *tg)
9f0c1e56
PZ
7945{
7946 u64 rt_runtime_us;
7947
d0b27fa7 7948 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
7949 return -1;
7950
d0b27fa7 7951 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
7952 do_div(rt_runtime_us, NSEC_PER_USEC);
7953 return rt_runtime_us;
7954}
d0b27fa7 7955
25cc7da7 7956static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
d0b27fa7
PZ
7957{
7958 u64 rt_runtime, rt_period;
7959
7960 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7961 rt_runtime = tg->rt_bandwidth.rt_runtime;
7962
619b0488
R
7963 if (rt_period == 0)
7964 return -EINVAL;
7965
ab84d31e 7966 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
d0b27fa7
PZ
7967}
7968
25cc7da7 7969static long sched_group_rt_period(struct task_group *tg)
d0b27fa7
PZ
7970{
7971 u64 rt_period_us;
7972
7973 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7974 do_div(rt_period_us, NSEC_PER_USEC);
7975 return rt_period_us;
7976}
7977
7978static int sched_rt_global_constraints(void)
7979{
4653f803 7980 u64 runtime, period;
d0b27fa7
PZ
7981 int ret = 0;
7982
ec5d4989
HS
7983 if (sysctl_sched_rt_period <= 0)
7984 return -EINVAL;
7985
4653f803
PZ
7986 runtime = global_rt_runtime();
7987 period = global_rt_period();
7988
7989 /*
7990 * Sanity check on the sysctl variables.
7991 */
7992 if (runtime > period && runtime != RUNTIME_INF)
7993 return -EINVAL;
10b612f4 7994
d0b27fa7 7995 mutex_lock(&rt_constraints_mutex);
9a7e0b18 7996 read_lock(&tasklist_lock);
4653f803 7997 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 7998 read_unlock(&tasklist_lock);
d0b27fa7
PZ
7999 mutex_unlock(&rt_constraints_mutex);
8000
8001 return ret;
8002}
54e99124 8003
25cc7da7 8004static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
54e99124
DG
8005{
8006 /* Don't accept realtime tasks when there is no way for them to run */
8007 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8008 return 0;
8009
8010 return 1;
8011}
8012
6d6bc0ad 8013#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
8014static int sched_rt_global_constraints(void)
8015{
ac086bc2
PZ
8016 unsigned long flags;
8017 int i;
8018
ec5d4989
HS
8019 if (sysctl_sched_rt_period <= 0)
8020 return -EINVAL;
8021
60aa605d
PZ
8022 /*
 8023 * There are always some RT tasks in the root group
 8024 * -- migration, kstopmachine etc.
8025 */
8026 if (sysctl_sched_rt_runtime == 0)
8027 return -EBUSY;
8028
0986b11b 8029 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
8030 for_each_possible_cpu(i) {
8031 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8032
0986b11b 8033 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 8034 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 8035 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 8036 }
0986b11b 8037 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 8038
d0b27fa7
PZ
8039 return 0;
8040}
6d6bc0ad 8041#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 8042
ce0dbbbb
CW
8043int sched_rr_handler(struct ctl_table *table, int write,
8044 void __user *buffer, size_t *lenp,
8045 loff_t *ppos)
8046{
8047 int ret;
8048 static DEFINE_MUTEX(mutex);
8049
8050 mutex_lock(&mutex);
8051 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8052 /* make sure that internally we keep jiffies */
8053 /* also, writing zero resets timeslice to default */
8054 if (!ret && write) {
8055 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8056 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
8057 }
8058 mutex_unlock(&mutex);
8059 return ret;
8060}
8061
d0b27fa7 8062int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 8063 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
8064 loff_t *ppos)
8065{
8066 int ret;
8067 int old_period, old_runtime;
8068 static DEFINE_MUTEX(mutex);
8069
8070 mutex_lock(&mutex);
8071 old_period = sysctl_sched_rt_period;
8072 old_runtime = sysctl_sched_rt_runtime;
8073
8d65af78 8074 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
8075
8076 if (!ret && write) {
8077 ret = sched_rt_global_constraints();
8078 if (ret) {
8079 sysctl_sched_rt_period = old_period;
8080 sysctl_sched_rt_runtime = old_runtime;
8081 } else {
8082 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8083 def_rt_bandwidth.rt_period =
8084 ns_to_ktime(global_rt_period());
8085 }
8086 }
8087 mutex_unlock(&mutex);
8088
8089 return ret;
8090}
68318b8e 8091
052f1dc7 8092#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
8093
8094/* return corresponding task_group object of a cgroup */
2b01dfe3 8095static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
68318b8e 8096{
2b01dfe3
PM
8097 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
8098 struct task_group, css);
68318b8e
SV
8099}
8100
92fb9748 8101static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
68318b8e 8102{
ec7dc8ac 8103 struct task_group *tg, *parent;
68318b8e 8104
2b01dfe3 8105 if (!cgrp->parent) {
68318b8e 8106 /* This is early initialization for the top cgroup */
07e06b01 8107 return &root_task_group.css;
68318b8e
SV
8108 }
8109
ec7dc8ac
DG
8110 parent = cgroup_tg(cgrp->parent);
8111 tg = sched_create_group(parent);
68318b8e
SV
8112 if (IS_ERR(tg))
8113 return ERR_PTR(-ENOMEM);
8114
68318b8e
SV
8115 return &tg->css;
8116}
8117
ace783b9
LZ
8118static int cpu_cgroup_css_online(struct cgroup *cgrp)
8119{
8120 struct task_group *tg = cgroup_tg(cgrp);
8121 struct task_group *parent;
8122
8123 if (!cgrp->parent)
8124 return 0;
8125
8126 parent = cgroup_tg(cgrp->parent);
8127 sched_online_group(tg, parent);
8128 return 0;
8129}
8130
92fb9748 8131static void cpu_cgroup_css_free(struct cgroup *cgrp)
68318b8e 8132{
2b01dfe3 8133 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
8134
8135 sched_destroy_group(tg);
8136}
8137
ace783b9
LZ
8138static void cpu_cgroup_css_offline(struct cgroup *cgrp)
8139{
8140 struct task_group *tg = cgroup_tg(cgrp);
8141
8142 sched_offline_group(tg);
8143}
8144
6fa3eb70
S
8145static int
8146cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
8147{
8148 const struct cred *cred = current_cred(), *tcred;
8149 struct task_struct *task;
8150
8151 cgroup_taskset_for_each(task, cgrp, tset) {
8152 tcred = __task_cred(task);
8153
8154 if ((current != task) && !capable(CAP_SYS_NICE) &&
8155 cred->euid != tcred->uid && cred->euid != tcred->suid)
8156 return -EACCES;
8157 }
8158
8159 return 0;
8160}
8161
761b3ef5 8162static int cpu_cgroup_can_attach(struct cgroup *cgrp,
bb9d97b6 8163 struct cgroup_taskset *tset)
68318b8e 8164{
bb9d97b6
TH
8165 struct task_struct *task;
8166
8167 cgroup_taskset_for_each(task, cgrp, tset) {
b68aa230 8168#ifdef CONFIG_RT_GROUP_SCHED
bb9d97b6 8169 if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
6fa3eb70 8170 return -ERTGROUP;
b68aa230 8171#else
bb9d97b6
TH
8172 /* We don't support RT-tasks being in separate groups */
8173 if (task->sched_class != &fair_sched_class)
8174 return -EINVAL;
b68aa230 8175#endif
bb9d97b6 8176 }
be367d09
BB
8177 return 0;
8178}
68318b8e 8179
761b3ef5 8180static void cpu_cgroup_attach(struct cgroup *cgrp,
bb9d97b6 8181 struct cgroup_taskset *tset)
68318b8e 8182{
bb9d97b6
TH
8183 struct task_struct *task;
8184
8185 cgroup_taskset_for_each(task, cgrp, tset)
8186 sched_move_task(task);
68318b8e
SV
8187}
8188
068c5cc5 8189static void
761b3ef5
LZ
8190cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
8191 struct task_struct *task)
068c5cc5
PZ
8192{
8193 /*
8194 * cgroup_exit() is called in the copy_process() failure path.
8195 * Ignore this case since the task hasn't ran yet, this avoids
8196 * trying to poke a half freed task state from generic code.
8197 */
8198 if (!(task->flags & PF_EXITING))
8199 return;
8200
8201 sched_move_task(task);
8202}
8203
052f1dc7 8204#ifdef CONFIG_FAIR_GROUP_SCHED
f4c753b7 8205static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
2b01dfe3 8206 u64 shareval)
68318b8e 8207{
c8b28116 8208 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
68318b8e
SV
8209}
8210
f4c753b7 8211static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
68318b8e 8212{
2b01dfe3 8213 struct task_group *tg = cgroup_tg(cgrp);
68318b8e 8214
c8b28116 8215 return (u64) scale_load_down(tg->shares);
68318b8e 8216}
ab84d31e
PT
8217
8218#ifdef CONFIG_CFS_BANDWIDTH
a790de99
PT
8219static DEFINE_MUTEX(cfs_constraints_mutex);
8220
ab84d31e
PT
8221const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8222const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8223
a790de99
PT
8224static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8225
ab84d31e
PT
8226static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8227{
56f570e5 8228 int i, ret = 0, runtime_enabled, runtime_was_enabled;
029632fb 8229 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
ab84d31e
PT
8230
8231 if (tg == &root_task_group)
8232 return -EINVAL;
8233
8234 /*
 8235 * Ensure we have at least some amount of bandwidth every period. This is
8236 * to prevent reaching a state of large arrears when throttled via
8237 * entity_tick() resulting in prolonged exit starvation.
8238 */
8239 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8240 return -EINVAL;
8241
8242 /*
8243 * Likewise, bound things on the otherside by preventing insane quota
8244 * periods. This also allows us to normalize in computing quota
8245 * feasibility.
8246 */
8247 if (period > max_cfs_quota_period)
8248 return -EINVAL;
8249
a790de99
PT
8250 mutex_lock(&cfs_constraints_mutex);
8251 ret = __cfs_schedulable(tg, period, quota);
8252 if (ret)
8253 goto out_unlock;
8254
58088ad0 8255 runtime_enabled = quota != RUNTIME_INF;
56f570e5 8256 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9d80092f
BS
8257 /*
8258 * If we need to toggle cfs_bandwidth_used, off->on must occur
8259 * before making related changes, and on->off must occur afterwards
8260 */
8261 if (runtime_enabled && !runtime_was_enabled)
8262 cfs_bandwidth_usage_inc();
ab84d31e
PT
8263 raw_spin_lock_irq(&cfs_b->lock);
8264 cfs_b->period = ns_to_ktime(period);
8265 cfs_b->quota = quota;
58088ad0 8266
a9cf55b2 8267 __refill_cfs_bandwidth_runtime(cfs_b);
58088ad0
PT
8268 /* restart the period timer (if active) to handle new period expiry */
8269 if (runtime_enabled && cfs_b->timer_active) {
8270 /* force a reprogram */
8271 cfs_b->timer_active = 0;
8272 __start_cfs_bandwidth(cfs_b);
8273 }
ab84d31e
PT
8274 raw_spin_unlock_irq(&cfs_b->lock);
8275
8276 for_each_possible_cpu(i) {
8277 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
029632fb 8278 struct rq *rq = cfs_rq->rq;
ab84d31e
PT
8279
8280 raw_spin_lock_irq(&rq->lock);
58088ad0 8281 cfs_rq->runtime_enabled = runtime_enabled;
ab84d31e 8282 cfs_rq->runtime_remaining = 0;
671fd9da 8283
029632fb 8284 if (cfs_rq->throttled)
671fd9da 8285 unthrottle_cfs_rq(cfs_rq);
ab84d31e
PT
8286 raw_spin_unlock_irq(&rq->lock);
8287 }
9d80092f
BS
8288 if (runtime_was_enabled && !runtime_enabled)
8289 cfs_bandwidth_usage_dec();
a790de99
PT
8290out_unlock:
8291 mutex_unlock(&cfs_constraints_mutex);
ab84d31e 8292
a790de99 8293 return ret;
ab84d31e
PT
8294}
8295
8296int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8297{
8298 u64 quota, period;
8299
029632fb 8300 period = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8301 if (cfs_quota_us < 0)
8302 quota = RUNTIME_INF;
8303 else
8304 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8305
8306 return tg_set_cfs_bandwidth(tg, period, quota);
8307}
8308
8309long tg_get_cfs_quota(struct task_group *tg)
8310{
8311 u64 quota_us;
8312
029632fb 8313 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
ab84d31e
PT
8314 return -1;
8315
029632fb 8316 quota_us = tg->cfs_bandwidth.quota;
ab84d31e
PT
8317 do_div(quota_us, NSEC_PER_USEC);
8318
8319 return quota_us;
8320}
8321
8322int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8323{
8324 u64 quota, period;
8325
8326 period = (u64)cfs_period_us * NSEC_PER_USEC;
029632fb 8327 quota = tg->cfs_bandwidth.quota;
ab84d31e 8328
ab84d31e
PT
8329 return tg_set_cfs_bandwidth(tg, period, quota);
8330}
8331
8332long tg_get_cfs_period(struct task_group *tg)
8333{
8334 u64 cfs_period_us;
8335
029632fb 8336 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
ab84d31e
PT
8337 do_div(cfs_period_us, NSEC_PER_USEC);
8338
8339 return cfs_period_us;
8340}
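/*
 * Editor's illustrative sketch, not part of the original file: capping a
 * group at half a CPU through the helpers above.  Writing
 * cpu.cfs_period_us = 100000 and cpu.cfs_quota_us = 50000 from userspace
 * has the same effect; "example_cap_group_to_half_cpu" is an assumption,
 * so the sketch is compiled out.
 */
#if 0
static int example_cap_group_to_half_cpu(struct task_group *tg)
{
	int ret;

	ret = tg_set_cfs_period(tg, 100000);	/* 100 ms period */
	if (ret)
		return ret;
	/* 50 ms of runtime per 100 ms period: the group gets 0.5 CPU */
	return tg_set_cfs_quota(tg, 50000);
}
#endif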
8341
8342static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
8343{
8344 return tg_get_cfs_quota(cgroup_tg(cgrp));
8345}
8346
8347static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
8348 s64 cfs_quota_us)
8349{
8350 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
8351}
8352
8353static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
8354{
8355 return tg_get_cfs_period(cgroup_tg(cgrp));
8356}
8357
8358static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
8359 u64 cfs_period_us)
8360{
8361 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
8362}
8363
a790de99
PT
8364struct cfs_schedulable_data {
8365 struct task_group *tg;
8366 u64 period, quota;
8367};
8368
8369/*
8370 * normalize group quota/period to be quota/max_period
8371 * note: units are usecs
8372 */
8373static u64 normalize_cfs_quota(struct task_group *tg,
8374 struct cfs_schedulable_data *d)
8375{
8376 u64 quota, period;
8377
8378 if (tg == d->tg) {
8379 period = d->period;
8380 quota = d->quota;
8381 } else {
8382 period = tg_get_cfs_period(tg);
8383 quota = tg_get_cfs_quota(tg);
8384 }
8385
8386 /* note: these should typically be equivalent */
8387 if (quota == RUNTIME_INF || quota == -1)
8388 return RUNTIME_INF;
8389
8390 return to_ratio(period, quota);
8391}
8392
8393static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8394{
8395 struct cfs_schedulable_data *d = data;
029632fb 8396 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
a790de99
PT
8397 s64 quota = 0, parent_quota = -1;
8398
8399 if (!tg->parent) {
8400 quota = RUNTIME_INF;
8401 } else {
029632fb 8402 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
a790de99
PT
8403
8404 quota = normalize_cfs_quota(tg, d);
8405 parent_quota = parent_b->hierarchal_quota;
8406
8407 /*
8408 * ensure max(child_quota) <= parent_quota, inherit when no
8409 * limit is set
8410 */
8411 if (quota == RUNTIME_INF)
8412 quota = parent_quota;
8413 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8414 return -EINVAL;
8415 }
8416 cfs_b->hierarchal_quota = quota;
8417
8418 return 0;
8419}
8420
8421static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8422{
8277434e 8423 int ret;
a790de99
PT
8424 struct cfs_schedulable_data data = {
8425 .tg = tg,
8426 .period = period,
8427 .quota = quota,
8428 };
8429
8430 if (quota != RUNTIME_INF) {
8431 do_div(data.period, NSEC_PER_USEC);
8432 do_div(data.quota, NSEC_PER_USEC);
8433 }
8434
8277434e
PT
8435 rcu_read_lock();
8436 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8437 rcu_read_unlock();
8438
8439 return ret;
a790de99 8440}
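/*
 * Editor's worked example, not part of the original file (numbers are
 * assumptions): normalize_cfs_quota() expresses each group's limit as
 * to_ratio(period, quota) with both values in usecs.  If the parent
 * allows 50 ms per 100 ms (~0.5 << 20) while a child asks for 80 ms per
 * 100 ms (~0.8 << 20), tg_cfs_schedulable_down() sees
 * quota > parent_quota and returns -EINVAL; a child with no limit set
 * (RUNTIME_INF) simply inherits the parent's quota instead.
 */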
e8da1b18
NR
8441
8442static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
8443 struct cgroup_map_cb *cb)
8444{
8445 struct task_group *tg = cgroup_tg(cgrp);
029632fb 8446 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
e8da1b18
NR
8447
8448 cb->fill(cb, "nr_periods", cfs_b->nr_periods);
8449 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
8450 cb->fill(cb, "throttled_time", cfs_b->throttled_time);
8451
8452 return 0;
8453}
ab84d31e 8454#endif /* CONFIG_CFS_BANDWIDTH */
6d6bc0ad 8455#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 8456
052f1dc7 8457#ifdef CONFIG_RT_GROUP_SCHED
0c70814c 8458static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
06ecb27c 8459 s64 val)
6f505b16 8460{
06ecb27c 8461 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
6f505b16
PZ
8462}
8463
06ecb27c 8464static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
6f505b16 8465{
06ecb27c 8466 return sched_group_rt_runtime(cgroup_tg(cgrp));
6f505b16 8467}
d0b27fa7
PZ
8468
8469static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
8470 u64 rt_period_us)
8471{
8472 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
8473}
8474
8475static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
8476{
8477 return sched_group_rt_period(cgroup_tg(cgrp));
8478}
6d6bc0ad 8479#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 8480
fe5c7cc2 8481static struct cftype cpu_files[] = {
052f1dc7 8482#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8483 {
8484 .name = "shares",
f4c753b7
PM
8485 .read_u64 = cpu_shares_read_u64,
8486 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8487 },
052f1dc7 8488#endif
ab84d31e
PT
8489#ifdef CONFIG_CFS_BANDWIDTH
8490 {
8491 .name = "cfs_quota_us",
8492 .read_s64 = cpu_cfs_quota_read_s64,
8493 .write_s64 = cpu_cfs_quota_write_s64,
8494 },
8495 {
8496 .name = "cfs_period_us",
8497 .read_u64 = cpu_cfs_period_read_u64,
8498 .write_u64 = cpu_cfs_period_write_u64,
8499 },
e8da1b18
NR
8500 {
8501 .name = "stat",
8502 .read_map = cpu_stats_show,
8503 },
ab84d31e 8504#endif
052f1dc7 8505#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8506 {
9f0c1e56 8507 .name = "rt_runtime_us",
06ecb27c
PM
8508 .read_s64 = cpu_rt_runtime_read,
8509 .write_s64 = cpu_rt_runtime_write,
6f505b16 8510 },
d0b27fa7
PZ
8511 {
8512 .name = "rt_period_us",
f4c753b7
PM
8513 .read_u64 = cpu_rt_period_read_uint,
8514 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8515 },
052f1dc7 8516#endif
4baf6e33 8517 { } /* terminate */
68318b8e
SV
8518};
8519
68318b8e 8520struct cgroup_subsys cpu_cgroup_subsys = {
38605cae 8521 .name = "cpu",
92fb9748
TH
8522 .css_alloc = cpu_cgroup_css_alloc,
8523 .css_free = cpu_cgroup_css_free,
ace783b9
LZ
8524 .css_online = cpu_cgroup_css_online,
8525 .css_offline = cpu_cgroup_css_offline,
bb9d97b6
TH
8526 .can_attach = cpu_cgroup_can_attach,
8527 .attach = cpu_cgroup_attach,
6fa3eb70 8528 .allow_attach = cpu_cgroup_allow_attach,
068c5cc5 8529 .exit = cpu_cgroup_exit,
38605cae 8530 .subsys_id = cpu_cgroup_subsys_id,
4baf6e33 8531 .base_cftypes = cpu_files,
68318b8e
SV
8532 .early_init = 1,
8533};
8534
052f1dc7 8535#endif /* CONFIG_CGROUP_SCHED */
d842de87 8536
b637a328
PM
8537void dump_cpu_task(int cpu)
8538{
8539 pr_info("Task dump for CPU %d:\n", cpu);
8540 sched_show_task(cpu_curr(cpu));
8541}
6fa3eb70
S
8542
8543unsigned long long mt_get_thread_cputime(pid_t pid)
8544{
8545 struct task_struct *p;
8546 p = pid ? find_task_by_vpid(pid) : current;
8547 return task_sched_runtime(p);
8548}
8549unsigned long long mt_get_cpu_idle(int cpu)
8550{
 8551	unsigned long long *unused = NULL;
8552 return get_cpu_idle_time_us(cpu, unused);
8553}
8554unsigned long long mt_sched_clock(void)
8555{
8556 return sched_clock();
8557}
8558EXPORT_SYMBOL(mt_get_thread_cputime);
8559EXPORT_SYMBOL(mt_get_cpu_idle);
8560EXPORT_SYMBOL(mt_sched_clock);