Commit | Line | Data |
---|---|---|
fa72e9e4 IM |
1 | /* |
2 | * idle-task scheduling class. | |
3 | * | |
4 | * (NOTE: these are not related to SCHED_IDLE tasks which are | |
5 | * handled in sched_fair.c) | |
6 | */ | |
7 | ||
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
{
	/*
	 * @p (the newly woken task) is deliberately unused: any runnable
	 * task preempts the idle task, so we always mark the per-rq idle
	 * thread for rescheduling.
	 */
	resched_task(rq->idle);
}
15 | ||
/*
 * There is always exactly one runnable "task" in this class: the
 * per-CPU idle thread. Bump the go-idle schedstat counter and hand
 * it back.
 */
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);

	return rq->idle;
}
22 | ||
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	/*
	 * Drop rq->lock around the printk/dump_stack — presumably so the
	 * (potentially slow, possibly lock-taking) console output does not
	 * run with the runqueue lock held. NOTE(review): confirm the caller
	 * tolerates the lock being released and re-taken here.
	 */
	spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	spin_lock_irq(&rq->lock);
}
35 | ||
/*
 * The idle task carries no scheduling state that must be saved when it
 * is switched away from, so this is intentionally a no-op.
 */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
39 | ||
#ifdef CONFIG_SMP
/*
 * Idle threads are bound to their CPU and are never candidates for
 * load balancing: report zero load moved.
 */
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return 0;
}

/*
 * Likewise, there is never a task in this class that can be migrated:
 * report that no task was moved.
 */
static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
#endif
fa72e9e4 IM |
57 | |
/*
 * The idle task has no timeslice or vruntime to account, so the
 * periodic scheduler tick has nothing to do here.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr)
{
}
61 | ||
/*
 * Nothing to (re)initialize when a task switches into this class and
 * becomes the current task: intentionally empty.
 */
static void set_curr_task_idle(struct rq *rq)
{
}
65 | ||
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 *
 * Only the hooks that can meaningfully be invoked for the idle thread
 * are populated; the rest are intentionally left NULL.
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_idle,
	.move_one_task		= move_one_task_idle,
#endif

	.set_curr_task          = set_curr_task_idle,
	.task_tick		= task_tick_idle,
	/* no .task_new for idle tasks */
};