1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6 #ifdef CONFIG_SMP
7
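/*
 * rto_count is the number of runqueues in this rq's root domain that
 * are currently marked RT-overloaded (more than one runnable RT task,
 * at least one of which can migrate); see update_rt_migration() below.
 */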
8 static inline int rt_overloaded(struct rq *rq)
9 {
10 return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15 cpu_set(rq->cpu, rq->rd->rto_mask);
16 /*
17 * Make sure the mask is visible before we set
18 * the overload count. That is checked to determine
19 * if we should look at the mask. It would be a shame
20 * if we looked at the mask, but the mask was not
21 * updated yet.
22 */
23 wmb();
24 atomic_inc(&rq->rd->rto_count);
25 }
26
27 static inline void rt_clear_overload(struct rq *rq)
28 {
29 /* the order here really doesn't matter */
30 atomic_dec(&rq->rd->rto_count);
31 cpu_clear(rq->cpu, rq->rd->rto_mask);
32 }
33
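/*
 * Keep rq->rt.overloaded and the root-domain rto_mask in sync with the
 * number of runnable RT tasks on this rq and how many of them are
 * allowed to migrate.
 */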
34 static void update_rt_migration(struct rq *rq)
35 {
36 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
37 if (!rq->rt.overloaded) {
38 rt_set_overload(rq);
39 rq->rt.overloaded = 1;
40 }
41 } else if (rq->rt.overloaded) {
42 rt_clear_overload(rq);
43 rq->rt.overloaded = 0;
44 }
45 }
46 #endif /* CONFIG_SMP */
47
48 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
49 {
50 return container_of(rt_se, struct task_struct, rt);
51 }
52
53 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
54 {
55 return !list_empty(&rt_se->run_list);
56 }
57
58 #ifdef CONFIG_RT_GROUP_SCHED
59
60 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
61 {
62 if (!rt_rq->tg)
63 return RUNTIME_INF;
64
65 return rt_rq->rt_runtime;
66 }
67
68 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
69 {
70 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
71 }
72
73 #define for_each_leaf_rt_rq(rt_rq, rq) \
74 list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
75
76 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
77 {
78 return rt_rq->rq;
79 }
80
81 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
82 {
83 return rt_se->rt_rq;
84 }
85
86 #define for_each_sched_rt_entity(rt_se) \
87 for (; rt_se; rt_se = rt_se->parent)
88
89 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
90 {
91 return rt_se->my_q;
92 }
93
94 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
95 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
96
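/*
 * Re-enqueue the group entity backing this rt_rq (e.g. when it gets
 * unthrottled) and preempt the running task if the group now contains
 * a higher-priority entity.
 */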
97 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
98 {
99 struct sched_rt_entity *rt_se = rt_rq->rt_se;
100
101 if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
102 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
103
104 enqueue_rt_entity(rt_se);
105 if (rt_rq->highest_prio < curr->prio)
106 resched_task(curr);
107 }
108 }
109
110 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
111 {
112 struct sched_rt_entity *rt_se = rt_rq->rt_se;
113
114 if (rt_se && on_rt_rq(rt_se))
115 dequeue_rt_entity(rt_se);
116 }
117
118 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
119 {
120 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
121 }
122
123 static int rt_se_boosted(struct sched_rt_entity *rt_se)
124 {
125 struct rt_rq *rt_rq = group_rt_rq(rt_se);
126 struct task_struct *p;
127
128 if (rt_rq)
129 return !!rt_rq->rt_nr_boosted;
130
131 p = rt_task_of(rt_se);
132 return p->prio != p->normal_prio;
133 }
134
135 #ifdef CONFIG_SMP
136 static inline cpumask_t sched_rt_period_mask(void)
137 {
138 return cpu_rq(smp_processor_id())->rd->span;
139 }
140 #else
141 static inline cpumask_t sched_rt_period_mask(void)
142 {
143 return cpu_online_map;
144 }
145 #endif
146
147 static inline
148 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
149 {
150 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
151 }
152
153 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
154 {
155 return &rt_rq->tg->rt_bandwidth;
156 }
157
158 #else
159
160 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
161 {
162 return rt_rq->rt_runtime;
163 }
164
165 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
166 {
167 return ktime_to_ns(def_rt_bandwidth.rt_period);
168 }
169
170 #define for_each_leaf_rt_rq(rt_rq, rq) \
171 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
172
173 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
174 {
175 return container_of(rt_rq, struct rq, rt);
176 }
177
178 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
179 {
180 struct task_struct *p = rt_task_of(rt_se);
181 struct rq *rq = task_rq(p);
182
183 return &rq->rt;
184 }
185
186 #define for_each_sched_rt_entity(rt_se) \
187 for (; rt_se; rt_se = NULL)
188
189 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
190 {
191 return NULL;
192 }
193
194 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
195 {
196 }
197
198 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
199 {
200 }
201
202 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
203 {
204 return rt_rq->rt_throttled;
205 }
206
207 static inline cpumask_t sched_rt_period_mask(void)
208 {
209 return cpu_online_map;
210 }
211
212 static inline
213 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
214 {
215 return &cpu_rq(cpu)->rt;
216 }
217
218 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
219 {
220 return &def_rt_bandwidth;
221 }
222
223 #endif
224
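/*
 * Periodic bandwidth refresh, run from the rt_bandwidth timer: pay back
 * each rt_rq's accumulated rt_time, unthrottle queues that fit within
 * their runtime again, and return whether everything is idle so the
 * timer can stop.  With the default limits (rt_runtime = 0.95s out of a
 * 1s rt_period), an rt_rq that used more than 950ms of CPU in one period
 * stays throttled until this refresh.
 */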
225 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
226 {
227 int i, idle = 1;
228 cpumask_t span;
229
230 if (rt_b->rt_runtime == RUNTIME_INF)
231 return 1;
232
233 span = sched_rt_period_mask();
234 for_each_cpu_mask(i, span) {
235 int enqueue = 0;
236 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
237 struct rq *rq = rq_of_rt_rq(rt_rq);
238
239 spin_lock(&rq->lock);
240 if (rt_rq->rt_time) {
241 u64 runtime;
242
243 spin_lock(&rt_rq->rt_runtime_lock);
244 runtime = rt_rq->rt_runtime;
245 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
246 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
247 rt_rq->rt_throttled = 0;
248 enqueue = 1;
249 }
250 if (rt_rq->rt_time || rt_rq->rt_nr_running)
251 idle = 0;
252 spin_unlock(&rt_rq->rt_runtime_lock);
253 } else if (rt_rq->rt_nr_running)
254 idle = 0;
255
256 if (enqueue)
257 sched_rt_rq_enqueue(rt_rq);
258 spin_unlock(&rq->lock);
259 }
260
261 return idle;
262 }
263
264 #ifdef CONFIG_SMP
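/*
 * Borrow unused runtime from the other CPUs in our root domain so this
 * rt_rq is less likely to be throttled while its siblings have slack.
 */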
265 static int balance_runtime(struct rt_rq *rt_rq)
266 {
267 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
268 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
269 int i, weight, more = 0;
270 u64 rt_period;
271
272 weight = cpus_weight(rd->span);
273
274 spin_lock(&rt_b->rt_runtime_lock);
275 rt_period = ktime_to_ns(rt_b->rt_period);
276 for_each_cpu_mask(i, rd->span) {
277 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
278 s64 diff;
279
280 if (iter == rt_rq)
281 continue;
282
283 spin_lock(&iter->rt_runtime_lock);
284 diff = iter->rt_runtime - iter->rt_time;
285 if (diff > 0) {
286 do_div(diff, weight);
287 if (rt_rq->rt_runtime + diff > rt_period)
288 diff = rt_period - rt_rq->rt_runtime;
289 iter->rt_runtime -= diff;
290 rt_rq->rt_runtime += diff;
291 more = 1;
292 if (rt_rq->rt_runtime == rt_period) {
293 spin_unlock(&iter->rt_runtime_lock);
294 break;
295 }
296 }
297 spin_unlock(&iter->rt_runtime_lock);
298 }
299 spin_unlock(&rt_b->rt_runtime_lock);
300
301 return more;
302 }
303 #endif
304
305 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
306 {
307 #ifdef CONFIG_RT_GROUP_SCHED
308 struct rt_rq *rt_rq = group_rt_rq(rt_se);
309
310 if (rt_rq)
311 return rt_rq->highest_prio;
312 #endif
313
314 return rt_task_of(rt_se)->prio;
315 }
316
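/*
 * Charge-side throttling check, called with rt_rq->rt_runtime_lock held:
 * returns 1 (and dequeues the group) once this rt_rq has used up its
 * runtime for the current period.
 */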
317 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
318 {
319 u64 runtime = sched_rt_runtime(rt_rq);
320
321 if (runtime == RUNTIME_INF)
322 return 0;
323
324 if (rt_rq->rt_throttled)
325 return rt_rq_throttled(rt_rq);
326
327 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
328 return 0;
329
330 #ifdef CONFIG_SMP
331 if (rt_rq->rt_time > runtime) {
332 int more;
333
334 spin_unlock(&rt_rq->rt_runtime_lock);
335 more = balance_runtime(rt_rq);
336 spin_lock(&rt_rq->rt_runtime_lock);
337
338 if (more)
339 runtime = sched_rt_runtime(rt_rq);
340 }
341 #endif
342
343 if (rt_rq->rt_time > runtime) {
344 rt_rq->rt_throttled = 1;
345 if (rt_rq_throttled(rt_rq)) {
346 sched_rt_rq_dequeue(rt_rq);
347 return 1;
348 }
349 }
350
351 return 0;
352 }
353
354 /*
355 * Update the current task's runtime statistics. Skip current tasks that
356 * are not in our scheduling class.
357 */
358 static void update_curr_rt(struct rq *rq)
359 {
360 struct task_struct *curr = rq->curr;
361 struct sched_rt_entity *rt_se = &curr->rt;
362 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
363 u64 delta_exec;
364
365 if (!task_has_rt_policy(curr))
366 return;
367
368 delta_exec = rq->clock - curr->se.exec_start;
369 if (unlikely((s64)delta_exec < 0))
370 delta_exec = 0;
371
372 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
373
374 curr->se.sum_exec_runtime += delta_exec;
375 curr->se.exec_start = rq->clock;
376 cpuacct_charge(curr, delta_exec);
377
378 for_each_sched_rt_entity(rt_se) {
379 rt_rq = rt_rq_of_se(rt_se);
380
381 spin_lock(&rt_rq->rt_runtime_lock);
382 rt_rq->rt_time += delta_exec;
383 if (sched_rt_runtime_exceeded(rt_rq))
384 resched_task(curr);
385 spin_unlock(&rt_rq->rt_runtime_lock);
386 }
387 }
388
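/*
 * Accounting for an RT entity entering an rt_rq (dec_rt_tasks below is
 * the mirror image): running count, highest queued priority, migratory
 * task count and group bandwidth activation.
 */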
389 static inline
390 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
391 {
392 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
393 rt_rq->rt_nr_running++;
394 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
395 if (rt_se_prio(rt_se) < rt_rq->highest_prio)
396 rt_rq->highest_prio = rt_se_prio(rt_se);
397 #endif
398 #ifdef CONFIG_SMP
399 if (rt_se->nr_cpus_allowed > 1) {
400 struct rq *rq = rq_of_rt_rq(rt_rq);
401 rq->rt.rt_nr_migratory++;
402 }
403
404 update_rt_migration(rq_of_rt_rq(rt_rq));
405 #endif
406 #ifdef CONFIG_RT_GROUP_SCHED
407 if (rt_se_boosted(rt_se))
408 rt_rq->rt_nr_boosted++;
409
410 if (rt_rq->tg)
411 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
412 #else
413 start_rt_bandwidth(&def_rt_bandwidth);
414 #endif
415 }
416
417 static inline
418 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
419 {
420 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
421 WARN_ON(!rt_rq->rt_nr_running);
422 rt_rq->rt_nr_running--;
423 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
424 if (rt_rq->rt_nr_running) {
425 struct rt_prio_array *array;
426
427 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
428 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
429 /* recalculate */
430 array = &rt_rq->active;
431 rt_rq->highest_prio =
432 sched_find_first_bit(array->bitmap);
433 } /* otherwise leave rt_rq->highest_prio alone */
434 } else
435 rt_rq->highest_prio = MAX_RT_PRIO;
436 #endif
437 #ifdef CONFIG_SMP
438 if (rt_se->nr_cpus_allowed > 1) {
439 struct rq *rq = rq_of_rt_rq(rt_rq);
440 rq->rt.rt_nr_migratory--;
441 }
442
443 update_rt_migration(rq_of_rt_rq(rt_rq));
444 #endif /* CONFIG_SMP */
445 #ifdef CONFIG_RT_GROUP_SCHED
446 if (rt_se_boosted(rt_se))
447 rt_rq->rt_nr_boosted--;
448
449 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
450 #endif
451 }
452
453 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
454 {
455 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
456 struct rt_prio_array *array = &rt_rq->active;
457 struct rt_rq *group_rq = group_rt_rq(rt_se);
458
459 /*
460 * Don't enqueue the group if it's throttled, or when it's empty.
461 * The latter is a consequence of the former when a child group
462 * gets throttled and the current group doesn't have any other
463 * active members.
464 */
465 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
466 return;
467
468 list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
469 __set_bit(rt_se_prio(rt_se), array->bitmap);
470
471 inc_rt_tasks(rt_se, rt_rq);
472 }
473
474 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
475 {
476 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
477 struct rt_prio_array *array = &rt_rq->active;
478
479 list_del_init(&rt_se->run_list);
480 if (list_empty(array->queue + rt_se_prio(rt_se)))
481 __clear_bit(rt_se_prio(rt_se), array->bitmap);
482
483 dec_rt_tasks(rt_se, rt_rq);
484 }
485
486 /*
487 * Because the prio of an upper entry depends on the lower
488 * entries, we must remove entries top-down.
489 */
490 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
491 {
492 struct sched_rt_entity *back = NULL;
493
494 for_each_sched_rt_entity(rt_se) {
495 rt_se->back = back;
496 back = rt_se;
497 }
498
499 for (rt_se = back; rt_se; rt_se = rt_se->back) {
500 if (on_rt_rq(rt_se))
501 __dequeue_rt_entity(rt_se);
502 }
503 }
504
505 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
506 {
507 dequeue_rt_stack(rt_se);
508 for_each_sched_rt_entity(rt_se)
509 __enqueue_rt_entity(rt_se);
510 }
511
512 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
513 {
514 dequeue_rt_stack(rt_se);
515
516 for_each_sched_rt_entity(rt_se) {
517 struct rt_rq *rt_rq = group_rt_rq(rt_se);
518
519 if (rt_rq && rt_rq->rt_nr_running)
520 __enqueue_rt_entity(rt_se);
521 }
522 }
523
524 /*
525 * Adding/removing a task to/from a priority array:
526 */
527 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
528 {
529 struct sched_rt_entity *rt_se = &p->rt;
530
531 if (wakeup)
532 rt_se->timeout = 0;
533
534 enqueue_rt_entity(rt_se);
535 }
536
537 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
538 {
539 struct sched_rt_entity *rt_se = &p->rt;
540
541 update_curr_rt(rq);
542 dequeue_rt_entity(rt_se);
543 }
544
545 /*
546 * Put the task at the end of the run list without the overhead of dequeue
547 * followed by enqueue.
548 */
549 static
550 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
551 {
552 struct rt_prio_array *array = &rt_rq->active;
553 struct list_head *queue = array->queue + rt_se_prio(rt_se);
554
555 if (on_rt_rq(rt_se))
556 list_move_tail(&rt_se->run_list, queue);
557 }
558
559 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
560 {
561 struct sched_rt_entity *rt_se = &p->rt;
562 struct rt_rq *rt_rq;
563
564 for_each_sched_rt_entity(rt_se) {
565 rt_rq = rt_rq_of_se(rt_se);
566 requeue_rt_entity(rt_rq, rt_se);
567 }
568 }
569
570 static void yield_task_rt(struct rq *rq)
571 {
572 requeue_task_rt(rq, rq->curr);
573 }
574
575 #ifdef CONFIG_SMP
576 static int find_lowest_rq(struct task_struct *task);
577
578 static int select_task_rq_rt(struct task_struct *p, int sync)
579 {
580 struct rq *rq = task_rq(p);
581
582 /*
583 * If the current task is an RT task, then
584 * try to see if we can wake this RT task up on another
585 * runqueue. Otherwise simply start this RT task
586 * on its current runqueue.
587 *
588 * We want to avoid overloading runqueues, even if the woken
589 * RT task is of higher priority than the current RT task.
590 * RT tasks behave differently from other tasks: if one gets
591 * preempted, we try to push it off to another queue. Keeping
592 * a preempting RT task on the same cache-hot CPU would only
593 * force the currently running RT task onto a cold CPU, so we
594 * would be wasting all the cache of the running, lower-priority
595 * RT task in the hope of saving some for an RT task that is
596 * just being woken and will probably have a cold cache
597 * anyway.
598 */
599 if (unlikely(rt_task(rq->curr)) &&
600 (p->rt.nr_cpus_allowed > 1)) {
601 int cpu = find_lowest_rq(p);
602
603 return (cpu == -1) ? task_cpu(p) : cpu;
604 }
605
606 /*
607 * Otherwise, just let it ride on the affined RQ and the
608 * post-schedule router will push the preempted task away
609 */
610 return task_cpu(p);
611 }
612 #endif /* CONFIG_SMP */
613
614 /*
615 * Preempt the current task with a newly woken task if needed:
616 */
617 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
618 {
619 if (p->prio < rq->curr->prio)
620 resched_task(rq->curr);
621 }
622
623 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
624 struct rt_rq *rt_rq)
625 {
626 struct rt_prio_array *array = &rt_rq->active;
627 struct sched_rt_entity *next = NULL;
628 struct list_head *queue;
629 int idx;
630
631 idx = sched_find_first_bit(array->bitmap);
632 BUG_ON(idx >= MAX_RT_PRIO);
633
634 queue = array->queue + idx;
635 next = list_entry(queue->next, struct sched_rt_entity, run_list);
636
637 return next;
638 }
639
640 static struct task_struct *pick_next_task_rt(struct rq *rq)
641 {
642 struct sched_rt_entity *rt_se;
643 struct task_struct *p;
644 struct rt_rq *rt_rq;
645
646 rt_rq = &rq->rt;
647
648 if (unlikely(!rt_rq->rt_nr_running))
649 return NULL;
650
651 if (rt_rq_throttled(rt_rq))
652 return NULL;
653
654 do {
655 rt_se = pick_next_rt_entity(rq, rt_rq);
656 BUG_ON(!rt_se);
657 rt_rq = group_rt_rq(rt_se);
658 } while (rt_rq);
659
660 p = rt_task_of(rt_se);
661 p->se.exec_start = rq->clock;
662 return p;
663 }
664
665 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
666 {
667 update_curr_rt(rq);
668 p->se.exec_start = 0;
669 }
670
671 #ifdef CONFIG_SMP
672
673 /* Only try algorithms three times */
674 #define RT_MAX_TRIES 3
675
676 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
677 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
678
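/* A task is a pull/push candidate if it is not currently running, is
 * allowed on 'cpu' (or cpu < 0), and can run on more than one CPU. */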
679 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
680 {
681 if (!task_running(rq, p) &&
682 (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
683 (p->rt.nr_cpus_allowed > 1))
684 return 1;
685 return 0;
686 }
687
688 /* Return the second highest RT task, NULL otherwise */
689 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
690 {
691 struct task_struct *next = NULL;
692 struct sched_rt_entity *rt_se;
693 struct rt_prio_array *array;
694 struct rt_rq *rt_rq;
695 int idx;
696
697 for_each_leaf_rt_rq(rt_rq, rq) {
698 array = &rt_rq->active;
699 idx = sched_find_first_bit(array->bitmap);
700 next_idx:
701 if (idx >= MAX_RT_PRIO)
702 continue;
703 if (next && next->prio < idx)
704 continue;
705 list_for_each_entry(rt_se, array->queue + idx, run_list) {
706 struct task_struct *p = rt_task_of(rt_se);
707 if (pick_rt_task(rq, p, cpu)) {
708 next = p;
709 break;
710 }
711 }
712 if (!next) {
713 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
714 goto next_idx;
715 }
716 }
717
718 return next;
719 }
720
721 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
722
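/*
 * Build a mask of online CPUs, allowed for 'task', whose runqueues run
 * at the lowest RT priority (ideally none at all); returns how many
 * candidates were found (the caller only cares about 0, 1 or more).
 */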
723 static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
724 {
725 int lowest_prio = -1;
726 int lowest_cpu = -1;
727 int count = 0;
728 int cpu;
729
730 cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
731
732 /*
733 * Scan each rq for the lowest prio.
734 */
735 for_each_cpu_mask(cpu, *lowest_mask) {
736 struct rq *rq = cpu_rq(cpu);
737
738 /* We look for lowest RT prio or non-rt CPU */
739 if (rq->rt.highest_prio >= MAX_RT_PRIO) {
740 /*
741 * If we already found a low-priority RT queue
742 * and now we found this non-RT queue,
743 * clear the mask and set only our bit.
744 * Otherwise just return the mask as is,
745 * and the count==1 will cause the algorithm
746 * to use the first bit found.
747 */
748 if (lowest_cpu != -1) {
749 cpus_clear(*lowest_mask);
750 cpu_set(rq->cpu, *lowest_mask);
751 }
752 return 1;
753 }
754
755 /* no locking for now */
756 if ((rq->rt.highest_prio > task->prio)
757 && (rq->rt.highest_prio >= lowest_prio)) {
758 if (rq->rt.highest_prio > lowest_prio) {
759 /* new low - clear old data */
760 lowest_prio = rq->rt.highest_prio;
761 lowest_cpu = cpu;
762 count = 0;
763 }
764 count++;
765 } else
766 cpu_clear(cpu, *lowest_mask);
767 }
768
769 /*
770 * Clear out all the set bits that represent
771 * runqueues that were of higher prio than
772 * the lowest_prio.
773 */
774 if (lowest_cpu > 0) {
775 /*
776 * Perhaps we could add another cpumask op to
777 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
778 * Then that could be optimized to use memset and such.
779 */
780 for_each_cpu_mask(cpu, *lowest_mask) {
781 if (cpu >= lowest_cpu)
782 break;
783 cpu_clear(cpu, *lowest_mask);
784 }
785 }
786
787 return count;
788 }
789
790 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
791 {
792 int first;
793
794 /* "this_cpu" is cheaper to preempt than a remote processor */
795 if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
796 return this_cpu;
797
798 first = first_cpu(*mask);
799 if (first != NR_CPUS)
800 return first;
801
802 return -1;
803 }
804
805 static int find_lowest_rq(struct task_struct *task)
806 {
807 struct sched_domain *sd;
808 cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
809 int this_cpu = smp_processor_id();
810 int cpu = task_cpu(task);
811 int count = find_lowest_cpus(task, lowest_mask);
812
813 if (!count)
814 return -1; /* No targets found */
815
816 /*
817 * There is no sense in performing an optimal search if only one
818 * target is found.
819 */
820 if (count == 1)
821 return first_cpu(*lowest_mask);
822
823 /*
824 * At this point we have built a mask of cpus representing the
825 * lowest priority tasks in the system. Now we want to elect
826 * the best one based on our affinity and topology.
827 *
828 * We prioritize the last cpu that the task executed on since
829 * it is most likely cache-hot in that location.
830 */
831 if (cpu_isset(cpu, *lowest_mask))
832 return cpu;
833
834 /*
835 * Otherwise, we consult the sched_domains span maps to figure
836 * out which cpu is logically closest to our hot cache data.
837 */
838 if (this_cpu == cpu)
839 this_cpu = -1; /* Skip this_cpu opt if the same */
840
841 for_each_domain(cpu, sd) {
842 if (sd->flags & SD_WAKE_AFFINE) {
843 cpumask_t domain_mask;
844 int best_cpu;
845
846 cpus_and(domain_mask, sd->span, *lowest_mask);
847
848 best_cpu = pick_optimal_cpu(this_cpu,
849 &domain_mask);
850 if (best_cpu != -1)
851 return best_cpu;
852 }
853 }
854
855 /*
856 * And finally, if there were no matches within the domains
857 * just give the caller *something* to work with from the compatible
858 * locations.
859 */
860 return pick_optimal_cpu(this_cpu, lowest_mask);
861 }
862
863 /* Will lock the rq it finds */
864 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
865 {
866 struct rq *lowest_rq = NULL;
867 int tries;
868 int cpu;
869
870 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
871 cpu = find_lowest_rq(task);
872
873 if ((cpu == -1) || (cpu == rq->cpu))
874 break;
875
876 lowest_rq = cpu_rq(cpu);
877
878 /* if the prio of this runqueue changed, try again */
879 if (double_lock_balance(rq, lowest_rq)) {
880 /*
881 * We had to unlock the run queue. In
882 * the meantime, the task could have
883 * migrated already or had its affinity changed.
884 * Also make sure that it wasn't scheduled on its rq.
885 */
886 if (unlikely(task_rq(task) != rq ||
887 !cpu_isset(lowest_rq->cpu,
888 task->cpus_allowed) ||
889 task_running(rq, task) ||
890 !task->se.on_rq)) {
891
892 spin_unlock(&lowest_rq->lock);
893 lowest_rq = NULL;
894 break;
895 }
896 }
897
898 /* If this rq is still suitable use it. */
899 if (lowest_rq->rt.highest_prio > task->prio)
900 break;
901
902 /* try again */
903 spin_unlock(&lowest_rq->lock);
904 lowest_rq = NULL;
905 }
906
907 return lowest_rq;
908 }
909
910 /*
911 * If the current CPU has more than one RT task, see if the
912 * non-running task can migrate over to a CPU that is running a task
913 * of lesser priority.
914 */
915 static int push_rt_task(struct rq *rq)
916 {
917 struct task_struct *next_task;
918 struct rq *lowest_rq;
919 int ret = 0;
920 int paranoid = RT_MAX_TRIES;
921
922 if (!rq->rt.overloaded)
923 return 0;
924
925 next_task = pick_next_highest_task_rt(rq, -1);
926 if (!next_task)
927 return 0;
928
929 retry:
930 if (unlikely(next_task == rq->curr)) {
931 WARN_ON(1);
932 return 0;
933 }
934
935 /*
936 * It's possible that the next_task slipped in at a
937 * higher priority than current. If that's the case,
938 * just reschedule current.
939 */
940 if (unlikely(next_task->prio < rq->curr->prio)) {
941 resched_task(rq->curr);
942 return 0;
943 }
944
945 /* We might release rq lock */
946 get_task_struct(next_task);
947
948 /* find_lock_lowest_rq locks the rq if found */
949 lowest_rq = find_lock_lowest_rq(next_task, rq);
950 if (!lowest_rq) {
951 struct task_struct *task;
952 /*
953 * find_lock_lowest_rq releases rq->lock,
954 * so it is possible that next_task has changed.
955 * If it has, then try again.
956 */
957 task = pick_next_highest_task_rt(rq, -1);
958 if (unlikely(task != next_task) && task && paranoid--) {
959 put_task_struct(next_task);
960 next_task = task;
961 goto retry;
962 }
963 goto out;
964 }
965
966 deactivate_task(rq, next_task, 0);
967 set_task_cpu(next_task, lowest_rq->cpu);
968 activate_task(lowest_rq, next_task, 0);
969
970 resched_task(lowest_rq->curr);
971
972 spin_unlock(&lowest_rq->lock);
973
974 ret = 1;
975 out:
976 put_task_struct(next_task);
977
978 return ret;
979 }
980
981 /*
982 * TODO: Currently we just use the second highest prio task on
983 * the queue, and stop when it can't migrate (or there are
984 * no more RT tasks). There may be a case where a lower
985 * priority RT task has a different affinity than the
986 * higher RT task. In this case the lower RT task could
987 * possibly be able to migrate, whereas the higher priority
988 * RT task could not. We currently ignore this issue.
989 * Enhancements are welcome!
990 */
991 static void push_rt_tasks(struct rq *rq)
992 {
993 /* push_rt_task will return true if it moved an RT */
994 while (push_rt_task(rq))
995 ;
996 }
997
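/*
 * Scan every RT-overloaded runqueue in our root domain and pull over any
 * queued RT task that would preempt whatever this_rq is about to run.
 */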
998 static int pull_rt_task(struct rq *this_rq)
999 {
1000 int this_cpu = this_rq->cpu, ret = 0, cpu;
1001 struct task_struct *p, *next;
1002 struct rq *src_rq;
1003
1004 if (likely(!rt_overloaded(this_rq)))
1005 return 0;
1006
1007 next = pick_next_task_rt(this_rq);
1008
1009 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
1010 if (this_cpu == cpu)
1011 continue;
1012
1013 src_rq = cpu_rq(cpu);
1014 /*
1015 * We can potentially drop this_rq's lock in
1016 * double_lock_balance, and another CPU could
1017 * steal our next task - hence we must cause
1018 * the caller to recalculate the next task
1019 * in that case:
1020 */
1021 if (double_lock_balance(this_rq, src_rq)) {
1022 struct task_struct *old_next = next;
1023
1024 next = pick_next_task_rt(this_rq);
1025 if (next != old_next)
1026 ret = 1;
1027 }
1028
1029 /*
1030 * Are there still pullable RT tasks?
1031 */
1032 if (src_rq->rt.rt_nr_running <= 1)
1033 goto skip;
1034
1035 p = pick_next_highest_task_rt(src_rq, this_cpu);
1036
1037 /*
1038 * Do we have an RT task that preempts
1039 * the to-be-scheduled task?
1040 */
1041 if (p && (!next || (p->prio < next->prio))) {
1042 WARN_ON(p == src_rq->curr);
1043 WARN_ON(!p->se.on_rq);
1044
1045 /*
1046 * There's a chance that p is higher in priority
1047 * than what's currently running on its cpu.
1048 * This is just that p is waking up and hasn't
1049 * had a chance to schedule. We only pull
1050 * p if it is lower in priority than the
1051 * current task on the run queue or
1052 * this_rq next task is lower in prio than
1053 * the current task on that rq.
1054 */
1055 if (p->prio < src_rq->curr->prio ||
1056 (next && next->prio < src_rq->curr->prio))
1057 goto skip;
1058
1059 ret = 1;
1060
1061 deactivate_task(src_rq, p, 0);
1062 set_task_cpu(p, this_cpu);
1063 activate_task(this_rq, p, 0);
1064 /*
1065 * We continue with the search, just in
1066 * case there's an even higher prio task
1067 * in another runqueue. (low likelihood
1068 * but possible)
1069 *
1070 * Update next so that we won't pick a task
1071 * on another cpu with a priority lower (or equal)
1072 * than the one we just picked.
1073 */
1074 next = p;
1075
1076 }
1077 skip:
1078 spin_unlock(&src_rq->lock);
1079 }
1080
1081 return ret;
1082 }
1083
1084 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1085 {
1086 /* Try to pull RT tasks here if we lower this rq's prio */
1087 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1088 pull_rt_task(rq);
1089 }
1090
1091 static void post_schedule_rt(struct rq *rq)
1092 {
1093 /*
1094 * If we have more than one rt_task queued, then
1095 * see if we can push the other rt_tasks off to other CPUs.
1096 * Note we may release the rq lock, and since
1097 * the lock was owned by prev, we need to release it
1098 * first via finish_lock_switch and then reacquire it here.
1099 */
1100 if (unlikely(rq->rt.overloaded)) {
1101 spin_lock_irq(&rq->lock);
1102 push_rt_tasks(rq);
1103 spin_unlock_irq(&rq->lock);
1104 }
1105 }
1106
1107 /*
1108 * If we are not running and we are not going to reschedule soon, we should
1109 * try to push tasks away now
1110 */
1111 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1112 {
1113 if (!task_running(rq, p) &&
1114 !test_tsk_need_resched(rq->curr) &&
1115 rq->rt.overloaded)
1116 push_rt_tasks(rq);
1117 }
1118
1119 static unsigned long
1120 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1121 unsigned long max_load_move,
1122 struct sched_domain *sd, enum cpu_idle_type idle,
1123 int *all_pinned, int *this_best_prio)
1124 {
1125 /* don't touch RT tasks */
1126 return 0;
1127 }
1128
1129 static int
1130 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1131 struct sched_domain *sd, enum cpu_idle_type idle)
1132 {
1133 /* don't touch RT tasks */
1134 return 0;
1135 }
1136
1137 static void set_cpus_allowed_rt(struct task_struct *p,
1138 const cpumask_t *new_mask)
1139 {
1140 int weight = cpus_weight(*new_mask);
1141
1142 BUG_ON(!rt_task(p));
1143
1144 /*
1145 * Update the migration status of the RQ if we have a queued RT task
1146 * whose allowed-CPU weight is changing.
1147 */
1148 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1149 struct rq *rq = task_rq(p);
1150
1151 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1152 rq->rt.rt_nr_migratory++;
1153 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1154 BUG_ON(!rq->rt.rt_nr_migratory);
1155 rq->rt.rt_nr_migratory--;
1156 }
1157
1158 update_rt_migration(rq);
1159 }
1160
1161 p->cpus_allowed = *new_mask;
1162 p->rt.nr_cpus_allowed = weight;
1163 }
1164
1165 /* Assumes rq->lock is held */
1166 static void join_domain_rt(struct rq *rq)
1167 {
1168 if (rq->rt.overloaded)
1169 rt_set_overload(rq);
1170 }
1171
1172 /* Assumes rq->lock is held */
1173 static void leave_domain_rt(struct rq *rq)
1174 {
1175 if (rq->rt.overloaded)
1176 rt_clear_overload(rq);
1177 }
1178
1179 /*
1180 * When switching away from the RT queue, we bring ourselves to a
1181 * position where we might want to pull RT tasks from other runqueues.
1182 */
1183 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1184 int running)
1185 {
1186 /*
1187 * If there are other RT tasks then we will reschedule
1188 * and the scheduling of the other RT tasks will handle
1189 * the balancing. But if we are the last RT task
1190 * we may need to handle the pulling of RT tasks
1191 * now.
1192 */
1193 if (!rq->rt.rt_nr_running)
1194 pull_rt_task(rq);
1195 }
1196 #endif /* CONFIG_SMP */
1197
1198 /*
1199 * When switching a task to RT, we may overload the runqueue
1200 * with RT tasks. In this case we try to push them off to
1201 * other runqueues.
1202 */
1203 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1204 int running)
1205 {
1206 int check_resched = 1;
1207
1208 /*
1209 * If we are already running, then there's nothing
1210 * that needs to be done. But if we are not running
1211 * we may need to preempt the current running task.
1212 * If that current running task is also an RT task
1213 * then see if we can move to another run queue.
1214 */
1215 if (!running) {
1216 #ifdef CONFIG_SMP
1217 if (rq->rt.overloaded && push_rt_task(rq) &&
1218 /* Don't resched if we changed runqueues */
1219 rq != task_rq(p))
1220 check_resched = 0;
1221 #endif /* CONFIG_SMP */
1222 if (check_resched && p->prio < rq->curr->prio)
1223 resched_task(rq->curr);
1224 }
1225 }
1226
1227 /*
1228 * Priority of the task has changed. This may cause
1229 * us to initiate a push or pull.
1230 */
1231 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1232 int oldprio, int running)
1233 {
1234 if (running) {
1235 #ifdef CONFIG_SMP
1236 /*
1237 * If our priority decreases while running, we
1238 * may need to pull tasks to this runqueue.
1239 */
1240 if (oldprio < p->prio)
1241 pull_rt_task(rq);
1242 /*
1243 * If there's a higher priority task waiting to run
1244 * then reschedule. Note, the above pull_rt_task
1245 * can release the rq lock and p could migrate.
1246 * Only reschedule if p is still on the same runqueue.
1247 */
1248 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1249 resched_task(p);
1250 #else
1251 /* For UP simply resched on drop of prio */
1252 if (oldprio < p->prio)
1253 resched_task(p);
1254 #endif /* CONFIG_SMP */
1255 } else {
1256 /*
1257 * This task is not running, but if its priority is
1258 * higher than that of the currently running task,
1259 * then reschedule.
1260 */
1261 if (p->prio < rq->curr->prio)
1262 resched_task(rq->curr);
1263 }
1264 }
1265
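/*
 * RLIMIT_RTTIME enforcement: rt.timeout counts scheduler ticks this task
 * has run without sleeping; once past the soft limit we arm
 * it_sched_expires so the posix-cpu-timer path can deliver the signal.
 */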
1266 static void watchdog(struct rq *rq, struct task_struct *p)
1267 {
1268 unsigned long soft, hard;
1269
1270 if (!p->signal)
1271 return;
1272
1273 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1274 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1275
1276 if (soft != RLIM_INFINITY) {
1277 unsigned long next;
1278
1279 p->rt.timeout++;
1280 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1281 if (p->rt.timeout > next)
1282 p->it_sched_expires = p->se.sum_exec_runtime;
1283 }
1284 }
1285
1286 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1287 {
1288 update_curr_rt(rq);
1289
1290 watchdog(rq, p);
1291
1292 /*
1293 * RR tasks need a special form of timeslice management.
1294 * FIFO tasks have no timeslices.
1295 */
1296 if (p->policy != SCHED_RR)
1297 return;
1298
1299 if (--p->rt.time_slice)
1300 return;
1301
1302 p->rt.time_slice = DEF_TIMESLICE;
1303
1304 /*
1305 * Requeue to the end of the queue if we are not the only element
1306 * on the queue:
1307 */
1308 if (p->rt.run_list.prev != p->rt.run_list.next) {
1309 requeue_task_rt(rq, p);
1310 set_tsk_need_resched(p);
1311 }
1312 }
1313
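/*
 * p just became rq->curr without going through pick_next_task_rt()
 * (e.g. sched_setscheduler on the running task), so restart its
 * exec_start accounting here.
 */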
1314 static void set_curr_task_rt(struct rq *rq)
1315 {
1316 struct task_struct *p = rq->curr;
1317
1318 p->se.exec_start = rq->clock;
1319 }
1320
1321 static const struct sched_class rt_sched_class = {
1322 .next = &fair_sched_class,
1323 .enqueue_task = enqueue_task_rt,
1324 .dequeue_task = dequeue_task_rt,
1325 .yield_task = yield_task_rt,
1326 #ifdef CONFIG_SMP
1327 .select_task_rq = select_task_rq_rt,
1328 #endif /* CONFIG_SMP */
1329
1330 .check_preempt_curr = check_preempt_curr_rt,
1331
1332 .pick_next_task = pick_next_task_rt,
1333 .put_prev_task = put_prev_task_rt,
1334
1335 #ifdef CONFIG_SMP
1336 .load_balance = load_balance_rt,
1337 .move_one_task = move_one_task_rt,
1338 .set_cpus_allowed = set_cpus_allowed_rt,
1339 .join_domain = join_domain_rt,
1340 .leave_domain = leave_domain_rt,
1341 .pre_schedule = pre_schedule_rt,
1342 .post_schedule = post_schedule_rt,
1343 .task_wake_up = task_wake_up_rt,
1344 .switched_from = switched_from_rt,
1345 #endif
1346
1347 .set_curr_task = set_curr_task_rt,
1348 .task_tick = task_tick_rt,
1349
1350 .prio_changed = prio_changed_rt,
1351 .switched_to = switched_to_rt,
1352 };