/*
 * kernel/sched_rt.c
 *
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
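
/*
 * Usage sketch (illustrative, not part of the kernel itself): a task is
 * placed under this class from userspace via sched_setscheduler(), e.g.
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	if (sched_setscheduler(0, SCHED_FIFO, &param))
 *		perror("sched_setscheduler");
 *
 * The hooks below then decide how such a task is enqueued, throttled,
 * preempted and migrated.
 */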

#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}
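
/*
 * Example (illustrative): an rq holding two RT tasks where at least one
 * may run elsewhere (rt_nr_total == 2, rt_nr_migratory >= 1) is marked
 * overloaded so that peer CPUs will consider pulling from it; once only
 * a single RT task remains, or none of them can migrate, the flag is
 * cleared again.
 */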

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}
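
/*
 * The pushable list is a priority-sorted plist keyed on p->prio; lower
 * numeric values sort first, so plist_first_entry() on it always yields
 * the highest-priority pushable task. The del/init/add sequence above is
 * simply how a plist node is re-keyed when p->prio may have changed.
 */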

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_add_rcu(&rt_rq->leaf_rt_rq_list,
			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	int this_cpu = smp_processor_id();
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	rt_se = rt_rq->tg->rt_se[this_cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	int this_cpu = smp_processor_id();
	struct sched_rt_entity *rt_se;

	rt_se = rt_rq->tg->rt_se[this_cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
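
/*
 * Worked example (illustrative numbers): with a 1s period, a weight of
 * 4 CPUs and a neighbour that has 950ms of rt_runtime but only 350ms of
 * rt_time consumed, diff = 600ms / 4 = 150ms is moved from the
 * neighbour's rt_runtime to ours, and the loop continues until we reach
 * the full period or run out of neighbours with spare time.
 */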

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
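
/*
 * Example (using what are believed to be the usual defaults,
 * /proc/sys/kernel/sched_rt_runtime_us = 950000 and
 * /proc/sys/kernel/sched_rt_period_us = 1000000): a CPU-bound FIFO task
 * accumulates rt_time until it exceeds 950ms within the 1s period, at
 * which point the rt_rq is throttled and dequeued, leaving the remaining
 * 50ms to non-RT tasks until the period timer replenishes runtime and
 * clears the throttle.
 */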

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest
		 */
		rt_rq->highest_prio.next = next_prio(rq);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
		rt_rq->highest_prio.next = next_prio(rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when it's empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}
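
/*
 * Example (group case): for a task inside task group A the stack is
 * task_se -> A's rt_se; the first loop records the chain, the second
 * dequeues A's entity before the task's, i.e. top-down, because a
 * parent's queue position is derived from the priorities still queued
 * beneath it.
 */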

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher prio task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (rq->curr->rt.nr_cpus_allowed < 2 ||
	     rq->curr->prio < p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
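
/*
 * Example: if the task waking up is lower priority than an RT task
 * already running here, and the waker itself may run on other CPUs,
 * find_lowest_rq() is consulted so the wakee starts on a CPU it can
 * actually preempt instead of queueing behind current on this one.
 */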

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock_task;

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
				return this_cpu;

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}
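
/*
 * Summary of the preference order implemented above: (1) the task's last
 * CPU if it is among the lowest-priority CPUs, (2) this_cpu when it lies
 * in a WAKE_AFFINE domain shared with the task, (3) the first matching
 * CPU in such a domain, (4) this_cpu as a fallback, and finally (5) any
 * CPU from the mask.
 */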

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				raw_spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->se.on_rq);
	BUG_ON(!rt_task(p));

	return p;
}
/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock,
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push. We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
			dequeue_pushable_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return 1;
}
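
/*
 * Example push: this rq runs a prio-10 task with a prio-20 task queued
 * and pushable; if some CPU's rq has highest_prio.curr numerically
 * greater than 20, find_lock_lowest_rq() locks it and the prio-20 task
 * is deactivated here, reassigned with set_task_cpu() and activated
 * there, after which that CPU is rescheduled.
 */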

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue.
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1 &&
	    rt_task(rq->curr) &&
	    (rq->curr->rt.nr_cpus_allowed < 2 ||
	     rq->curr->prio < p->prio))
		push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable list
			 * before going further. It will either remain off of
			 * the list because we are no longer pushable, or it
			 * will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);

		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}
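
/*
 * Example: pinning a previously 4-CPU-wide RT task to one CPU (e.g. via
 * sched_setaffinity()) drops weight to 1, so the task is removed from
 * the pushable list, rt_nr_migratory is decremented and the overload
 * state is re-evaluated; widening the mask again reverses each step.
 */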

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the RT queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the current running task,
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}
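
/*
 * Example: with DEF_TIMESLICE nominally worth 100ms of ticks, two
 * SCHED_RR tasks at the same priority alternate every time one of them
 * exhausts its slice, while a SCHED_FIFO task at that priority would
 * run until it blocks or a higher-priority task preempts it.
 */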

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock_task;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return DEF_TIMESLICE;
	else
		return 0;
}

static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */