/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;
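/*
 * Worked example with the defaults and a scaling factor of 1:
 * sched_nr_latency = 6000000 ns / 750000 ns = 8, i.e. once more than 8
 * tasks are runnable the scheduling period is stretched beyond
 * sysctl_sched_latency rather than the per-task slices shrinking further.
 */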
/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
/*
 * The exponential sliding window over which load is averaged for shares
 * distribution and load-balance.
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}
void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32
/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
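/*
 * Illustrative sketch only (not kernel code): the computation above is
 * delta_exec * weight / lw->weight done in fixed point, with the divide
 * replaced by a cached 32-bit reciprocal (lw->inv_weight) and SRR()
 * providing a rounding right-shift. Ignoring the overflow and
 * SCHED_LOAD_RESOLUTION handling, the idea reduces to:
 *
 *	static u64 scaled_delta(u64 delta_exec, u64 weight, u64 lw_weight)
 *	{
 *		u64 inv_weight = (1ULL << 32) / lw_weight;	// lw->inv_weight
 *
 *		return (delta_exec * weight * inv_weight) >> 32;
 *	}
 *
 * e.g. delta_exec = 1000000 ns at weight 1024 (nice 0) against a queue
 * weight of 2048 yields roughly 500000 ns of weighted runtime.
 */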
const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED
227 /* cpu runqueue to which this cfs_rq is attached */
228 static inline struct rq
*rq_of(struct cfs_rq
*cfs_rq
)
233 /* An entity is a task if it doesn't "own" a runqueue */
234 #define entity_is_task(se) (!se->my_q)
236 static inline struct task_struct
*task_of(struct sched_entity
*se
)
238 #ifdef CONFIG_SCHED_DEBUG
239 WARN_ON_ONCE(!entity_is_task(se
));
241 return container_of(se
, struct task_struct
, se
);
244 /* Walk up scheduling entities hierarchy */
245 #define for_each_sched_entity(se) \
246 for (; se; se = se->parent)
248 static inline struct cfs_rq
*task_cfs_rq(struct task_struct
*p
)
253 /* runqueue on which this entity is (to be) queued */
254 static inline struct cfs_rq
*cfs_rq_of(struct sched_entity
*se
)
259 /* runqueue "owned" by this group */
260 static inline struct cfs_rq
*group_cfs_rq(struct sched_entity
*grp
)
265 static inline void list_add_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
267 if (!cfs_rq
->on_list
) {
269 * Ensure we either appear before our parent (if already
270 * enqueued) or force our parent to appear after us when it is
271 * enqueued. The fact that we always enqueue bottom-up
272 * reduces this to two cases.
274 if (cfs_rq
->tg
->parent
&&
275 cfs_rq
->tg
->parent
->cfs_rq
[cpu_of(rq_of(cfs_rq
))]->on_list
) {
276 list_add_rcu(&cfs_rq
->leaf_cfs_rq_list
,
277 &rq_of(cfs_rq
)->leaf_cfs_rq_list
);
279 list_add_tail_rcu(&cfs_rq
->leaf_cfs_rq_list
,
280 &rq_of(cfs_rq
)->leaf_cfs_rq_list
);
287 static inline void list_del_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
289 if (cfs_rq
->on_list
) {
290 list_del_rcu(&cfs_rq
->leaf_cfs_rq_list
);
/* Iterate through all leaf cfs_rq's on a runqueue */
296 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
297 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
299 /* Do the two (enqueued) entities belong to the same group ? */
301 is_same_group(struct sched_entity
*se
, struct sched_entity
*pse
)
303 if (se
->cfs_rq
== pse
->cfs_rq
)
309 static inline struct sched_entity
*parent_entity(struct sched_entity
*se
)
314 /* return depth at which a sched entity is present in the hierarchy */
315 static inline int depth_se(struct sched_entity
*se
)
319 for_each_sched_entity(se
)
326 find_matching_se(struct sched_entity
**se
, struct sched_entity
**pse
)
328 int se_depth
, pse_depth
;
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq i.e. who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of a
	 * common parent.
337 /* First walk up until both entities are at same depth */
338 se_depth
= depth_se(*se
);
339 pse_depth
= depth_se(*pse
);
341 while (se_depth
> pse_depth
) {
343 *se
= parent_entity(*se
);
346 while (pse_depth
> se_depth
) {
348 *pse
= parent_entity(*pse
);
351 while (!is_same_group(*se
, *pse
)) {
352 *se
= parent_entity(*se
);
353 *pse
= parent_entity(*pse
);
357 #else /* !CONFIG_FAIR_GROUP_SCHED */
359 static inline struct task_struct
*task_of(struct sched_entity
*se
)
361 return container_of(se
, struct task_struct
, se
);
364 static inline struct rq
*rq_of(struct cfs_rq
*cfs_rq
)
366 return container_of(cfs_rq
, struct rq
, cfs
);
369 #define entity_is_task(se) 1
371 #define for_each_sched_entity(se) \
372 for (; se; se = NULL)
374 static inline struct cfs_rq
*task_cfs_rq(struct task_struct
*p
)
376 return &task_rq(p
)->cfs
;
379 static inline struct cfs_rq
*cfs_rq_of(struct sched_entity
*se
)
381 struct task_struct
*p
= task_of(se
);
382 struct rq
*rq
= task_rq(p
);
387 /* runqueue "owned" by this group */
388 static inline struct cfs_rq
*group_cfs_rq(struct sched_entity
*grp
)
393 static inline void list_add_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
397 static inline void list_del_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
401 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
402 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
405 is_same_group(struct sched_entity
*se
, struct sched_entity
*pse
)
410 static inline struct sched_entity
*parent_entity(struct sched_entity
*se
)
416 find_matching_se(struct sched_entity
**se
, struct sched_entity
**pse
)
420 #endif /* CONFIG_FAIR_GROUP_SCHED */
422 static __always_inline
423 void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
, unsigned long delta_exec
);
/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
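/*
 * Example: the signed difference keeps the comparison correct even after
 * vruntime wraps around u64. With a->vruntime = 10 and
 * b->vruntime = ULLONG_MAX - 5, (s64)(a->vruntime - b->vruntime) == 16,
 * which is positive, so 'a' is correctly treated as the later key even
 * though its raw value is numerically smaller.
 */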
453 static void update_min_vruntime(struct cfs_rq
*cfs_rq
)
455 u64 vruntime
= cfs_rq
->min_vruntime
;
458 vruntime
= cfs_rq
->curr
->vruntime
;
460 if (cfs_rq
->rb_leftmost
) {
461 struct sched_entity
*se
= rb_entry(cfs_rq
->rb_leftmost
,
466 vruntime
= se
->vruntime
;
468 vruntime
= min_vruntime(vruntime
, se
->vruntime
);
471 cfs_rq
->min_vruntime
= max_vruntime(cfs_rq
->min_vruntime
, vruntime
);
474 cfs_rq
->min_vruntime_copy
= cfs_rq
->min_vruntime
;
479 * Enqueue an entity into the rb-tree:
481 static void __enqueue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
483 struct rb_node
**link
= &cfs_rq
->tasks_timeline
.rb_node
;
484 struct rb_node
*parent
= NULL
;
485 struct sched_entity
*entry
;
489 * Find the right place in the rbtree:
493 entry
= rb_entry(parent
, struct sched_entity
, run_node
);
	 * We don't care about collisions. Nodes with
	 * the same key stay together.
498 if (entity_before(se
, entry
)) {
499 link
= &parent
->rb_left
;
501 link
= &parent
->rb_right
;
507 * Maintain a cache of leftmost tree entries (it is frequently
511 cfs_rq
->rb_leftmost
= &se
->run_node
;
513 rb_link_node(&se
->run_node
, parent
, link
);
514 rb_insert_color(&se
->run_node
, &cfs_rq
->tasks_timeline
);
517 static void __dequeue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
519 if (cfs_rq
->rb_leftmost
== &se
->run_node
) {
520 struct rb_node
*next_node
;
522 next_node
= rb_next(&se
->run_node
);
523 cfs_rq
->rb_leftmost
= next_node
;
526 rb_erase(&se
->run_node
, &cfs_rq
->tasks_timeline
);
529 struct sched_entity
*__pick_first_entity(struct cfs_rq
*cfs_rq
)
531 struct rb_node
*left
= cfs_rq
->rb_leftmost
;
536 return rb_entry(left
, struct sched_entity
, run_node
);
539 static struct sched_entity
*__pick_next_entity(struct sched_entity
*se
)
541 struct rb_node
*next
= rb_next(&se
->run_node
);
546 return rb_entry(next
, struct sched_entity
, run_node
);
549 #ifdef CONFIG_SCHED_DEBUG
550 struct sched_entity
*__pick_last_entity(struct cfs_rq
*cfs_rq
)
552 struct rb_node
*last
= rb_last(&cfs_rq
->tasks_timeline
);
557 return rb_entry(last
, struct sched_entity
, run_node
);
560 /**************************************************************
561 * Scheduling class statistics methods:
564 int sched_proc_update_handler(struct ctl_table
*table
, int write
,
565 void __user
*buffer
, size_t *lenp
,
568 int ret
= proc_dointvec_minmax(table
, write
, buffer
, lenp
, ppos
);
569 int factor
= get_update_sysctl_factor();
574 sched_nr_latency
= DIV_ROUND_UP(sysctl_sched_latency
,
575 sysctl_sched_min_granularity
);
577 #define WRT_SYSCTL(name) \
578 (normalized_sysctl_##name = sysctl_##name / (factor))
579 WRT_SYSCTL(sched_min_granularity
);
580 WRT_SYSCTL(sched_latency
);
581 WRT_SYSCTL(sched_wakeup_granularity
);
591 static inline unsigned long
592 calc_delta_fair(unsigned long delta
, struct sched_entity
*se
)
594 if (unlikely(se
->load
.weight
!= NICE_0_LOAD
))
595 delta
= calc_delta_mine(delta
, NICE_0_LOAD
, &se
->load
);
601 * The idea is to set a period in which each task runs once.
603 * When there are too many tasks (sched_nr_latency) we have to stretch
604 * this period because otherwise the slices get too small.
606 * p = (nr <= nl) ? l : l*nr/nl
608 static u64
__sched_period(unsigned long nr_running
)
610 u64 period
= sysctl_sched_latency
;
611 unsigned long nr_latency
= sched_nr_latency
;
613 if (unlikely(nr_running
> nr_latency
)) {
614 period
= sysctl_sched_min_granularity
;
615 period
*= nr_running
;
622 * We calculate the wall-time slice from the period by taking a part
623 * proportional to the weight.
627 static u64
sched_slice(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
629 u64 slice
= __sched_period(cfs_rq
->nr_running
+ !se
->on_rq
);
631 for_each_sched_entity(se
) {
632 struct load_weight
*load
;
633 struct load_weight lw
;
635 cfs_rq
= cfs_rq_of(se
);
636 load
= &cfs_rq
->load
;
638 if (unlikely(!se
->on_rq
)) {
641 update_load_add(&lw
, se
->load
.weight
);
644 slice
= calc_delta_mine(slice
, se
->load
.weight
, load
);
650 * We calculate the vruntime slice of a to be inserted task
654 static u64
sched_vslice(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
656 return calc_delta_fair(sched_slice(cfs_rq
, se
), se
);
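/*
 * Worked example (defaults, scaling factor 1): with 4 runnable tasks the
 * period stays at sysctl_sched_latency = 6 ms since 4 <= sched_nr_latency;
 * with 16 tasks it stretches to 16 * 0.75 ms = 12 ms. An entity holding
 * half of the cfs_rq weight then receives a wall-time slice of period / 2,
 * and sched_vslice() converts that slice into the amount of vruntime the
 * entity would accrue while running it.
 */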
659 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
);
660 static void update_cfs_shares(struct cfs_rq
*cfs_rq
);
663 * Update the current task's runtime statistics. Skip current tasks that
664 * are not in our scheduling class.
667 __update_curr(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
,
668 unsigned long delta_exec
)
670 unsigned long delta_exec_weighted
;
672 schedstat_set(curr
->statistics
.exec_max
,
673 max((u64
)delta_exec
, curr
->statistics
.exec_max
));
675 curr
->sum_exec_runtime
+= delta_exec
;
676 schedstat_add(cfs_rq
, exec_clock
, delta_exec
);
677 delta_exec_weighted
= calc_delta_fair(delta_exec
, curr
);
679 curr
->vruntime
+= delta_exec_weighted
;
680 update_min_vruntime(cfs_rq
);
682 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
683 cfs_rq
->load_unacc_exec_time
+= delta_exec
;
687 static void update_curr(struct cfs_rq
*cfs_rq
)
689 struct sched_entity
*curr
= cfs_rq
->curr
;
690 u64 now
= rq_of(cfs_rq
)->clock_task
;
691 unsigned long delta_exec
;
697 * Get the amount of time the current task was running
698 * since the last time we changed load (this cannot
699 * overflow on 32 bits):
701 delta_exec
= (unsigned long)(now
- curr
->exec_start
);
705 __update_curr(cfs_rq
, curr
, delta_exec
);
706 curr
->exec_start
= now
;
708 if (entity_is_task(curr
)) {
709 struct task_struct
*curtask
= task_of(curr
);
711 trace_sched_stat_runtime(curtask
, delta_exec
, curr
->vruntime
);
712 cpuacct_charge(curtask
, delta_exec
);
713 account_group_exec_runtime(curtask
, delta_exec
);
716 account_cfs_rq_runtime(cfs_rq
, delta_exec
);
720 update_stats_wait_start(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
722 schedstat_set(se
->statistics
.wait_start
, rq_of(cfs_rq
)->clock
);
726 * Task is being enqueued - update stats:
728 static void update_stats_enqueue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
731 * Are we enqueueing a waiting task? (for current tasks
732 * a dequeue/enqueue event is a NOP)
734 if (se
!= cfs_rq
->curr
)
735 update_stats_wait_start(cfs_rq
, se
);
739 update_stats_wait_end(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
741 schedstat_set(se
->statistics
.wait_max
, max(se
->statistics
.wait_max
,
742 rq_of(cfs_rq
)->clock
- se
->statistics
.wait_start
));
743 schedstat_set(se
->statistics
.wait_count
, se
->statistics
.wait_count
+ 1);
744 schedstat_set(se
->statistics
.wait_sum
, se
->statistics
.wait_sum
+
745 rq_of(cfs_rq
)->clock
- se
->statistics
.wait_start
);
746 #ifdef CONFIG_SCHEDSTATS
747 if (entity_is_task(se
)) {
748 trace_sched_stat_wait(task_of(se
),
749 rq_of(cfs_rq
)->clock
- se
->statistics
.wait_start
);
752 schedstat_set(se
->statistics
.wait_start
, 0);
756 update_stats_dequeue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
759 * Mark the end of the wait period if dequeueing a
762 if (se
!= cfs_rq
->curr
)
763 update_stats_wait_end(cfs_rq
, se
);
767 * We are picking a new current task - update its stats:
770 update_stats_curr_start(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
773 * We are starting a new run period:
775 se
->exec_start
= rq_of(cfs_rq
)->clock_task
;
778 /**************************************************
779 * Scheduling class queueing methods:
782 #ifdef CONFIG_NUMA_BALANCING
784 * numa task sample period in ms
786 unsigned int sysctl_numa_balancing_scan_period_min
= 100;
787 unsigned int sysctl_numa_balancing_scan_period_max
= 100*50;
788 unsigned int sysctl_numa_balancing_scan_period_reset
= 100*600;
790 /* Portion of address space to scan in MB */
791 unsigned int sysctl_numa_balancing_scan_size
= 256;
793 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
794 unsigned int sysctl_numa_balancing_scan_delay
= 1000;
796 static void task_numa_placement(struct task_struct
*p
)
798 int seq
= ACCESS_ONCE(p
->mm
->numa_scan_seq
);
800 if (p
->numa_scan_seq
== seq
)
802 p
->numa_scan_seq
= seq
;
804 /* FIXME: Scheduling placement policy hints go here */
808 * Got a PROT_NONE fault for a page on @node.
810 void task_numa_fault(int node
, int pages
, bool migrated
)
812 struct task_struct
*p
= current
;
814 if (!sched_feat_numa(NUMA
))
817 /* FIXME: Allocate task-specific structure for placement policy here */
820 * If pages are properly placed (did not migrate) then scan slower.
821 * This is reset periodically in case of phase changes
824 p
->numa_scan_period
= min(sysctl_numa_balancing_scan_period_max
,
825 p
->numa_scan_period
+ jiffies_to_msecs(10));
827 task_numa_placement(p
);
830 static void reset_ptenuma_scan(struct task_struct
*p
)
832 ACCESS_ONCE(p
->mm
->numa_scan_seq
)++;
833 p
->mm
->numa_scan_offset
= 0;
837 * The expensive part of numa migration is done from task_work context.
838 * Triggered from task_tick_numa().
840 void task_numa_work(struct callback_head
*work
)
842 unsigned long migrate
, next_scan
, now
= jiffies
;
843 struct task_struct
*p
= current
;
844 struct mm_struct
*mm
= p
->mm
;
845 struct vm_area_struct
*vma
;
846 unsigned long start
, end
;
849 WARN_ON_ONCE(p
!= container_of(work
, struct task_struct
, numa_work
));
851 work
->next
= work
; /* protect against double add */
853 * Who cares about NUMA placement when they're dying.
855 * NOTE: make sure not to dereference p->mm before this check,
856 * exit_task_work() happens _after_ exit_mm() so we could be called
857 * without p->mm even though we still had it when we enqueued this
860 if (p
->flags
& PF_EXITING
)
864 * We do not care about task placement until a task runs on a node
865 * other than the first one used by the address space. This is
866 * largely because migrations are driven by what CPU the task
867 * is running on. If it's never scheduled on another node, it'll
868 * not migrate so why bother trapping the fault.
870 if (mm
->first_nid
== NUMA_PTE_SCAN_INIT
)
871 mm
->first_nid
= numa_node_id();
872 if (mm
->first_nid
!= NUMA_PTE_SCAN_ACTIVE
) {
873 /* Are we running on a new node yet? */
874 if (numa_node_id() == mm
->first_nid
&&
875 !sched_feat_numa(NUMA_FORCE
))
878 mm
->first_nid
= NUMA_PTE_SCAN_ACTIVE
;
882 * Reset the scan period if enough time has gone by. Objective is that
883 * scanning will be reduced if pages are properly placed. As tasks
884 * can enter different phases this needs to be re-examined. Lacking
885 * proper tracking of reference behaviour, this blunt hammer is used.
887 migrate
= mm
->numa_next_reset
;
888 if (time_after(now
, migrate
)) {
889 p
->numa_scan_period
= sysctl_numa_balancing_scan_period_min
;
890 next_scan
= now
+ msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset
);
891 xchg(&mm
->numa_next_reset
, next_scan
);
 * Enforce maximal scan/migration frequency.
897 migrate
= mm
->numa_next_scan
;
898 if (time_before(now
, migrate
))
901 if (p
->numa_scan_period
== 0)
902 p
->numa_scan_period
= sysctl_numa_balancing_scan_period_min
;
904 next_scan
= now
+ msecs_to_jiffies(p
->numa_scan_period
);
905 if (cmpxchg(&mm
->numa_next_scan
, migrate
, next_scan
) != migrate
)
909 * Do not set pte_numa if the current running node is rate-limited.
910 * This loses statistics on the fault but if we are unwilling to
911 * migrate to this node, it is less likely we can do useful work
913 if (migrate_ratelimited(numa_node_id()))
916 start
= mm
->numa_scan_offset
;
917 pages
= sysctl_numa_balancing_scan_size
;
918 pages
<<= 20 - PAGE_SHIFT
; /* MB in pages */
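	/* e.g. with 4K pages: 256 MB << (20 - 12) = 65536 pages per scan pass */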
922 down_read(&mm
->mmap_sem
);
923 vma
= find_vma(mm
, start
);
925 reset_ptenuma_scan(p
);
929 for (; vma
; vma
= vma
->vm_next
) {
930 if (!vma_migratable(vma
))
933 /* Skip small VMAs. They are not likely to be of relevance */
934 if (((vma
->vm_end
- vma
->vm_start
) >> PAGE_SHIFT
) < HPAGE_PMD_NR
)
938 start
= max(start
, vma
->vm_start
);
939 end
= ALIGN(start
+ (pages
<< PAGE_SHIFT
), HPAGE_SIZE
);
940 end
= min(end
, vma
->vm_end
);
941 pages
-= change_prot_numa(vma
, start
, end
);
946 } while (end
!= vma
->vm_end
);
 * It is possible to reach the end of the VMA list but the last few VMAs are
 * not guaranteed to be vma_migratable. If they are not, we would find the
 * !migratable VMA on the next scan but not reset the scanner to the start,
 * so check it now.
957 mm
->numa_scan_offset
= start
;
959 reset_ptenuma_scan(p
);
960 up_read(&mm
->mmap_sem
);
964 * Drive the periodic memory faults..
966 void task_tick_numa(struct rq
*rq
, struct task_struct
*curr
)
968 struct callback_head
*work
= &curr
->numa_work
;
972 * We don't care about NUMA placement if we don't have memory.
974 if (!curr
->mm
|| (curr
->flags
& PF_EXITING
) || work
->next
!= work
)
 * Using runtime rather than walltime has the dual advantage that
 * we (mostly) drive the selection from busy threads and that the
 * task needs to have done some actual work before we bother with
 * NUMA placement.
983 now
= curr
->se
.sum_exec_runtime
;
984 period
= (u64
)curr
->numa_scan_period
* NSEC_PER_MSEC
;
986 if (now
- curr
->node_stamp
> period
) {
987 if (!curr
->node_stamp
)
988 curr
->numa_scan_period
= sysctl_numa_balancing_scan_period_min
;
989 curr
->node_stamp
= now
;
991 if (!time_before(jiffies
, curr
->mm
->numa_next_scan
)) {
992 init_task_work(work
, task_numa_work
); /* TODO: move this into sched_fork() */
993 task_work_add(curr
, work
, true);
998 static void task_tick_numa(struct rq
*rq
, struct task_struct
*curr
)
1001 #endif /* CONFIG_NUMA_BALANCING */
1004 account_entity_enqueue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1006 update_load_add(&cfs_rq
->load
, se
->load
.weight
);
1007 if (!parent_entity(se
))
1008 update_load_add(&rq_of(cfs_rq
)->load
, se
->load
.weight
);
1010 if (entity_is_task(se
))
1011 list_add(&se
->group_node
, &rq_of(cfs_rq
)->cfs_tasks
);
1013 cfs_rq
->nr_running
++;
1017 account_entity_dequeue(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1019 update_load_sub(&cfs_rq
->load
, se
->load
.weight
);
1020 if (!parent_entity(se
))
1021 update_load_sub(&rq_of(cfs_rq
)->load
, se
->load
.weight
);
1022 if (entity_is_task(se
))
1023 list_del_init(&se
->group_node
);
1024 cfs_rq
->nr_running
--;
1027 #ifdef CONFIG_FAIR_GROUP_SCHED
1028 /* we need this in update_cfs_load and load-balance functions below */
1029 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
);
1031 static void update_cfs_rq_load_contribution(struct cfs_rq
*cfs_rq
,
1034 struct task_group
*tg
= cfs_rq
->tg
;
1037 load_avg
= div64_u64(cfs_rq
->load_avg
, cfs_rq
->load_period
+1);
1038 load_avg
-= cfs_rq
->load_contribution
;
1040 if (global_update
|| abs(load_avg
) > cfs_rq
->load_contribution
/ 8) {
1041 atomic_add(load_avg
, &tg
->load_weight
);
1042 cfs_rq
->load_contribution
+= load_avg
;
1046 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
)
1048 u64 period
= sysctl_sched_shares_window
;
1050 unsigned long load
= cfs_rq
->load
.weight
;
1052 if (cfs_rq
->tg
== &root_task_group
|| throttled_hierarchy(cfs_rq
))
1055 now
= rq_of(cfs_rq
)->clock_task
;
1056 delta
= now
- cfs_rq
->load_stamp
;
1058 /* truncate load history at 4 idle periods */
1059 if (cfs_rq
->load_stamp
> cfs_rq
->load_last
&&
1060 now
- cfs_rq
->load_last
> 4 * period
) {
1061 cfs_rq
->load_period
= 0;
1062 cfs_rq
->load_avg
= 0;
1066 cfs_rq
->load_stamp
= now
;
1067 cfs_rq
->load_unacc_exec_time
= 0;
1068 cfs_rq
->load_period
+= delta
;
1070 cfs_rq
->load_last
= now
;
1071 cfs_rq
->load_avg
+= delta
* load
;
1074 /* consider updating load contribution on each fold or truncate */
1075 if (global_update
|| cfs_rq
->load_period
> period
1076 || !cfs_rq
->load_period
)
1077 update_cfs_rq_load_contribution(cfs_rq
, global_update
);
1079 while (cfs_rq
->load_period
> period
) {
1081 * Inline assembly required to prevent the compiler
1082 * optimising this loop into a divmod call.
1083 * See __iter_div_u64_rem() for another example of this.
1085 asm("" : "+rm" (cfs_rq
->load_period
));
1086 cfs_rq
->load_period
/= 2;
1087 cfs_rq
->load_avg
/= 2;
1090 if (!cfs_rq
->curr
&& !cfs_rq
->nr_running
&& !cfs_rq
->load_avg
)
1091 list_del_leaf_cfs_rq(cfs_rq
);
1094 static inline long calc_tg_weight(struct task_group
*tg
, struct cfs_rq
*cfs_rq
)
1099 * Use this CPU's actual weight instead of the last load_contribution
1100 * to gain a more accurate current total weight. See
1101 * update_cfs_rq_load_contribution().
1103 tg_weight
= atomic_read(&tg
->load_weight
);
1104 tg_weight
-= cfs_rq
->load_contribution
;
1105 tg_weight
+= cfs_rq
->load
.weight
;
1110 static long calc_cfs_shares(struct cfs_rq
*cfs_rq
, struct task_group
*tg
)
1112 long tg_weight
, load
, shares
;
1114 tg_weight
= calc_tg_weight(tg
, cfs_rq
);
1115 load
= cfs_rq
->load
.weight
;
1117 shares
= (tg
->shares
* load
);
1119 shares
/= tg_weight
;
1121 if (shares
< MIN_SHARES
)
1122 shares
= MIN_SHARES
;
1123 if (shares
> tg
->shares
)
1124 shares
= tg
->shares
;
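/*
 * i.e. shares = clamp(tg->shares * cfs_rq->load.weight / tg_weight,
 *		       MIN_SHARES, tg->shares)
 *
 * Example: tg->shares = 1024 split across two CPUs whose cfs_rqs carry
 * weights 3072 and 1024 gives the lighter cfs_rq 1024 * 1024 / 4096 = 256.
 */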
1129 static void update_entity_shares_tick(struct cfs_rq
*cfs_rq
)
1131 if (cfs_rq
->load_unacc_exec_time
> sysctl_sched_shares_window
) {
1132 update_cfs_load(cfs_rq
, 0);
1133 update_cfs_shares(cfs_rq
);
1136 # else /* CONFIG_SMP */
1137 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
)
1141 static inline long calc_cfs_shares(struct cfs_rq
*cfs_rq
, struct task_group
*tg
)
1146 static inline void update_entity_shares_tick(struct cfs_rq
*cfs_rq
)
1149 # endif /* CONFIG_SMP */
1150 static void reweight_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
,
1151 unsigned long weight
)
1154 /* commit outstanding execution time */
1155 if (cfs_rq
->curr
== se
)
1156 update_curr(cfs_rq
);
1157 account_entity_dequeue(cfs_rq
, se
);
1160 update_load_set(&se
->load
, weight
);
1163 account_entity_enqueue(cfs_rq
, se
);
1166 static void update_cfs_shares(struct cfs_rq
*cfs_rq
)
1168 struct task_group
*tg
;
1169 struct sched_entity
*se
;
1173 se
= tg
->se
[cpu_of(rq_of(cfs_rq
))];
1174 if (!se
|| throttled_hierarchy(cfs_rq
))
1177 if (likely(se
->load
.weight
== tg
->shares
))
1180 shares
= calc_cfs_shares(cfs_rq
, tg
);
1182 reweight_entity(cfs_rq_of(se
), se
, shares
);
1184 #else /* CONFIG_FAIR_GROUP_SCHED */
1185 static void update_cfs_load(struct cfs_rq
*cfs_rq
, int global_update
)
1189 static inline void update_cfs_shares(struct cfs_rq
*cfs_rq
)
1193 static inline void update_entity_shares_tick(struct cfs_rq
*cfs_rq
)
1196 #endif /* CONFIG_FAIR_GROUP_SCHED */
1198 static void enqueue_sleeper(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1200 #ifdef CONFIG_SCHEDSTATS
1201 struct task_struct
*tsk
= NULL
;
1203 if (entity_is_task(se
))
1206 if (se
->statistics
.sleep_start
) {
1207 u64 delta
= rq_of(cfs_rq
)->clock
- se
->statistics
.sleep_start
;
1212 if (unlikely(delta
> se
->statistics
.sleep_max
))
1213 se
->statistics
.sleep_max
= delta
;
1215 se
->statistics
.sleep_start
= 0;
1216 se
->statistics
.sum_sleep_runtime
+= delta
;
1219 account_scheduler_latency(tsk
, delta
>> 10, 1);
1220 trace_sched_stat_sleep(tsk
, delta
);
1223 if (se
->statistics
.block_start
) {
1224 u64 delta
= rq_of(cfs_rq
)->clock
- se
->statistics
.block_start
;
1229 if (unlikely(delta
> se
->statistics
.block_max
))
1230 se
->statistics
.block_max
= delta
;
1232 se
->statistics
.block_start
= 0;
1233 se
->statistics
.sum_sleep_runtime
+= delta
;
1236 if (tsk
->in_iowait
) {
1237 se
->statistics
.iowait_sum
+= delta
;
1238 se
->statistics
.iowait_count
++;
1239 trace_sched_stat_iowait(tsk
, delta
);
1242 trace_sched_stat_blocked(tsk
, delta
);
1245 * Blocking time is in units of nanosecs, so shift by
1246 * 20 to get a milliseconds-range estimation of the
1247 * amount of time that the task spent sleeping:
1249 if (unlikely(prof_on
== SLEEP_PROFILING
)) {
1250 profile_hits(SLEEP_PROFILING
,
1251 (void *)get_wchan(tsk
),
1254 account_scheduler_latency(tsk
, delta
>> 10, 0);
1260 static void check_spread(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1262 #ifdef CONFIG_SCHED_DEBUG
1263 s64 d
= se
->vruntime
- cfs_rq
->min_vruntime
;
1268 if (d
> 3*sysctl_sched_latency
)
1269 schedstat_inc(cfs_rq
, nr_spread_over
);
1274 place_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int initial
)
1276 u64 vruntime
= cfs_rq
->min_vruntime
;
1279 * The 'current' period is already promised to the current tasks,
1280 * however the extra weight of the new task will slow them down a
1281 * little, place the new task so that it fits in the slot that
1282 * stays open at the end.
1284 if (initial
&& sched_feat(START_DEBIT
))
1285 vruntime
+= sched_vslice(cfs_rq
, se
);
1287 /* sleeps up to a single latency don't count. */
1289 unsigned long thresh
= sysctl_sched_latency
;
1292 * Halve their sleep time's effect, to allow
1293 * for a gentler effect of sleepers:
1295 if (sched_feat(GENTLE_FAIR_SLEEPERS
))
1301 /* ensure we never gain time by being placed backwards. */
1302 vruntime
= max_vruntime(se
->vruntime
, vruntime
);
1304 se
->vruntime
= vruntime
;
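/*
 * Example (defaults): a newly forked entity with START_DEBIT starts one
 * vslice behind min_vruntime, so it cannot immediately preempt existing
 * tasks; a waking sleeper is credited at most one latency period (halved
 * to 3 ms by GENTLE_FAIR_SLEEPERS), and the max_vruntime() above ensures
 * a task that slept only briefly never moves backwards in virtual time.
 */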
1307 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
);
1310 enqueue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
	 * Update the normalized vruntime before updating min_vruntime
	 * through calling update_curr().
1316 if (!(flags
& ENQUEUE_WAKEUP
) || (flags
& ENQUEUE_WAKING
))
1317 se
->vruntime
+= cfs_rq
->min_vruntime
;
1320 * Update run-time statistics of the 'current'.
1322 update_curr(cfs_rq
);
1323 update_cfs_load(cfs_rq
, 0);
1324 account_entity_enqueue(cfs_rq
, se
);
1325 update_cfs_shares(cfs_rq
);
1327 if (flags
& ENQUEUE_WAKEUP
) {
1328 place_entity(cfs_rq
, se
, 0);
1329 enqueue_sleeper(cfs_rq
, se
);
1332 update_stats_enqueue(cfs_rq
, se
);
1333 check_spread(cfs_rq
, se
);
1334 if (se
!= cfs_rq
->curr
)
1335 __enqueue_entity(cfs_rq
, se
);
1338 if (cfs_rq
->nr_running
== 1) {
1339 list_add_leaf_cfs_rq(cfs_rq
);
1340 check_enqueue_throttle(cfs_rq
);
1344 static void __clear_buddies_last(struct sched_entity
*se
)
1346 for_each_sched_entity(se
) {
1347 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1348 if (cfs_rq
->last
== se
)
1349 cfs_rq
->last
= NULL
;
1355 static void __clear_buddies_next(struct sched_entity
*se
)
1357 for_each_sched_entity(se
) {
1358 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1359 if (cfs_rq
->next
== se
)
1360 cfs_rq
->next
= NULL
;
1366 static void __clear_buddies_skip(struct sched_entity
*se
)
1368 for_each_sched_entity(se
) {
1369 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
1370 if (cfs_rq
->skip
== se
)
1371 cfs_rq
->skip
= NULL
;
1377 static void clear_buddies(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1379 if (cfs_rq
->last
== se
)
1380 __clear_buddies_last(se
);
1382 if (cfs_rq
->next
== se
)
1383 __clear_buddies_next(se
);
1385 if (cfs_rq
->skip
== se
)
1386 __clear_buddies_skip(se
);
1389 static __always_inline
void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
1392 dequeue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
1395 * Update run-time statistics of the 'current'.
1397 update_curr(cfs_rq
);
1399 update_stats_dequeue(cfs_rq
, se
);
1400 if (flags
& DEQUEUE_SLEEP
) {
1401 #ifdef CONFIG_SCHEDSTATS
1402 if (entity_is_task(se
)) {
1403 struct task_struct
*tsk
= task_of(se
);
1405 if (tsk
->state
& TASK_INTERRUPTIBLE
)
1406 se
->statistics
.sleep_start
= rq_of(cfs_rq
)->clock
;
1407 if (tsk
->state
& TASK_UNINTERRUPTIBLE
)
1408 se
->statistics
.block_start
= rq_of(cfs_rq
)->clock
;
1413 clear_buddies(cfs_rq
, se
);
1415 if (se
!= cfs_rq
->curr
)
1416 __dequeue_entity(cfs_rq
, se
);
1418 update_cfs_load(cfs_rq
, 0);
1419 account_entity_dequeue(cfs_rq
, se
);
1422 * Normalize the entity after updating the min_vruntime because the
1423 * update can refer to the ->curr item and we need to reflect this
1424 * movement in our normalized position.
1426 if (!(flags
& DEQUEUE_SLEEP
))
1427 se
->vruntime
-= cfs_rq
->min_vruntime
;
1429 /* return excess runtime on last dequeue */
1430 return_cfs_rq_runtime(cfs_rq
);
1432 update_min_vruntime(cfs_rq
);
1433 update_cfs_shares(cfs_rq
);
1437 * Preempt the current task with a newly woken task if needed:
1440 check_preempt_tick(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
)
1442 unsigned long ideal_runtime
, delta_exec
;
1443 struct sched_entity
*se
;
1446 ideal_runtime
= sched_slice(cfs_rq
, curr
);
1447 delta_exec
= curr
->sum_exec_runtime
- curr
->prev_sum_exec_runtime
;
1448 if (delta_exec
> ideal_runtime
) {
1449 resched_task(rq_of(cfs_rq
)->curr
);
1451 * The current task ran long enough, ensure it doesn't get
1452 * re-elected due to buddy favours.
1454 clear_buddies(cfs_rq
, curr
);
1459 * Ensure that a task that missed wakeup preemption by a
1460 * narrow margin doesn't have to wait for a full slice.
1461 * This also mitigates buddy induced latencies under load.
1463 if (delta_exec
< sysctl_sched_min_granularity
)
1466 se
= __pick_first_entity(cfs_rq
);
1467 delta
= curr
->vruntime
- se
->vruntime
;
1472 if (delta
> ideal_runtime
)
1473 resched_task(rq_of(cfs_rq
)->curr
);
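/*
 * Example (defaults): with two equally weighted tasks ideal_runtime is
 * 3 ms, so the running task is rescheduled once it has consumed 3 ms more
 * than at its last pick, or once it leads the leftmost entity by more
 * than 3 ms of vruntime, but never before it has run for at least
 * sysctl_sched_min_granularity (0.75 ms).
 */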
1477 set_next_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1479 /* 'current' is not kept within the tree. */
1482 * Any task has to be enqueued before it get to execute on
1483 * a CPU. So account for the time it spent waiting on the
1486 update_stats_wait_end(cfs_rq
, se
);
1487 __dequeue_entity(cfs_rq
, se
);
1490 update_stats_curr_start(cfs_rq
, se
);
1492 #ifdef CONFIG_SCHEDSTATS
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
1498 if (rq_of(cfs_rq
)->load
.weight
>= 2*se
->load
.weight
) {
1499 se
->statistics
.slice_max
= max(se
->statistics
.slice_max
,
1500 se
->sum_exec_runtime
- se
->prev_sum_exec_runtime
);
1503 se
->prev_sum_exec_runtime
= se
->sum_exec_runtime
;
1507 wakeup_preempt_entity(struct sched_entity
*curr
, struct sched_entity
*se
);
1510 * Pick the next process, keeping these things in mind, in this order:
1511 * 1) keep things fair between processes/task groups
1512 * 2) pick the "next" process, since someone really wants that to run
1513 * 3) pick the "last" process, for cache locality
1514 * 4) do not run the "skip" process, if something else is available
1516 static struct sched_entity
*pick_next_entity(struct cfs_rq
*cfs_rq
)
1518 struct sched_entity
*se
= __pick_first_entity(cfs_rq
);
1519 struct sched_entity
*left
= se
;
1522 * Avoid running the skip buddy, if running something else can
1523 * be done without getting too unfair.
1525 if (cfs_rq
->skip
== se
) {
1526 struct sched_entity
*second
= __pick_next_entity(se
);
1527 if (second
&& wakeup_preempt_entity(second
, left
) < 1)
1532 * Prefer last buddy, try to return the CPU to a preempted task.
1534 if (cfs_rq
->last
&& wakeup_preempt_entity(cfs_rq
->last
, left
) < 1)
1538 * Someone really wants this to run. If it's not unfair, run it.
1540 if (cfs_rq
->next
&& wakeup_preempt_entity(cfs_rq
->next
, left
) < 1)
1543 clear_buddies(cfs_rq
, se
);
1548 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
1550 static void put_prev_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*prev
)
1553 * If still on the runqueue then deactivate_task()
1554 * was not called and update_curr() has to be done:
1557 update_curr(cfs_rq
);
1559 /* throttle cfs_rqs exceeding runtime */
1560 check_cfs_rq_runtime(cfs_rq
);
1562 check_spread(cfs_rq
, prev
);
1564 update_stats_wait_start(cfs_rq
, prev
);
1565 /* Put 'current' back into the tree. */
1566 __enqueue_entity(cfs_rq
, prev
);
1568 cfs_rq
->curr
= NULL
;
1572 entity_tick(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
, int queued
)
1575 * Update run-time statistics of the 'current'.
1577 update_curr(cfs_rq
);
1580 * Update share accounting for long-running entities.
1582 update_entity_shares_tick(cfs_rq
);
1584 #ifdef CONFIG_SCHED_HRTICK
1586 * queued ticks are scheduled to match the slice, so don't bother
1587 * validating it and just reschedule.
1590 resched_task(rq_of(cfs_rq
)->curr
);
1594 * don't let the period tick interfere with the hrtick preemption
1596 if (!sched_feat(DOUBLE_TICK
) &&
1597 hrtimer_active(&rq_of(cfs_rq
)->hrtick_timer
))
1601 if (cfs_rq
->nr_running
> 1)
1602 check_preempt_tick(cfs_rq
, curr
);
1606 /**************************************************
1607 * CFS bandwidth control machinery
1610 #ifdef CONFIG_CFS_BANDWIDTH
1612 #ifdef HAVE_JUMP_LABEL
1613 static struct static_key __cfs_bandwidth_used
;
1615 static inline bool cfs_bandwidth_used(void)
1617 return static_key_false(&__cfs_bandwidth_used
);
1620 void account_cfs_bandwidth_used(int enabled
, int was_enabled
)
1622 /* only need to count groups transitioning between enabled/!enabled */
1623 if (enabled
&& !was_enabled
)
1624 static_key_slow_inc(&__cfs_bandwidth_used
);
1625 else if (!enabled
&& was_enabled
)
1626 static_key_slow_dec(&__cfs_bandwidth_used
);
1628 #else /* HAVE_JUMP_LABEL */
1629 static bool cfs_bandwidth_used(void)
1634 void account_cfs_bandwidth_used(int enabled
, int was_enabled
) {}
1635 #endif /* HAVE_JUMP_LABEL */
1638 * default period for cfs group bandwidth.
1639 * default: 0.1s, units: nanoseconds
1641 static inline u64
default_cfs_period(void)
1643 return 100000000ULL;
1646 static inline u64
sched_cfs_bandwidth_slice(void)
1648 return (u64
)sysctl_sched_cfs_bandwidth_slice
* NSEC_PER_USEC
;
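/*
 * With the defaults this is 5000 us * 1000 = 5 ms of runtime transferred
 * from the global pool to a cfs_rq per refill, against the 100 ms default
 * period; a group whose quota is 50 ms can therefore hand out at most ten
 * full slices per period before its cfs_rqs start throttling.
 */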
1652 * Replenish runtime according to assigned quota and update expiration time.
1653 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
1654 * additional synchronization around rq->lock.
1656 * requires cfs_b->lock
1658 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth
*cfs_b
)
1662 if (cfs_b
->quota
== RUNTIME_INF
)
1665 now
= sched_clock_cpu(smp_processor_id());
1666 cfs_b
->runtime
= cfs_b
->quota
;
1667 cfs_b
->runtime_expires
= now
+ ktime_to_ns(cfs_b
->period
);
1670 static inline struct cfs_bandwidth
*tg_cfs_bandwidth(struct task_group
*tg
)
1672 return &tg
->cfs_bandwidth
;
1675 /* returns 0 on failure to allocate runtime */
1676 static int assign_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1678 struct task_group
*tg
= cfs_rq
->tg
;
1679 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(tg
);
1680 u64 amount
= 0, min_amount
, expires
;
1682 /* note: this is a positive sum as runtime_remaining <= 0 */
1683 min_amount
= sched_cfs_bandwidth_slice() - cfs_rq
->runtime_remaining
;
1685 raw_spin_lock(&cfs_b
->lock
);
1686 if (cfs_b
->quota
== RUNTIME_INF
)
1687 amount
= min_amount
;
1690 * If the bandwidth pool has become inactive, then at least one
1691 * period must have elapsed since the last consumption.
1692 * Refresh the global state and ensure bandwidth timer becomes
1695 if (!cfs_b
->timer_active
) {
1696 __refill_cfs_bandwidth_runtime(cfs_b
);
1697 __start_cfs_bandwidth(cfs_b
);
1700 if (cfs_b
->runtime
> 0) {
1701 amount
= min(cfs_b
->runtime
, min_amount
);
1702 cfs_b
->runtime
-= amount
;
1706 expires
= cfs_b
->runtime_expires
;
1707 raw_spin_unlock(&cfs_b
->lock
);
1709 cfs_rq
->runtime_remaining
+= amount
;
1711 * we may have advanced our local expiration to account for allowed
1712 * spread between our sched_clock and the one on which runtime was
1715 if ((s64
)(expires
- cfs_rq
->runtime_expires
) > 0)
1716 cfs_rq
->runtime_expires
= expires
;
1718 return cfs_rq
->runtime_remaining
> 0;
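/*
 * Example: a cfs_rq that overran by 2 ms (runtime_remaining == -2 ms)
 * requests min_amount = 5 ms + 2 ms = 7 ms, leaving it with a full 5 ms
 * slice of usable runtime once the debt is repaid, provided the global
 * pool still holds that much.
 */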
1722 * Note: This depends on the synchronization provided by sched_clock and the
1723 * fact that rq->clock snapshots this value.
1725 static void expire_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
1727 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1728 struct rq
*rq
= rq_of(cfs_rq
);
1730 /* if the deadline is ahead of our clock, nothing to do */
1731 if (likely((s64
)(rq
->clock
- cfs_rq
->runtime_expires
) < 0))
1734 if (cfs_rq
->runtime_remaining
< 0)
1738 * If the local deadline has passed we have to consider the
1739 * possibility that our sched_clock is 'fast' and the global deadline
1740 * has not truly expired.
 * Fortunately we can determine whether this is the case by checking
 * whether the global deadline has advanced.
1746 if ((s64
)(cfs_rq
->runtime_expires
- cfs_b
->runtime_expires
) >= 0) {
1747 /* extend local deadline, drift is bounded above by 2 ticks */
1748 cfs_rq
->runtime_expires
+= TICK_NSEC
;
1750 /* global deadline is ahead, expiration has passed */
1751 cfs_rq
->runtime_remaining
= 0;
1755 static void __account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
,
1756 unsigned long delta_exec
)
1758 /* dock delta_exec before expiring quota (as it could span periods) */
1759 cfs_rq
->runtime_remaining
-= delta_exec
;
1760 expire_cfs_rq_runtime(cfs_rq
);
1762 if (likely(cfs_rq
->runtime_remaining
> 0))
1766 * if we're unable to extend our runtime we resched so that the active
1767 * hierarchy can be throttled
1769 if (!assign_cfs_rq_runtime(cfs_rq
) && likely(cfs_rq
->curr
))
1770 resched_task(rq_of(cfs_rq
)->curr
);
1773 static __always_inline
1774 void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
, unsigned long delta_exec
)
1776 if (!cfs_bandwidth_used() || !cfs_rq
->runtime_enabled
)
1779 __account_cfs_rq_runtime(cfs_rq
, delta_exec
);
1782 static inline int cfs_rq_throttled(struct cfs_rq
*cfs_rq
)
1784 return cfs_bandwidth_used() && cfs_rq
->throttled
;
1787 /* check whether cfs_rq, or any parent, is throttled */
1788 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
)
1790 return cfs_bandwidth_used() && cfs_rq
->throttle_count
;
1794 * Ensure that neither of the group entities corresponding to src_cpu or
1795 * dest_cpu are members of a throttled hierarchy when performing group
1796 * load-balance operations.
1798 static inline int throttled_lb_pair(struct task_group
*tg
,
1799 int src_cpu
, int dest_cpu
)
1801 struct cfs_rq
*src_cfs_rq
, *dest_cfs_rq
;
1803 src_cfs_rq
= tg
->cfs_rq
[src_cpu
];
1804 dest_cfs_rq
= tg
->cfs_rq
[dest_cpu
];
1806 return throttled_hierarchy(src_cfs_rq
) ||
1807 throttled_hierarchy(dest_cfs_rq
);
1810 /* updated child weight may affect parent so we have to do this bottom up */
1811 static int tg_unthrottle_up(struct task_group
*tg
, void *data
)
1813 struct rq
*rq
= data
;
1814 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
1816 cfs_rq
->throttle_count
--;
1818 if (!cfs_rq
->throttle_count
) {
1819 u64 delta
= rq
->clock_task
- cfs_rq
->load_stamp
;
1821 /* leaving throttled state, advance shares averaging windows */
1822 cfs_rq
->load_stamp
+= delta
;
1823 cfs_rq
->load_last
+= delta
;
1825 /* update entity weight now that we are on_rq again */
1826 update_cfs_shares(cfs_rq
);
1833 static int tg_throttle_down(struct task_group
*tg
, void *data
)
1835 struct rq
*rq
= data
;
1836 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
1838 /* group is entering throttled state, record last load */
1839 if (!cfs_rq
->throttle_count
)
1840 update_cfs_load(cfs_rq
, 0);
1841 cfs_rq
->throttle_count
++;
1846 static void throttle_cfs_rq(struct cfs_rq
*cfs_rq
)
1848 struct rq
*rq
= rq_of(cfs_rq
);
1849 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1850 struct sched_entity
*se
;
1851 long task_delta
, dequeue
= 1;
1853 se
= cfs_rq
->tg
->se
[cpu_of(rq_of(cfs_rq
))];
1855 /* account load preceding throttle */
1857 walk_tg_tree_from(cfs_rq
->tg
, tg_throttle_down
, tg_nop
, (void *)rq
);
1860 task_delta
= cfs_rq
->h_nr_running
;
1861 for_each_sched_entity(se
) {
1862 struct cfs_rq
*qcfs_rq
= cfs_rq_of(se
);
1863 /* throttled entity or throttle-on-deactivate */
1868 dequeue_entity(qcfs_rq
, se
, DEQUEUE_SLEEP
);
1869 qcfs_rq
->h_nr_running
-= task_delta
;
1871 if (qcfs_rq
->load
.weight
)
1876 rq
->nr_running
-= task_delta
;
1878 cfs_rq
->throttled
= 1;
1879 cfs_rq
->throttled_timestamp
= rq
->clock
;
1880 raw_spin_lock(&cfs_b
->lock
);
1881 list_add_tail_rcu(&cfs_rq
->throttled_list
, &cfs_b
->throttled_cfs_rq
);
1882 raw_spin_unlock(&cfs_b
->lock
);
1885 void unthrottle_cfs_rq(struct cfs_rq
*cfs_rq
)
1887 struct rq
*rq
= rq_of(cfs_rq
);
1888 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
1889 struct sched_entity
*se
;
1893 se
= cfs_rq
->tg
->se
[cpu_of(rq_of(cfs_rq
))];
1895 cfs_rq
->throttled
= 0;
1896 raw_spin_lock(&cfs_b
->lock
);
1897 cfs_b
->throttled_time
+= rq
->clock
- cfs_rq
->throttled_timestamp
;
1898 list_del_rcu(&cfs_rq
->throttled_list
);
1899 raw_spin_unlock(&cfs_b
->lock
);
1900 cfs_rq
->throttled_timestamp
= 0;
1902 update_rq_clock(rq
);
1903 /* update hierarchical throttle state */
1904 walk_tg_tree_from(cfs_rq
->tg
, tg_nop
, tg_unthrottle_up
, (void *)rq
);
1906 if (!cfs_rq
->load
.weight
)
1909 task_delta
= cfs_rq
->h_nr_running
;
1910 for_each_sched_entity(se
) {
1914 cfs_rq
= cfs_rq_of(se
);
1916 enqueue_entity(cfs_rq
, se
, ENQUEUE_WAKEUP
);
1917 cfs_rq
->h_nr_running
+= task_delta
;
1919 if (cfs_rq_throttled(cfs_rq
))
1924 rq
->nr_running
+= task_delta
;
1926 /* determine whether we need to wake up potentially idle cpu */
1927 if (rq
->curr
== rq
->idle
&& rq
->cfs
.nr_running
)
1928 resched_task(rq
->curr
);
1931 static u64
distribute_cfs_runtime(struct cfs_bandwidth
*cfs_b
,
1932 u64 remaining
, u64 expires
)
1934 struct cfs_rq
*cfs_rq
;
1935 u64 runtime
= remaining
;
1938 list_for_each_entry_rcu(cfs_rq
, &cfs_b
->throttled_cfs_rq
,
1940 struct rq
*rq
= rq_of(cfs_rq
);
1942 raw_spin_lock(&rq
->lock
);
1943 if (!cfs_rq_throttled(cfs_rq
))
1946 runtime
= -cfs_rq
->runtime_remaining
+ 1;
1947 if (runtime
> remaining
)
1948 runtime
= remaining
;
1949 remaining
-= runtime
;
1951 cfs_rq
->runtime_remaining
+= runtime
;
1952 cfs_rq
->runtime_expires
= expires
;
1954 /* we check whether we're throttled above */
1955 if (cfs_rq
->runtime_remaining
> 0)
1956 unthrottle_cfs_rq(cfs_rq
);
1959 raw_spin_unlock(&rq
->lock
);
1970 * Responsible for refilling a task_group's bandwidth and unthrottling its
1971 * cfs_rqs as appropriate. If there has been no activity within the last
1972 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
1973 * used to track this state.
1975 static int do_sched_cfs_period_timer(struct cfs_bandwidth
*cfs_b
, int overrun
)
1977 u64 runtime
, runtime_expires
;
1978 int idle
= 1, throttled
;
1980 raw_spin_lock(&cfs_b
->lock
);
1981 /* no need to continue the timer with no bandwidth constraint */
1982 if (cfs_b
->quota
== RUNTIME_INF
)
1985 throttled
= !list_empty(&cfs_b
->throttled_cfs_rq
);
1986 /* idle depends on !throttled (for the case of a large deficit) */
1987 idle
= cfs_b
->idle
&& !throttled
;
1988 cfs_b
->nr_periods
+= overrun
;
1990 /* if we're going inactive then everything else can be deferred */
1994 __refill_cfs_bandwidth_runtime(cfs_b
);
1997 /* mark as potentially idle for the upcoming period */
2002 /* account preceding periods in which throttling occurred */
2003 cfs_b
->nr_throttled
+= overrun
;
2006 * There are throttled entities so we must first use the new bandwidth
2007 * to unthrottle them before making it generally available. This
2008 * ensures that all existing debts will be paid before a new cfs_rq is
2011 runtime
= cfs_b
->runtime
;
2012 runtime_expires
= cfs_b
->runtime_expires
;
2016 * This check is repeated as we are holding onto the new bandwidth
2017 * while we unthrottle. This can potentially race with an unthrottled
2018 * group trying to acquire new bandwidth from the global pool.
2020 while (throttled
&& runtime
> 0) {
2021 raw_spin_unlock(&cfs_b
->lock
);
2022 /* we can't nest cfs_b->lock while distributing bandwidth */
2023 runtime
= distribute_cfs_runtime(cfs_b
, runtime
,
2025 raw_spin_lock(&cfs_b
->lock
);
2027 throttled
= !list_empty(&cfs_b
->throttled_cfs_rq
);
2030 /* return (any) remaining runtime */
2031 cfs_b
->runtime
= runtime
;
2033 * While we are ensured activity in the period following an
2034 * unthrottle, this also covers the case in which the new bandwidth is
2035 * insufficient to cover the existing bandwidth deficit. (Forcing the
2036 * timer to remain active while there are any throttled entities.)
2041 cfs_b
->timer_active
= 0;
2042 raw_spin_unlock(&cfs_b
->lock
);
2047 /* a cfs_rq won't donate quota below this amount */
2048 static const u64 min_cfs_rq_runtime
= 1 * NSEC_PER_MSEC
;
2049 /* minimum remaining period time to redistribute slack quota */
2050 static const u64 min_bandwidth_expiration
= 2 * NSEC_PER_MSEC
;
2051 /* how long we wait to gather additional slack before distributing */
2052 static const u64 cfs_bandwidth_slack_period
= 5 * NSEC_PER_MSEC
;
2054 /* are we near the end of the current quota period? */
2055 static int runtime_refresh_within(struct cfs_bandwidth
*cfs_b
, u64 min_expire
)
2057 struct hrtimer
*refresh_timer
= &cfs_b
->period_timer
;
2060 /* if the call-back is running a quota refresh is already occurring */
2061 if (hrtimer_callback_running(refresh_timer
))
2064 /* is a quota refresh about to occur? */
2065 remaining
= ktime_to_ns(hrtimer_expires_remaining(refresh_timer
));
2066 if (remaining
< min_expire
)
2072 static void start_cfs_slack_bandwidth(struct cfs_bandwidth
*cfs_b
)
2074 u64 min_left
= cfs_bandwidth_slack_period
+ min_bandwidth_expiration
;
2076 /* if there's a quota refresh soon don't bother with slack */
2077 if (runtime_refresh_within(cfs_b
, min_left
))
2080 start_bandwidth_timer(&cfs_b
->slack_timer
,
2081 ns_to_ktime(cfs_bandwidth_slack_period
));
2084 /* we know any runtime found here is valid as update_curr() precedes return */
2085 static void __return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
2087 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
2088 s64 slack_runtime
= cfs_rq
->runtime_remaining
- min_cfs_rq_runtime
;
2090 if (slack_runtime
<= 0)
2093 raw_spin_lock(&cfs_b
->lock
);
2094 if (cfs_b
->quota
!= RUNTIME_INF
&&
2095 cfs_rq
->runtime_expires
== cfs_b
->runtime_expires
) {
2096 cfs_b
->runtime
+= slack_runtime
;
2098 /* we are under rq->lock, defer unthrottling using a timer */
2099 if (cfs_b
->runtime
> sched_cfs_bandwidth_slice() &&
2100 !list_empty(&cfs_b
->throttled_cfs_rq
))
2101 start_cfs_slack_bandwidth(cfs_b
);
2103 raw_spin_unlock(&cfs_b
->lock
);
2105 /* even if it's not valid for return we don't want to try again */
2106 cfs_rq
->runtime_remaining
-= slack_runtime
;
2109 static __always_inline
void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
2111 if (!cfs_bandwidth_used())
2114 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->nr_running
)
2117 __return_cfs_rq_runtime(cfs_rq
);
2121 * This is done with a timer (instead of inline with bandwidth return) since
2122 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2124 static void do_sched_cfs_slack_timer(struct cfs_bandwidth
*cfs_b
)
2126 u64 runtime
= 0, slice
= sched_cfs_bandwidth_slice();
2129 /* confirm we're still not at a refresh boundary */
2130 if (runtime_refresh_within(cfs_b
, min_bandwidth_expiration
))
2133 raw_spin_lock(&cfs_b
->lock
);
2134 if (cfs_b
->quota
!= RUNTIME_INF
&& cfs_b
->runtime
> slice
) {
2135 runtime
= cfs_b
->runtime
;
2138 expires
= cfs_b
->runtime_expires
;
2139 raw_spin_unlock(&cfs_b
->lock
);
2144 runtime
= distribute_cfs_runtime(cfs_b
, runtime
, expires
);
2146 raw_spin_lock(&cfs_b
->lock
);
2147 if (expires
== cfs_b
->runtime_expires
)
2148 cfs_b
->runtime
= runtime
;
2149 raw_spin_unlock(&cfs_b
->lock
);
2153 * When a group wakes up we want to make sure that its quota is not already
 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime as update_curr() throttling cannot trigger until it's on-rq.
2157 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
)
2159 if (!cfs_bandwidth_used())
2162 /* an active group must be handled by the update_curr()->put() path */
2163 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->curr
)
2166 /* ensure the group is not already throttled */
2167 if (cfs_rq_throttled(cfs_rq
))
2170 /* update runtime allocation */
2171 account_cfs_rq_runtime(cfs_rq
, 0);
2172 if (cfs_rq
->runtime_remaining
<= 0)
2173 throttle_cfs_rq(cfs_rq
);
2176 /* conditionally throttle active cfs_rq's from put_prev_entity() */
2177 static void check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
2179 if (!cfs_bandwidth_used())
2182 if (likely(!cfs_rq
->runtime_enabled
|| cfs_rq
->runtime_remaining
> 0))
2186 * it's possible for a throttled entity to be forced into a running
2187 * state (e.g. set_curr_task), in this case we're finished.
2189 if (cfs_rq_throttled(cfs_rq
))
2192 throttle_cfs_rq(cfs_rq
);
2195 static inline u64
default_cfs_period(void);
2196 static int do_sched_cfs_period_timer(struct cfs_bandwidth
*cfs_b
, int overrun
);
2197 static void do_sched_cfs_slack_timer(struct cfs_bandwidth
*cfs_b
);
2199 static enum hrtimer_restart
sched_cfs_slack_timer(struct hrtimer
*timer
)
2201 struct cfs_bandwidth
*cfs_b
=
2202 container_of(timer
, struct cfs_bandwidth
, slack_timer
);
2203 do_sched_cfs_slack_timer(cfs_b
);
2205 return HRTIMER_NORESTART
;
2208 static enum hrtimer_restart
sched_cfs_period_timer(struct hrtimer
*timer
)
2210 struct cfs_bandwidth
*cfs_b
=
2211 container_of(timer
, struct cfs_bandwidth
, period_timer
);
2217 now
= hrtimer_cb_get_time(timer
);
2218 overrun
= hrtimer_forward(timer
, now
, cfs_b
->period
);
2223 idle
= do_sched_cfs_period_timer(cfs_b
, overrun
);
2226 return idle
? HRTIMER_NORESTART
: HRTIMER_RESTART
;
2229 void init_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
2231 raw_spin_lock_init(&cfs_b
->lock
);
2233 cfs_b
->quota
= RUNTIME_INF
;
2234 cfs_b
->period
= ns_to_ktime(default_cfs_period());
2236 INIT_LIST_HEAD(&cfs_b
->throttled_cfs_rq
);
2237 hrtimer_init(&cfs_b
->period_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
2238 cfs_b
->period_timer
.function
= sched_cfs_period_timer
;
2239 hrtimer_init(&cfs_b
->slack_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
2240 cfs_b
->slack_timer
.function
= sched_cfs_slack_timer
;
2243 static void init_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
2245 cfs_rq
->runtime_enabled
= 0;
2246 INIT_LIST_HEAD(&cfs_rq
->throttled_list
);
/* requires cfs_b->lock, may release to reprogram timer */
void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	/*
	 * The timer may be active because we're trying to set a new bandwidth
	 * period or because we're racing with the tear-down path
	 * (timer_active==0 becomes visible before the hrtimer call-back
	 * terminates).  In either case we ensure that it's re-programmed
	 */
	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
		raw_spin_unlock(&cfs_b->lock);
		/* ensure cfs_b->lock is available while we wait */
		hrtimer_cancel(&cfs_b->period_timer);

		raw_spin_lock(&cfs_b->lock);
		/* if someone else restarted the timer then we're done */
		if (cfs_b->timer_active)
			return;
	}

	cfs_b->timer_active = 1;
	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
}

static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	hrtimer_cancel(&cfs_b->period_timer);
	hrtimer_cancel(&cfs_b->slack_timer);
}

static void unthrottle_offline_cfs_rqs(struct rq *rq)
{
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(rq, cfs_rq) {
		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);

		if (!cfs_rq->runtime_enabled)
			continue;

		/*
		 * clock_task is not advancing so we just need to make sure
		 * there's some valid quota amount
		 */
		cfs_rq->runtime_remaining = cfs_b->quota;
		if (cfs_rq_throttled(cfs_rq))
			unthrottle_cfs_rq(cfs_rq);
	}
}
#else /* CONFIG_CFS_BANDWIDTH */
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}

static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	return 0;
}

void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
#endif

static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
	return NULL;
}
static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}

#endif /* CONFIG_CFS_BANDWIDTH */
/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif
2398 * The enqueue_task method is called before nr_running is
2399 * increased. Here we update the fair scheduling stats and
2400 * then put the task into the rbtree:
2403 enqueue_task_fair(struct rq
*rq
, struct task_struct
*p
, int flags
)
2405 struct cfs_rq
*cfs_rq
;
2406 struct sched_entity
*se
= &p
->se
;
2408 for_each_sched_entity(se
) {
2411 cfs_rq
= cfs_rq_of(se
);
2412 enqueue_entity(cfs_rq
, se
, flags
);
2415 * end evaluation on encountering a throttled cfs_rq
2417 * note: in the case of encountering a throttled cfs_rq we will
2418 * post the final h_nr_running increment below.
2420 if (cfs_rq_throttled(cfs_rq
))
2422 cfs_rq
->h_nr_running
++;
2424 flags
= ENQUEUE_WAKEUP
;
2427 for_each_sched_entity(se
) {
2428 cfs_rq
= cfs_rq_of(se
);
2429 cfs_rq
->h_nr_running
++;
2431 if (cfs_rq_throttled(cfs_rq
))
2434 update_cfs_load(cfs_rq
, 0);
2435 update_cfs_shares(cfs_rq
);
2443 static void set_next_buddy(struct sched_entity
*se
);
2446 * The dequeue_task method is called before nr_running is
2447 * decreased. We remove the task from the rbtree and
2448 * update the fair scheduling stats:
2450 static void dequeue_task_fair(struct rq
*rq
, struct task_struct
*p
, int flags
)
2452 struct cfs_rq
*cfs_rq
;
2453 struct sched_entity
*se
= &p
->se
;
2454 int task_sleep
= flags
& DEQUEUE_SLEEP
;
2456 for_each_sched_entity(se
) {
2457 cfs_rq
= cfs_rq_of(se
);
2458 dequeue_entity(cfs_rq
, se
, flags
);
2461 * end evaluation on encountering a throttled cfs_rq
2463 * note: in the case of encountering a throttled cfs_rq we will
2464 * post the final h_nr_running decrement below.
2466 if (cfs_rq_throttled(cfs_rq
))
2468 cfs_rq
->h_nr_running
--;
2470 /* Don't dequeue parent if it has other entities besides us */
2471 if (cfs_rq
->load
.weight
) {
2473 * Bias pick_next to pick a task from this cfs_rq, as
2474 * p is sleeping when it is within its sched_slice.
2476 if (task_sleep
&& parent_entity(se
))
2477 set_next_buddy(parent_entity(se
));
2479 /* avoid re-evaluating load for this entity */
2480 se
= parent_entity(se
);
2483 flags
|= DEQUEUE_SLEEP
;
2486 for_each_sched_entity(se
) {
2487 cfs_rq
= cfs_rq_of(se
);
2488 cfs_rq
->h_nr_running
--;
2490 if (cfs_rq_throttled(cfs_rq
))
2493 update_cfs_load(cfs_rq
, 0);
2494 update_cfs_shares(cfs_rq
);
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->load.weight;
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return max(rq->cpu_load[type-1], total);
}
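
/*
 * Illustrative sketch, not part of the scheduler: how the LB_BIAS style
 * min/max filtering above behaves for one hypothetical cpu.  The helper
 * name and both load values are made up purely for the example.
 */
static inline void lb_bias_example(void)
{
	unsigned long hist = 1024;		/* decayed cpu_load[type-1] history */
	unsigned long inst = 2048;		/* instantaneous weighted_cpuload() */
	unsigned long src  = min(hist, inst);	/* source_load(): low guess,  1024 */
	unsigned long dst  = max(hist, inst);	/* target_load(): high guess, 2048 */

	/*
	 * Under-estimating sources and over-estimating targets biases the
	 * balancer toward leaving tasks where they already are.
	 */
	(void)src;
	(void)dst;
}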
static unsigned long power_of(int cpu)
{
	return cpu_rq(cpu)->cpu_power;
}

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

	if (nr_running)
		return rq->load.weight / nr_running;

	return 0;
}
static void task_waking_fair(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 min_vruntime;

#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;

	do {
		min_vruntime_copy = cfs_rq->min_vruntime_copy;
		smp_rmb();
		min_vruntime = cfs_rq->min_vruntime;
	} while (min_vruntime != min_vruntime_copy);
#else
	min_vruntime = cfs_rq->min_vruntime;
#endif

	se->vruntime -= min_vruntime;
}
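
/*
 * Illustrative sketch, not part of the scheduler: the !CONFIG_64BIT loop in
 * task_waking_fair() is a hand-rolled consistent read of a 64-bit value that
 * a 32-bit CPU cannot load atomically.  The hypothetical helper below spells
 * out the same retry pattern on its own; it assumes the min_vruntime_copy
 * field that only exists on 32-bit builds.
 */
static inline u64 cfs_rq_min_vruntime_sketch(struct cfs_rq *cfs_rq)
{
	u64 min_vruntime, copy;

	do {
		copy = cfs_rq->min_vruntime_copy;
		smp_rmb();			/* pairs with the writer's smp_wmb() */
		min_vruntime = cfs_rq->min_vruntime;
	} while (min_vruntime != copy);		/* retry if the writer raced with us */

	return min_vruntime;
}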
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * Calculate the effective load difference if @wl is added (subtracted) to @tg
 * on this @cpu and results in a total addition (subtraction) of @wg to the
 * total group weight.
 *
 * Given a runqueue weight distribution (rw_i) we can compute a shares
 * distribution (s_i) using:
 *
 *   s_i = rw_i / \Sum rw_j						(1)
 *
 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
 * shares distribution (s_i):
 *
 *   rw_i = {   2,   4,   1,   0 }
 *   s_i  = { 2/7, 4/7, 1/7,   0 }
 *
 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
 * task used to run on and the CPU the waker is running on), we need to
 * compute the effect of waking a task on either CPU and, in case of a sync
 * wakeup, compute the effect of the current task going to sleep.
 *
 * So for a change of @wl to the local @cpu with an overall group weight change
 * of @wl we can compute the new shares distribution (s'_i) using:
 *
 *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
 *
 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
 * differences in waking a task to CPU 0. The additional task changes the
 * weight and shares distributions like:
 *
 *   rw'_i = {   3,   4,   1,   0 }
 *   s'_i  = { 3/8, 4/8, 1/8,   0 }
 *
 * We can then compute the difference in effective weight by using:
 *
 *   dw_i = S * (s'_i - s_i)						(3)
 *
 * Where 'S' is the group weight as seen by its parent.
 *
 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
 * 4/7) times the weight of the group.
 */
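/*
 * Worked numbers for the example above (illustrative only): per (3),
 *   dw_0 = S * (3/8 - 2/7) = S * (21 - 16)/56 =  5/56 * S
 *   dw_1 = S * (4/8 - 4/7) = S * (28 - 32)/56 = -4/56 * S
 * which are exactly the 5/56 and -4/56 figures quoted in the comment.
 */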
2631 static long effective_load(struct task_group
*tg
, int cpu
, long wl
, long wg
)
2633 struct sched_entity
*se
= tg
->se
[cpu
];
2635 if (!tg
->parent
) /* the trivial, non-cgroup case */
2638 for_each_sched_entity(se
) {
2644 * W = @wg + \Sum rw_j
2646 W
= wg
+ calc_tg_weight(tg
, se
->my_q
);
2651 w
= se
->my_q
->load
.weight
+ wl
;
2654 * wl = S * s'_i; see (2)
2657 wl
= (w
* tg
->shares
) / W
;
2662 * Per the above, wl is the new se->load.weight value; since
2663 * those are clipped to [MIN_SHARES, ...) do so now. See
2664 * calc_cfs_shares().
2666 if (wl
< MIN_SHARES
)
2670 * wl = dw_i = S * (s'_i - s_i); see (3)
2672 wl
-= se
->load
.weight
;
2675 * Recursively apply this logic to all parent groups to compute
2676 * the final effective load change on the root group. Since
2677 * only the @tg group gets extra weight, all parent groups can
2678 * only redistribute existing shares. @wl is the shift in shares
2679 * resulting from this level per the above.
2688 static inline unsigned long effective_load(struct task_group
*tg
, int cpu
,
2689 unsigned long wl
, unsigned long wg
)
2696 static int wake_affine(struct sched_domain
*sd
, struct task_struct
*p
, int sync
)
2698 s64 this_load
, load
;
2699 int idx
, this_cpu
, prev_cpu
;
2700 unsigned long tl_per_task
;
2701 struct task_group
*tg
;
2702 unsigned long weight
;
2706 this_cpu
= smp_processor_id();
2707 prev_cpu
= task_cpu(p
);
2708 load
= source_load(prev_cpu
, idx
);
2709 this_load
= target_load(this_cpu
, idx
);
2712 * If sync wakeup then subtract the (maximum possible)
2713 * effect of the currently running task from the load
2714 * of the current CPU:
2717 tg
= task_group(current
);
2718 weight
= current
->se
.load
.weight
;
2720 this_load
+= effective_load(tg
, this_cpu
, -weight
, -weight
);
2721 load
+= effective_load(tg
, prev_cpu
, 0, -weight
);
2725 weight
= p
->se
.load
.weight
;
2728 * In low-load situations, where prev_cpu is idle and this_cpu is idle
2729 * due to the sync cause above having dropped this_load to 0, we'll
2730 * always have an imbalance, but there's really nothing you can do
2731 * about that, so that's good too.
2733 * Otherwise check if either cpus are near enough in load to allow this
2734 * task to be woken on this_cpu.
2736 if (this_load
> 0) {
2737 s64 this_eff_load
, prev_eff_load
;
2739 this_eff_load
= 100;
2740 this_eff_load
*= power_of(prev_cpu
);
2741 this_eff_load
*= this_load
+
2742 effective_load(tg
, this_cpu
, weight
, weight
);
2744 prev_eff_load
= 100 + (sd
->imbalance_pct
- 100) / 2;
2745 prev_eff_load
*= power_of(this_cpu
);
2746 prev_eff_load
*= load
+ effective_load(tg
, prev_cpu
, 0, weight
);
2748 balanced
= this_eff_load
<= prev_eff_load
;
2753 * If the currently running task will sleep within
2754 * a reasonable amount of time then attract this newly
2757 if (sync
&& balanced
)
2760 schedstat_inc(p
, se
.statistics
.nr_wakeups_affine_attempts
);
2761 tl_per_task
= cpu_avg_load_per_task(this_cpu
);
2764 (this_load
<= load
&&
2765 this_load
+ target_load(prev_cpu
, idx
) <= tl_per_task
)) {
2767 * This domain has SD_WAKE_AFFINE and
2768 * p is cache cold in this domain, and
2769 * there is no bad imbalance.
2771 schedstat_inc(sd
, ttwu_move_affine
);
2772 schedstat_inc(p
, se
.statistics
.nr_wakeups_affine
);
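
/*
 * Illustrative sketch, not part of the scheduler: the balanced test above in
 * plain numbers.  Assume equal cpu power (1024), imbalance_pct == 125, an
 * effective waking-side load of 900 and a prev_cpu load of 1000 (all values
 * made up):
 *
 *   this_eff_load = 100                   * 1024 *  900 =  92160000
 *   prev_eff_load = (100 + (125 - 100)/2) * 1024 * 1000 = 114688000
 *
 * this_eff_load <= prev_eff_load, so the wakeup counts as balanced and the
 * task may be pulled to the waking CPU.
 */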
2780 * find_idlest_group finds and returns the least busy CPU group within the
2783 static struct sched_group
*
2784 find_idlest_group(struct sched_domain
*sd
, struct task_struct
*p
,
2785 int this_cpu
, int load_idx
)
2787 struct sched_group
*idlest
= NULL
, *group
= sd
->groups
;
2788 unsigned long min_load
= ULONG_MAX
, this_load
= 0;
2789 int imbalance
= 100 + (sd
->imbalance_pct
-100)/2;
2792 unsigned long load
, avg_load
;
2796 /* Skip over this group if it has no CPUs allowed */
2797 if (!cpumask_intersects(sched_group_cpus(group
),
2798 tsk_cpus_allowed(p
)))
2801 local_group
= cpumask_test_cpu(this_cpu
,
2802 sched_group_cpus(group
));
2804 /* Tally up the load of all CPUs in the group */
2807 for_each_cpu(i
, sched_group_cpus(group
)) {
2808 /* Bias balancing toward cpus of our domain */
2810 load
= source_load(i
, load_idx
);
2812 load
= target_load(i
, load_idx
);
2817 /* Adjust by relative CPU power of the group */
2818 avg_load
= (avg_load
* SCHED_POWER_SCALE
) / group
->sgp
->power
;
2821 this_load
= avg_load
;
2822 } else if (avg_load
< min_load
) {
2823 min_load
= avg_load
;
2826 } while (group
= group
->next
, group
!= sd
->groups
);
2828 if (!idlest
|| 100*this_load
< imbalance
*min_load
)
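
/*
 * Illustrative sketch, not part of the scheduler: with an imbalance_pct of
 * 125 the local 'imbalance' factor above is 100 + (125 - 100)/2 == 112, so a
 * remote group is only returned when 100 * this_load >= 112 * min_load, i.e.
 * when the local group is at least ~12% busier than the idlest remote group
 * (the 125 value is just an example imbalance_pct).
 */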
2834 * find_idlest_cpu - find the idlest cpu among the cpus in group.
2837 find_idlest_cpu(struct sched_group
*group
, struct task_struct
*p
, int this_cpu
)
2839 unsigned long load
, min_load
= ULONG_MAX
;
2843 /* Traverse only the allowed CPUs */
2844 for_each_cpu_and(i
, sched_group_cpus(group
), tsk_cpus_allowed(p
)) {
2845 load
= weighted_cpuload(i
);
2847 if (load
< min_load
|| (load
== min_load
&& i
== this_cpu
)) {
2857 * Try and locate an idle CPU in the sched_domain.
2859 static int select_idle_sibling(struct task_struct
*p
, int target
)
2861 int cpu
= smp_processor_id();
2862 int prev_cpu
= task_cpu(p
);
2863 struct sched_domain
*sd
;
2864 struct sched_group
*sg
;
2868 * If the task is going to be woken-up on this cpu and if it is
2869 * already idle, then it is the right target.
2871 if (target
== cpu
&& idle_cpu(cpu
))
	 * If the task is going to be woken-up on the cpu where it previously
	 * ran and if it is currently idle, then it is the right target.
2878 if (target
== prev_cpu
&& idle_cpu(prev_cpu
))
	 * Otherwise, iterate the domains and find an eligible idle cpu.
2884 sd
= rcu_dereference(per_cpu(sd_llc
, target
));
2885 for_each_lower_domain(sd
) {
2888 if (!cpumask_intersects(sched_group_cpus(sg
),
2889 tsk_cpus_allowed(p
)))
2892 for_each_cpu(i
, sched_group_cpus(sg
)) {
2897 target
= cpumask_first_and(sched_group_cpus(sg
),
2898 tsk_cpus_allowed(p
));
2902 } while (sg
!= sd
->groups
);
2909 * sched_balance_self: balance the current task (running on cpu) in domains
2910 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
2913 * Balance, ie. select the least loaded group.
2915 * Returns the target CPU number, or the same CPU if no balancing is needed.
2917 * preempt must be disabled.
2920 select_task_rq_fair(struct task_struct
*p
, int sd_flag
, int wake_flags
)
2922 struct sched_domain
*tmp
, *affine_sd
= NULL
, *sd
= NULL
;
2923 int cpu
= smp_processor_id();
2924 int prev_cpu
= task_cpu(p
);
2926 int want_affine
= 0;
2927 int sync
= wake_flags
& WF_SYNC
;
2929 if (p
->nr_cpus_allowed
== 1)
2932 if (sd_flag
& SD_BALANCE_WAKE
) {
2933 if (cpumask_test_cpu(cpu
, tsk_cpus_allowed(p
)))
2939 for_each_domain(cpu
, tmp
) {
2940 if (!(tmp
->flags
& SD_LOAD_BALANCE
))
2944 * If both cpu and prev_cpu are part of this domain,
2945 * cpu is a valid SD_WAKE_AFFINE target.
2947 if (want_affine
&& (tmp
->flags
& SD_WAKE_AFFINE
) &&
2948 cpumask_test_cpu(prev_cpu
, sched_domain_span(tmp
))) {
2953 if (tmp
->flags
& sd_flag
)
2958 if (cpu
!= prev_cpu
&& wake_affine(affine_sd
, p
, sync
))
2961 new_cpu
= select_idle_sibling(p
, prev_cpu
);
2966 int load_idx
= sd
->forkexec_idx
;
2967 struct sched_group
*group
;
2970 if (!(sd
->flags
& sd_flag
)) {
2975 if (sd_flag
& SD_BALANCE_WAKE
)
2976 load_idx
= sd
->wake_idx
;
2978 group
= find_idlest_group(sd
, p
, cpu
, load_idx
);
2984 new_cpu
= find_idlest_cpu(group
, p
, cpu
);
2985 if (new_cpu
== -1 || new_cpu
== cpu
) {
2986 /* Now try balancing at a lower domain level of cpu */
2991 /* Now try balancing at a lower domain level of new_cpu */
2993 weight
= sd
->span_weight
;
2995 for_each_domain(cpu
, tmp
) {
2996 if (weight
<= tmp
->span_weight
)
2998 if (tmp
->flags
& sd_flag
)
3001 /* while loop will break here if sd == NULL */
3008 #endif /* CONFIG_SMP */
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since it's curr that is running now, convert the gran from
	 * real-time to virtual-time in its units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted easier. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
	 * be smaller, again penalizing the lighter task.
	 *
	 * This is especially important for buddies when the leftmost
	 * task is higher priority than the buddy.
	 */
	return calc_delta_fair(gran, se);
}
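
/*
 * Illustrative sketch, not part of the scheduler: calc_delta_fair() scales
 * the granularity by NICE_0_LOAD / se->load.weight.  Assuming a 1ms wakeup
 * granularity, a nice-0 'se' (weight 1024) keeps gran at ~1ms, while a
 * heavier nice -5 'se' (weight 3121) shrinks it to roughly 1ms * 1024/3121
 * ~= 0.33ms, letting the heavy waker preempt sooner, exactly as the comment
 * above describes (weights taken from the standard nice-to-weight table).
 */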
3032 * Should 'se' preempt 'curr'.
3046 wakeup_preempt_entity(struct sched_entity
*curr
, struct sched_entity
*se
)
3048 s64 gran
, vdiff
= curr
->vruntime
- se
->vruntime
;
3053 gran
= wakeup_gran(curr
, se
);
3060 static void set_last_buddy(struct sched_entity
*se
)
3062 if (entity_is_task(se
) && unlikely(task_of(se
)->policy
== SCHED_IDLE
))
3065 for_each_sched_entity(se
)
3066 cfs_rq_of(se
)->last
= se
;
3069 static void set_next_buddy(struct sched_entity
*se
)
3071 if (entity_is_task(se
) && unlikely(task_of(se
)->policy
== SCHED_IDLE
))
3074 for_each_sched_entity(se
)
3075 cfs_rq_of(se
)->next
= se
;
3078 static void set_skip_buddy(struct sched_entity
*se
)
3080 for_each_sched_entity(se
)
3081 cfs_rq_of(se
)->skip
= se
;
3085 * Preempt the current task with a newly woken task if needed:
3087 static void check_preempt_wakeup(struct rq
*rq
, struct task_struct
*p
, int wake_flags
)
3089 struct task_struct
*curr
= rq
->curr
;
3090 struct sched_entity
*se
= &curr
->se
, *pse
= &p
->se
;
3091 struct cfs_rq
*cfs_rq
= task_cfs_rq(curr
);
3092 int scale
= cfs_rq
->nr_running
>= sched_nr_latency
;
3093 int next_buddy_marked
= 0;
3095 if (unlikely(se
== pse
))
3099 * This is possible from callers such as move_task(), in which we
	 * unconditionally check_preempt_curr() after an enqueue (which may have
	 * led to a throttle). This both saves work and prevents false
3102 * next-buddy nomination below.
3104 if (unlikely(throttled_hierarchy(cfs_rq_of(pse
))))
3107 if (sched_feat(NEXT_BUDDY
) && scale
&& !(wake_flags
& WF_FORK
)) {
3108 set_next_buddy(pse
);
3109 next_buddy_marked
= 1;
3113 * We can come here with TIF_NEED_RESCHED already set from new task
3116 * Note: this also catches the edge-case of curr being in a throttled
3117 * group (e.g. via set_curr_task), since update_curr() (in the
3118 * enqueue of curr) will have resulted in resched being set. This
3119 * prevents us from potentially nominating it as a false LAST_BUDDY
3122 if (test_tsk_need_resched(curr
))
3125 /* Idle tasks are by definition preempted by non-idle tasks. */
3126 if (unlikely(curr
->policy
== SCHED_IDLE
) &&
3127 likely(p
->policy
!= SCHED_IDLE
))
3131 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3132 * is driven by the tick):
3134 if (unlikely(p
->policy
!= SCHED_NORMAL
))
3137 find_matching_se(&se
, &pse
);
3138 update_curr(cfs_rq_of(se
));
3140 if (wakeup_preempt_entity(se
, pse
) == 1) {
3142 * Bias pick_next to pick the sched entity that is
3143 * triggering this preemption.
3145 if (!next_buddy_marked
)
3146 set_next_buddy(pse
);
3155 * Only set the backward buddy when the current task is still
3156 * on the rq. This can happen when a wakeup gets interleaved
3157 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class,
	 * for obvious reasons it's a bad idea to schedule back to it.
3163 if (unlikely(!se
->on_rq
|| curr
== rq
->idle
))
3166 if (sched_feat(LAST_BUDDY
) && scale
&& entity_is_task(se
))
3170 static struct task_struct
*pick_next_task_fair(struct rq
*rq
)
3172 struct task_struct
*p
;
3173 struct cfs_rq
*cfs_rq
= &rq
->cfs
;
3174 struct sched_entity
*se
;
3176 if (!cfs_rq
->nr_running
)
3180 se
= pick_next_entity(cfs_rq
);
3181 set_next_entity(cfs_rq
, se
);
3182 cfs_rq
= group_cfs_rq(se
);
3186 if (hrtick_enabled(rq
))
3187 hrtick_start_fair(rq
, p
);
3193 * Account for a descheduled task:
3195 static void put_prev_task_fair(struct rq
*rq
, struct task_struct
*prev
)
3197 struct sched_entity
*se
= &prev
->se
;
3198 struct cfs_rq
*cfs_rq
;
3200 for_each_sched_entity(se
) {
3201 cfs_rq
= cfs_rq_of(se
);
3202 put_prev_entity(cfs_rq
, se
);
3207 * sched_yield() is very simple
3209 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3211 static void yield_task_fair(struct rq
*rq
)
3213 struct task_struct
*curr
= rq
->curr
;
3214 struct cfs_rq
*cfs_rq
= task_cfs_rq(curr
);
3215 struct sched_entity
*se
= &curr
->se
;
3218 * Are we the only task in the tree?
3220 if (unlikely(rq
->nr_running
== 1))
3223 clear_buddies(cfs_rq
, se
);
3225 if (curr
->policy
!= SCHED_BATCH
) {
3226 update_rq_clock(rq
);
3228 * Update run-time statistics of the 'current'.
3230 update_curr(cfs_rq
);
3232 * Tell update_rq_clock() that we've just updated,
3233 * so we don't do microscopic update in schedule()
3234 * and double the fastpath cost.
3236 rq
->skip_clock_update
= 1;
3242 static bool yield_to_task_fair(struct rq
*rq
, struct task_struct
*p
, bool preempt
)
3244 struct sched_entity
*se
= &p
->se
;
3246 /* throttled hierarchies are not runnable */
3247 if (!se
->on_rq
|| throttled_hierarchy(cfs_rq_of(se
)))
3250 /* Tell the scheduler that we'd really like pse to run next. */
3253 yield_task_fair(rq
);
3259 /**************************************************
3260 * Fair scheduling class load-balancing methods:
3263 static unsigned long __read_mostly max_load_balance_interval
= HZ
/10;
3265 #define LBF_ALL_PINNED 0x01
3266 #define LBF_NEED_BREAK 0x02
3267 #define LBF_SOME_PINNED 0x04
3270 struct sched_domain
*sd
;
3278 struct cpumask
*dst_grpmask
;
3280 enum cpu_idle_type idle
;
3282 /* The set of CPUs under consideration for load-balancing */
3283 struct cpumask
*cpus
;
3288 unsigned int loop_break
;
3289 unsigned int loop_max
;
3293 * move_task - move a task from one runqueue to another runqueue.
3294 * Both runqueues must be locked.
3296 static void move_task(struct task_struct
*p
, struct lb_env
*env
)
3298 deactivate_task(env
->src_rq
, p
, 0);
3299 set_task_cpu(p
, env
->dst_cpu
);
3300 activate_task(env
->dst_rq
, p
, 0);
3301 check_preempt_curr(env
->dst_rq
, p
, 0);
 * Is this task likely cache-hot:
 */
static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
	s64 delta;

	if (p->sched_class != &fair_sched_class)
		return 0;

	if (unlikely(p->policy == SCHED_IDLE))
		return 0;

	/*
	 * Buddy candidates are cache hot:
	 */
	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
			(&p->se == cfs_rq_of(&p->se)->next ||
			 &p->se == cfs_rq_of(&p->se)->last))
		return 1;

	if (sysctl_sched_migration_cost == -1)
		return 1;
	if (sysctl_sched_migration_cost == 0)
		return 0;

	delta = now - p->se.exec_start;

	return delta < (s64)sysctl_sched_migration_cost;
}
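
/*
 * Illustrative sketch, not part of the scheduler: with
 * sysctl_sched_migration_cost set to 500000ns, a fair task whose exec_start
 * is 200000ns in the past is still considered cache hot (200000 < 500000),
 * while one that last ran 2ms ago is not.  Setting the sysctl to -1 treats
 * every task as hot and setting it to 0 treats none as hot, matching the
 * two early returns above (the 200000ns/2ms figures are made up).
 */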
3337 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3340 int can_migrate_task(struct task_struct
*p
, struct lb_env
*env
)
3342 int tsk_cache_hot
= 0;
3344 * We do not migrate tasks that are:
3345 * 1) running (obviously), or
3346 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3347 * 3) are cache-hot on their current CPU.
3349 if (!cpumask_test_cpu(env
->dst_cpu
, tsk_cpus_allowed(p
))) {
3352 schedstat_inc(p
, se
.statistics
.nr_failed_migrations_affine
);
3355 * Remember if this task can be migrated to any other cpu in
3356 * our sched_group. We may want to revisit it if we couldn't
3357 * meet load balance goals by pulling other tasks on src_cpu.
3359 * Also avoid computing new_dst_cpu if we have already computed
3360 * one in current iteration.
3362 if (!env
->dst_grpmask
|| (env
->flags
& LBF_SOME_PINNED
))
3365 new_dst_cpu
= cpumask_first_and(env
->dst_grpmask
,
3366 tsk_cpus_allowed(p
));
3367 if (new_dst_cpu
< nr_cpu_ids
) {
3368 env
->flags
|= LBF_SOME_PINNED
;
3369 env
->new_dst_cpu
= new_dst_cpu
;
	/* Record that we found at least one task that could run on dst_cpu */
3375 env
->flags
&= ~LBF_ALL_PINNED
;
3377 if (task_running(env
->src_rq
, p
)) {
3378 schedstat_inc(p
, se
.statistics
.nr_failed_migrations_running
);
3383 * Aggressive migration if:
3384 * 1) task is cache cold, or
3385 * 2) too many balance attempts have failed.
3388 tsk_cache_hot
= task_hot(p
, env
->src_rq
->clock_task
, env
->sd
);
3389 if (!tsk_cache_hot
||
3390 env
->sd
->nr_balance_failed
> env
->sd
->cache_nice_tries
) {
3391 #ifdef CONFIG_SCHEDSTATS
3392 if (tsk_cache_hot
) {
3393 schedstat_inc(env
->sd
, lb_hot_gained
[env
->idle
]);
3394 schedstat_inc(p
, se
.statistics
.nr_forced_migrations
);
3400 if (tsk_cache_hot
) {
3401 schedstat_inc(p
, se
.statistics
.nr_failed_migrations_hot
);
3408 * move_one_task tries to move exactly one task from busiest to this_rq, as
3409 * part of active balancing operations within "domain".
3410 * Returns 1 if successful and 0 otherwise.
3412 * Called with both runqueues locked.
3414 static int move_one_task(struct lb_env
*env
)
3416 struct task_struct
*p
, *n
;
3418 list_for_each_entry_safe(p
, n
, &env
->src_rq
->cfs_tasks
, se
.group_node
) {
3419 if (throttled_lb_pair(task_group(p
), env
->src_rq
->cpu
, env
->dst_cpu
))
3422 if (!can_migrate_task(p
, env
))
3427 * Right now, this is only the second place move_task()
3428 * is called, so we can safely collect move_task()
3429 * stats here rather than inside move_task().
3431 schedstat_inc(env
->sd
, lb_gained
[env
->idle
]);
3437 static unsigned long task_h_load(struct task_struct
*p
);
3439 static const unsigned int sched_nr_migrate_break
= 32;
3442 * move_tasks tries to move up to imbalance weighted load from busiest to
3443 * this_rq, as part of a balancing operation within domain "sd".
3444 * Returns 1 if successful and 0 otherwise.
3446 * Called with both runqueues locked.
3448 static int move_tasks(struct lb_env
*env
)
3450 struct list_head
*tasks
= &env
->src_rq
->cfs_tasks
;
3451 struct task_struct
*p
;
3455 if (env
->imbalance
<= 0)
3458 while (!list_empty(tasks
)) {
3459 p
= list_first_entry(tasks
, struct task_struct
, se
.group_node
);
3462 /* We've more or less seen every task there is, call it quits */
3463 if (env
->loop
> env
->loop_max
)
3466 /* take a breather every nr_migrate tasks */
3467 if (env
->loop
> env
->loop_break
) {
3468 env
->loop_break
+= sched_nr_migrate_break
;
3469 env
->flags
|= LBF_NEED_BREAK
;
3473 if (throttled_lb_pair(task_group(p
), env
->src_cpu
, env
->dst_cpu
))
3476 load
= task_h_load(p
);
3478 if (sched_feat(LB_MIN
) && load
< 16 && !env
->sd
->nr_balance_failed
)
3481 if ((load
/ 2) > env
->imbalance
)
3484 if (!can_migrate_task(p
, env
))
3489 env
->imbalance
-= load
;
3491 #ifdef CONFIG_PREEMPT
3493 * NEWIDLE balancing is a source of latency, so preemptible
3494 * kernels will stop after the first task is pulled to minimize
3495 * the critical section.
3497 if (env
->idle
== CPU_NEWLY_IDLE
)
3502 * We only want to steal up to the prescribed amount of
3505 if (env
->imbalance
<= 0)
3510 list_move_tail(&p
->se
.group_node
, tasks
);
3514 * Right now, this is one of only two places move_task() is called,
3515 * so we can safely collect move_task() stats here rather than
3516 * inside move_task().
3518 schedstat_add(env
->sd
, lb_gained
[env
->idle
], pulled
);
3523 #ifdef CONFIG_FAIR_GROUP_SCHED
3525 * update tg->load_weight by folding this cpu's load_avg
3527 static int update_shares_cpu(struct task_group
*tg
, int cpu
)
3529 struct cfs_rq
*cfs_rq
;
3530 unsigned long flags
;
3537 cfs_rq
= tg
->cfs_rq
[cpu
];
3539 raw_spin_lock_irqsave(&rq
->lock
, flags
);
3541 update_rq_clock(rq
);
3542 update_cfs_load(cfs_rq
, 1);
3545 * We need to update shares after updating tg->load_weight in
3546 * order to adjust the weight of groups with long running tasks.
3548 update_cfs_shares(cfs_rq
);
3550 raw_spin_unlock_irqrestore(&rq
->lock
, flags
);
3555 static void update_shares(int cpu
)
3557 struct cfs_rq
*cfs_rq
;
3558 struct rq
*rq
= cpu_rq(cpu
);
3562 * Iterates the task_group tree in a bottom up fashion, see
3563 * list_add_leaf_cfs_rq() for details.
3565 for_each_leaf_cfs_rq(rq
, cfs_rq
) {
3566 /* throttled entities do not contribute to load */
3567 if (throttled_hierarchy(cfs_rq
))
3570 update_shares_cpu(cfs_rq
->tg
, cpu
);
3576 * Compute the cpu's hierarchical load factor for each task group.
3577 * This needs to be done in a top-down fashion because the load of a child
3578 * group is a fraction of its parents load.
3580 static int tg_load_down(struct task_group
*tg
, void *data
)
3583 long cpu
= (long)data
;
3586 load
= cpu_rq(cpu
)->load
.weight
;
3588 load
= tg
->parent
->cfs_rq
[cpu
]->h_load
;
3589 load
*= tg
->se
[cpu
]->load
.weight
;
3590 load
/= tg
->parent
->cfs_rq
[cpu
]->load
.weight
+ 1;
3593 tg
->cfs_rq
[cpu
]->h_load
= load
;
3598 static void update_h_load(long cpu
)
3600 struct rq
*rq
= cpu_rq(cpu
);
3601 unsigned long now
= jiffies
;
3603 if (rq
->h_load_throttle
== now
)
3606 rq
->h_load_throttle
= now
;
3609 walk_tg_tree(tg_load_down
, tg_nop
, (void *)cpu
);
3613 static unsigned long task_h_load(struct task_struct
*p
)
3615 struct cfs_rq
*cfs_rq
= task_cfs_rq(p
);
3618 load
= p
->se
.load
.weight
;
3619 load
= div_u64(load
* cfs_rq
->h_load
, cfs_rq
->load
.weight
+ 1);
3624 static inline void update_shares(int cpu
)
3628 static inline void update_h_load(long cpu
)
3632 static unsigned long task_h_load(struct task_struct
*p
)
3634 return p
->se
.load
.weight
;
/********** Helpers for find_busiest_group ************************/
/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 *		 during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest; /* Busiest group in this sd */
	struct sched_group *this;  /* Local group in this sd */
	unsigned long total_load;  /* Total load of all groups in sd */
	unsigned long total_pwr;   /* Total power of all groups in sd */
	unsigned long avg_load;	   /* Average load across all groups in sd */

	/** Statistics of this group */
	unsigned long this_load;
	unsigned long this_load_per_task;
	unsigned long this_nr_running;
	unsigned long this_has_capacity;
	unsigned int  this_idle_cpus;

	/* Statistics of the busiest group */
	unsigned int  busiest_idle_cpus;
	unsigned long max_load;
	unsigned long busiest_load_per_task;
	unsigned long busiest_nr_running;
	unsigned long busiest_group_capacity;
	unsigned long busiest_has_capacity;
	unsigned int  busiest_group_weight;

	int group_imb; /* Is there imbalance in this sd */
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /* Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long sum_nr_running; /* Nr tasks running in the group */
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long group_capacity;
	unsigned long idle_cpus;
	unsigned long group_weight;
	int group_imb; /* Is there an imbalance in the group ? */
	int group_has_capacity; /* Is there extra capacity in the group? */
};
3685 * get_sd_load_idx - Obtain the load index for a given sched domain.
3686 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU for which the sd's load_idx is obtained.
3689 static inline int get_sd_load_idx(struct sched_domain
*sd
,
3690 enum cpu_idle_type idle
)
3696 load_idx
= sd
->busy_idx
;
3699 case CPU_NEWLY_IDLE
:
3700 load_idx
= sd
->newidle_idx
;
3703 load_idx
= sd
->idle_idx
;
unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return SCHED_POWER_SCALE;
}

unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return default_scale_freq_power(sd, cpu);
}

unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
	unsigned long weight = sd->span_weight;
	unsigned long smt_gain = sd->smt_gain;

	smt_gain /= weight;

	return smt_gain;
}

unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
{
	return default_scale_smt_power(sd, cpu);
}
unsigned long scale_rt_power(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	u64 total, available, age_stamp, avg;

	/*
	 * Since we're reading these variables without serialization make sure
	 * we read them once before doing sanity checks on them.
	 */
	age_stamp = ACCESS_ONCE(rq->age_stamp);
	avg = ACCESS_ONCE(rq->rt_avg);

	total = sched_avg_period() + (rq->clock - age_stamp);

	if (unlikely(total < avg)) {
		/* Ensures that power won't end up being negative */
		available = 0;
	} else {
		available = total - avg;
	}

	if (unlikely((s64)total < SCHED_POWER_SCALE))
		total = SCHED_POWER_SCALE;

	total >>= SCHED_POWER_SHIFT;

	return div_u64(available, total);
}
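
/*
 * Illustrative sketch, not part of the scheduler: the same computation as
 * scale_rt_power() above on made-up inputs, assuming SCHED_POWER_SHIFT == 10
 * (i.e. SCHED_POWER_SCALE == 1024).  The helper name and all numbers are
 * invented for the example.
 */
static inline u64 scale_rt_power_example(void)
{
	u64 total = 1 << 20;			/* averaging window, ns        */
	u64 avg = 1 << 18;			/* time eaten by RT/IRQ, ns    */
	u64 available = total - avg;		/* 786432 ns left for CFS      */

	total >>= 10;				/* scale window to power units */

	return div_u64(available, total);	/* 786432 / 1024 == 768 (~75%) */
}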
3764 static void update_cpu_power(struct sched_domain
*sd
, int cpu
)
3766 unsigned long weight
= sd
->span_weight
;
3767 unsigned long power
= SCHED_POWER_SCALE
;
3768 struct sched_group
*sdg
= sd
->groups
;
3770 if ((sd
->flags
& SD_SHARE_CPUPOWER
) && weight
> 1) {
3771 if (sched_feat(ARCH_POWER
))
3772 power
*= arch_scale_smt_power(sd
, cpu
);
3774 power
*= default_scale_smt_power(sd
, cpu
);
3776 power
>>= SCHED_POWER_SHIFT
;
3779 sdg
->sgp
->power_orig
= power
;
3781 if (sched_feat(ARCH_POWER
))
3782 power
*= arch_scale_freq_power(sd
, cpu
);
3784 power
*= default_scale_freq_power(sd
, cpu
);
3786 power
>>= SCHED_POWER_SHIFT
;
3788 power
*= scale_rt_power(cpu
);
3789 power
>>= SCHED_POWER_SHIFT
;
3794 cpu_rq(cpu
)->cpu_power
= power
;
3795 sdg
->sgp
->power
= power
;
3798 void update_group_power(struct sched_domain
*sd
, int cpu
)
3800 struct sched_domain
*child
= sd
->child
;
3801 struct sched_group
*group
, *sdg
= sd
->groups
;
3802 unsigned long power
;
3803 unsigned long interval
;
3805 interval
= msecs_to_jiffies(sd
->balance_interval
);
3806 interval
= clamp(interval
, 1UL, max_load_balance_interval
);
3807 sdg
->sgp
->next_update
= jiffies
+ interval
;
3810 update_cpu_power(sd
, cpu
);
3816 if (child
->flags
& SD_OVERLAP
) {
3818 * SD_OVERLAP domains cannot assume that child groups
3819 * span the current group.
3822 for_each_cpu(cpu
, sched_group_cpus(sdg
))
3823 power
+= power_of(cpu
);
3826 * !SD_OVERLAP domains can assume that child groups
3827 * span the current group.
3830 group
= child
->groups
;
3832 power
+= group
->sgp
->power
;
3833 group
= group
->next
;
3834 } while (group
!= child
->groups
);
3837 sdg
->sgp
->power_orig
= sdg
->sgp
->power
= power
;
 * Try and fix up capacity for tiny siblings, this is needed when
 * things like SD_ASYM_PACKING need f_b_g to select another sibling
 * which on its own isn't powerful enough.
 *
 * See update_sd_pick_busiest() and check_asym_packing().
 */
static inline int
fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
{
	/*
	 * Only siblings can have significantly less than SCHED_POWER_SCALE
	 */
	if (!(sd->flags & SD_SHARE_CPUPOWER))
		return 0;

	/*
	 * If ~90% of the cpu_power is still there, we're good.
	 */
	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
		return 1;

	return 0;
}
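
/*
 * Illustrative sketch, not part of the scheduler: the 32/29 comparison above
 * is an integer form of "power > ~90.6% of power_orig" (29/32 == 0.906).
 * E.g. with power_orig == 1024, power == 940 passes (940 * 32 == 30080 >
 * 1024 * 29 == 29696) and the sibling keeps a capacity of 1, while
 * power == 900 (28800 <= 29696) does not (all numbers made up).
 */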
3866 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3867 * @env: The load balancing environment.
3868 * @group: sched_group whose statistics are to be updated.
3869 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3870 * @local_group: Does group contain this_cpu.
3871 * @balance: Should we balance.
3872 * @sgs: variable to hold the statistics for this group.
3874 static inline void update_sg_lb_stats(struct lb_env
*env
,
3875 struct sched_group
*group
, int load_idx
,
3876 int local_group
, int *balance
, struct sg_lb_stats
*sgs
)
3878 unsigned long nr_running
, max_nr_running
, min_nr_running
;
3879 unsigned long load
, max_cpu_load
, min_cpu_load
;
3880 unsigned int balance_cpu
= -1, first_idle_cpu
= 0;
3881 unsigned long avg_load_per_task
= 0;
3885 balance_cpu
= group_balance_cpu(group
);
3887 /* Tally up the load of all CPUs in the group */
3889 min_cpu_load
= ~0UL;
3891 min_nr_running
= ~0UL;
3893 for_each_cpu_and(i
, sched_group_cpus(group
), env
->cpus
) {
3894 struct rq
*rq
= cpu_rq(i
);
3896 nr_running
= rq
->nr_running
;
3898 /* Bias balancing toward cpus of our domain */
3900 if (idle_cpu(i
) && !first_idle_cpu
&&
3901 cpumask_test_cpu(i
, sched_group_mask(group
))) {
3906 load
= target_load(i
, load_idx
);
3908 load
= source_load(i
, load_idx
);
3909 if (load
> max_cpu_load
)
3910 max_cpu_load
= load
;
3911 if (min_cpu_load
> load
)
3912 min_cpu_load
= load
;
3914 if (nr_running
> max_nr_running
)
3915 max_nr_running
= nr_running
;
3916 if (min_nr_running
> nr_running
)
3917 min_nr_running
= nr_running
;
3920 sgs
->group_load
+= load
;
3921 sgs
->sum_nr_running
+= nr_running
;
3922 sgs
->sum_weighted_load
+= weighted_cpuload(i
);
3928 * First idle cpu or the first cpu(busiest) in this sched group
3929 * is eligible for doing load balancing at this and above
3930 * domains. In the newly idle case, we will allow all the cpu's
3931 * to do the newly idle load balance.
3934 if (env
->idle
!= CPU_NEWLY_IDLE
) {
3935 if (balance_cpu
!= env
->dst_cpu
) {
3939 update_group_power(env
->sd
, env
->dst_cpu
);
3940 } else if (time_after_eq(jiffies
, group
->sgp
->next_update
))
3941 update_group_power(env
->sd
, env
->dst_cpu
);
3944 /* Adjust by relative CPU power of the group */
3945 sgs
->avg_load
= (sgs
->group_load
*SCHED_POWER_SCALE
) / group
->sgp
->power
;
3948 * Consider the group unbalanced when the imbalance is larger
3949 * than the average weight of a task.
3951 * APZ: with cgroup the avg task weight can vary wildly and
3952 * might not be a suitable number - should we keep a
3953 * normalized nr_running number somewhere that negates
3956 if (sgs
->sum_nr_running
)
3957 avg_load_per_task
= sgs
->sum_weighted_load
/ sgs
->sum_nr_running
;
3959 if ((max_cpu_load
- min_cpu_load
) >= avg_load_per_task
&&
3960 (max_nr_running
- min_nr_running
) > 1)
3963 sgs
->group_capacity
= DIV_ROUND_CLOSEST(group
->sgp
->power
,
3965 if (!sgs
->group_capacity
)
3966 sgs
->group_capacity
= fix_small_capacity(env
->sd
, group
);
3967 sgs
->group_weight
= group
->group_weight
;
3969 if (sgs
->group_capacity
> sgs
->sum_nr_running
)
3970 sgs
->group_has_capacity
= 1;
3974 * update_sd_pick_busiest - return 1 on busiest group
3975 * @env: The load balancing environment.
3976 * @sds: sched_domain statistics
3977 * @sg: sched_group candidate to be checked for being the busiest
3978 * @sgs: sched_group statistics
3980 * Determine if @sg is a busier group than the previously selected
3983 static bool update_sd_pick_busiest(struct lb_env
*env
,
3984 struct sd_lb_stats
*sds
,
3985 struct sched_group
*sg
,
3986 struct sg_lb_stats
*sgs
)
3988 if (sgs
->avg_load
<= sds
->max_load
)
3991 if (sgs
->sum_nr_running
> sgs
->group_capacity
)
3998 * ASYM_PACKING needs to move all the work to the lowest
3999 * numbered CPUs in the group, therefore mark all groups
4000 * higher than ourself as busy.
4002 if ((env
->sd
->flags
& SD_ASYM_PACKING
) && sgs
->sum_nr_running
&&
4003 env
->dst_cpu
< group_first_cpu(sg
)) {
4007 if (group_first_cpu(sds
->busiest
) > group_first_cpu(sg
))
4015 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
4016 * @env: The load balancing environment.
4017 * @balance: Should we balance.
4018 * @sds: variable to hold the statistics for this sched_domain.
4020 static inline void update_sd_lb_stats(struct lb_env
*env
,
4021 int *balance
, struct sd_lb_stats
*sds
)
4023 struct sched_domain
*child
= env
->sd
->child
;
4024 struct sched_group
*sg
= env
->sd
->groups
;
4025 struct sg_lb_stats sgs
;
4026 int load_idx
, prefer_sibling
= 0;
4028 if (child
&& child
->flags
& SD_PREFER_SIBLING
)
4031 load_idx
= get_sd_load_idx(env
->sd
, env
->idle
);
4036 local_group
= cpumask_test_cpu(env
->dst_cpu
, sched_group_cpus(sg
));
4037 memset(&sgs
, 0, sizeof(sgs
));
4038 update_sg_lb_stats(env
, sg
, load_idx
, local_group
, balance
, &sgs
);
4040 if (local_group
&& !(*balance
))
4043 sds
->total_load
+= sgs
.group_load
;
4044 sds
->total_pwr
+= sg
->sgp
->power
;
4047 * In case the child domain prefers tasks go to siblings
4048 * first, lower the sg capacity to one so that we'll try
4049 * and move all the excess tasks away. We lower the capacity
4050 * of a group only if the local group has the capacity to fit
4051 * these excess tasks, i.e. nr_running < group_capacity. The
4052 * extra check prevents the case where you always pull from the
4053 * heaviest group when it is already under-utilized (possible
4054 * with a large weight task outweighs the tasks on the system).
4056 if (prefer_sibling
&& !local_group
&& sds
->this_has_capacity
)
4057 sgs
.group_capacity
= min(sgs
.group_capacity
, 1UL);
4060 sds
->this_load
= sgs
.avg_load
;
4062 sds
->this_nr_running
= sgs
.sum_nr_running
;
4063 sds
->this_load_per_task
= sgs
.sum_weighted_load
;
4064 sds
->this_has_capacity
= sgs
.group_has_capacity
;
4065 sds
->this_idle_cpus
= sgs
.idle_cpus
;
4066 } else if (update_sd_pick_busiest(env
, sds
, sg
, &sgs
)) {
4067 sds
->max_load
= sgs
.avg_load
;
4069 sds
->busiest_nr_running
= sgs
.sum_nr_running
;
4070 sds
->busiest_idle_cpus
= sgs
.idle_cpus
;
4071 sds
->busiest_group_capacity
= sgs
.group_capacity
;
4072 sds
->busiest_load_per_task
= sgs
.sum_weighted_load
;
4073 sds
->busiest_has_capacity
= sgs
.group_has_capacity
;
4074 sds
->busiest_group_weight
= sgs
.group_weight
;
4075 sds
->group_imb
= sgs
.group_imb
;
4079 } while (sg
!= env
->sd
->groups
);
/**
 * check_asym_packing - Check to see if the group is packed into the
 *			sched domain.
 *
 * This is primarily intended to be used at the sibling level.  Some
 * cores like POWER7 prefer to use lower numbered SMT threads.  In the
 * case of POWER7, it can move to lower SMT modes only when higher
 * threads are idle.  When in lower SMT modes, the threads will
 * perform better since they share fewer core resources.  Hence when we
 * have idle threads, we want them to be the higher ones.
 *
 * This packing function is run on idle threads.  It checks to see if
 * the busiest CPU in this domain (core in the P7 case) has a higher
 * CPU number than the packing function is being run on.  Here we are
 * assuming a lower CPU number will be equivalent to a lower SMT thread
 * number.
 *
 * Returns 1 when packing is required and a task should be moved to
 * this CPU.  The amount of the imbalance is returned in *imbalance.
 *
 * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain which is to be packed
 */
static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
{
	int busiest_cpu;

	if (!(env->sd->flags & SD_ASYM_PACKING))
		return 0;

	if (!sds->busiest)
		return 0;

	busiest_cpu = group_first_cpu(sds->busiest);
	if (env->dst_cpu > busiest_cpu)
		return 0;

	env->imbalance = DIV_ROUND_CLOSEST(
		sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);

	return 1;
}
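
/*
 * Illustrative sketch, not part of the scheduler: with a busiest-group
 * max_load of 512 and a group power of 2048, the assignment above yields
 * DIV_ROUND_CLOSEST(512 * 2048, 1024) == 1024, i.e. the power-scaled load is
 * converted back into weighted task load before move_tasks() consumes it
 * (numbers made up, SCHED_POWER_SCALE assumed to be 1024).
 */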
4126 * fix_small_imbalance - Calculate the minor imbalance that exists
4127 * amongst the groups of a sched_domain, during
4129 * @env: The load balancing environment.
4130 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
4133 void fix_small_imbalance(struct lb_env
*env
, struct sd_lb_stats
*sds
)
4135 unsigned long tmp
, pwr_now
= 0, pwr_move
= 0;
4136 unsigned int imbn
= 2;
4137 unsigned long scaled_busy_load_per_task
;
4139 if (sds
->this_nr_running
) {
4140 sds
->this_load_per_task
/= sds
->this_nr_running
;
4141 if (sds
->busiest_load_per_task
>
4142 sds
->this_load_per_task
)
4145 sds
->this_load_per_task
=
4146 cpu_avg_load_per_task(env
->dst_cpu
);
4149 scaled_busy_load_per_task
= sds
->busiest_load_per_task
4150 * SCHED_POWER_SCALE
;
4151 scaled_busy_load_per_task
/= sds
->busiest
->sgp
->power
;
4153 if (sds
->max_load
- sds
->this_load
+ scaled_busy_load_per_task
>=
4154 (scaled_busy_load_per_task
* imbn
)) {
4155 env
->imbalance
= sds
->busiest_load_per_task
;
4160 * OK, we don't have enough imbalance to justify moving tasks,
4161 * however we may be able to increase total CPU power used by
4165 pwr_now
+= sds
->busiest
->sgp
->power
*
4166 min(sds
->busiest_load_per_task
, sds
->max_load
);
4167 pwr_now
+= sds
->this->sgp
->power
*
4168 min(sds
->this_load_per_task
, sds
->this_load
);
4169 pwr_now
/= SCHED_POWER_SCALE
;
4171 /* Amount of load we'd subtract */
4172 tmp
= (sds
->busiest_load_per_task
* SCHED_POWER_SCALE
) /
4173 sds
->busiest
->sgp
->power
;
4174 if (sds
->max_load
> tmp
)
4175 pwr_move
+= sds
->busiest
->sgp
->power
*
4176 min(sds
->busiest_load_per_task
, sds
->max_load
- tmp
);
4178 /* Amount of load we'd add */
4179 if (sds
->max_load
* sds
->busiest
->sgp
->power
<
4180 sds
->busiest_load_per_task
* SCHED_POWER_SCALE
)
4181 tmp
= (sds
->max_load
* sds
->busiest
->sgp
->power
) /
4182 sds
->this->sgp
->power
;
4184 tmp
= (sds
->busiest_load_per_task
* SCHED_POWER_SCALE
) /
4185 sds
->this->sgp
->power
;
4186 pwr_move
+= sds
->this->sgp
->power
*
4187 min(sds
->this_load_per_task
, sds
->this_load
+ tmp
);
4188 pwr_move
/= SCHED_POWER_SCALE
;
4190 /* Move if we gain throughput */
4191 if (pwr_move
> pwr_now
)
4192 env
->imbalance
= sds
->busiest_load_per_task
;
4196 * calculate_imbalance - Calculate the amount of imbalance present within the
4197 * groups of a given sched_domain during load balance.
4198 * @env: load balance environment
4199 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
4201 static inline void calculate_imbalance(struct lb_env
*env
, struct sd_lb_stats
*sds
)
4203 unsigned long max_pull
, load_above_capacity
= ~0UL;
4205 sds
->busiest_load_per_task
/= sds
->busiest_nr_running
;
4206 if (sds
->group_imb
) {
4207 sds
->busiest_load_per_task
=
4208 min(sds
->busiest_load_per_task
, sds
->avg_load
);
4212 * In the presence of smp nice balancing, certain scenarios can have
4213 * max load less than avg load(as we skip the groups at or below
4214 * its cpu_power, while calculating max_load..)
4216 if (sds
->max_load
< sds
->avg_load
) {
4218 return fix_small_imbalance(env
, sds
);
4221 if (!sds
->group_imb
) {
4223 * Don't want to pull so many tasks that a group would go idle.
4225 load_above_capacity
= (sds
->busiest_nr_running
-
4226 sds
->busiest_group_capacity
);
4228 load_above_capacity
*= (SCHED_LOAD_SCALE
* SCHED_POWER_SCALE
);
4230 load_above_capacity
/= sds
->busiest
->sgp
->power
;
4234 * We're trying to get all the cpus to the average_load, so we don't
4235 * want to push ourselves above the average load, nor do we wish to
4236 * reduce the max loaded cpu below the average load. At the same time,
4237 * we also don't want to reduce the group load below the group capacity
4238 * (so that we can implement power-savings policies etc). Thus we look
4239 * for the minimum possible imbalance.
4240 * Be careful of negative numbers as they'll appear as very large values
4241 * with unsigned longs.
4243 max_pull
= min(sds
->max_load
- sds
->avg_load
, load_above_capacity
);
4245 /* How much load to actually move to equalise the imbalance */
4246 env
->imbalance
= min(max_pull
* sds
->busiest
->sgp
->power
,
4247 (sds
->avg_load
- sds
->this_load
) * sds
->this->sgp
->power
)
4248 / SCHED_POWER_SCALE
;
4251 * if *imbalance is less than the average load per runnable task
4252 * there is no guarantee that any tasks will be moved so we'll have
4253 * a think about bumping its value to force at least one task to be
4256 if (env
->imbalance
< sds
->busiest_load_per_task
)
4257 return fix_small_imbalance(env
, sds
);
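
/*
 * Illustrative sketch, not part of the scheduler: suppose avg_load == 1000,
 * max_load == 1400, this_load == 800, load_above_capacity is large and both
 * groups run at full power (1024).  Then max_pull == min(1400 - 1000, ...) ==
 * 400 and the imbalance becomes min(400 * 1024, (1000 - 800) * 1024) / 1024
 * == 200: we pull only enough to lift the local group to the average without
 * dragging the busiest group below it (all numbers made up).
 */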
4261 /******* find_busiest_group() helpers end here *********************/
4264 * find_busiest_group - Returns the busiest group within the sched_domain
4265 * if there is an imbalance. If there isn't an imbalance, and
4266 * the user has opted for power-savings, it returns a group whose
4267 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4268 * such a group exists.
4270 * Also calculates the amount of weighted load which should be moved
4271 * to restore balance.
4273 * @env: The load balancing environment.
4274 * @balance: Pointer to a variable indicating if this_cpu
4275 * is the appropriate cpu to perform load balancing at this_level.
4277 * Returns: - the busiest group if imbalance exists.
4278 * - If no imbalance and user has opted for power-savings balance,
4279 * return the least loaded group whose CPUs can be
4280 * put to idle by rebalancing its tasks onto our group.
4282 static struct sched_group
*
4283 find_busiest_group(struct lb_env
*env
, int *balance
)
4285 struct sd_lb_stats sds
;
4287 memset(&sds
, 0, sizeof(sds
));
	 * Compute the various statistics relevant for load balancing at
4293 update_sd_lb_stats(env
, balance
, &sds
);
4296 * this_cpu is not the appropriate cpu to perform load balancing at
4302 if ((env
->idle
== CPU_IDLE
|| env
->idle
== CPU_NEWLY_IDLE
) &&
4303 check_asym_packing(env
, &sds
))
4306 /* There is no busy sibling group to pull tasks from */
4307 if (!sds
.busiest
|| sds
.busiest_nr_running
== 0)
4310 sds
.avg_load
= (SCHED_POWER_SCALE
* sds
.total_load
) / sds
.total_pwr
;
4313 * If the busiest group is imbalanced the below checks don't
	 * work because they assume all things are equal, which typically
4315 * isn't true due to cpus_allowed constraints and the like.
4320 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4321 if (env
->idle
== CPU_NEWLY_IDLE
&& sds
.this_has_capacity
&&
4322 !sds
.busiest_has_capacity
)
4326 * If the local group is more busy than the selected busiest group
4327 * don't try and pull any tasks.
4329 if (sds
.this_load
>= sds
.max_load
)
4333 * Don't pull any tasks if this group is already above the domain
4336 if (sds
.this_load
>= sds
.avg_load
)
4339 if (env
->idle
== CPU_IDLE
) {
4341 * This cpu is idle. If the busiest group load doesn't
4342 * have more tasks than the number of available cpu's and
4343 * there is no imbalance between this and busiest group
		 * wrt idle cpus, it is balanced.
4346 if ((sds
.this_idle_cpus
<= sds
.busiest_idle_cpus
+ 1) &&
4347 sds
.busiest_nr_running
<= sds
.busiest_group_weight
)
4351 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4352 * imbalance_pct to be conservative.
4354 if (100 * sds
.max_load
<= env
->sd
->imbalance_pct
* sds
.this_load
)
4359 /* Looks like there is an imbalance. Compute it */
4360 calculate_imbalance(env
, &sds
);
4370 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4372 static struct rq
*find_busiest_queue(struct lb_env
*env
,
4373 struct sched_group
*group
)
4375 struct rq
*busiest
= NULL
, *rq
;
4376 unsigned long max_load
= 0;
4379 for_each_cpu(i
, sched_group_cpus(group
)) {
4380 unsigned long power
= power_of(i
);
4381 unsigned long capacity
= DIV_ROUND_CLOSEST(power
,
4386 capacity
= fix_small_capacity(env
->sd
, group
);
4388 if (!cpumask_test_cpu(i
, env
->cpus
))
4392 wl
= weighted_cpuload(i
);
4395 * When comparing with imbalance, use weighted_cpuload()
4396 * which is not scaled with the cpu power.
4398 if (capacity
&& rq
->nr_running
== 1 && wl
> env
->imbalance
)
4402 * For the load comparisons with the other cpu's, consider
4403 * the weighted_cpuload() scaled with the cpu power, so that
4404 * the load can be moved away from the cpu that is potentially
4405 * running at a lower capacity.
4407 wl
= (wl
* SCHED_POWER_SCALE
) / power
;
4409 if (wl
> max_load
) {
4419 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
4420 * so long as it is large enough.
4422 #define MAX_PINNED_INTERVAL 512
4424 /* Working cpumask for load_balance and load_balance_newidle. */
4425 DEFINE_PER_CPU(cpumask_var_t
, load_balance_tmpmask
);
4427 static int need_active_balance(struct lb_env
*env
)
4429 struct sched_domain
*sd
= env
->sd
;
4431 if (env
->idle
== CPU_NEWLY_IDLE
) {
4434 * ASYM_PACKING needs to force migrate tasks from busy but
4435 * higher numbered CPUs in order to pack all tasks in the
4436 * lowest numbered CPUs.
4438 if ((sd
->flags
& SD_ASYM_PACKING
) && env
->src_cpu
> env
->dst_cpu
)
4442 return unlikely(sd
->nr_balance_failed
> sd
->cache_nice_tries
+2);
4445 static int active_load_balance_cpu_stop(void *data
);
4448 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4449 * tasks if there is an imbalance.
4451 static int load_balance(int this_cpu
, struct rq
*this_rq
,
4452 struct sched_domain
*sd
, enum cpu_idle_type idle
,
4455 int ld_moved
, cur_ld_moved
, active_balance
= 0;
4456 int lb_iterations
, max_lb_iterations
;
4457 struct sched_group
*group
;
4459 unsigned long flags
;
4460 struct cpumask
*cpus
= __get_cpu_var(load_balance_tmpmask
);
4462 struct lb_env env
= {
4464 .dst_cpu
= this_cpu
,
4466 .dst_grpmask
= sched_group_cpus(sd
->groups
),
4468 .loop_break
= sched_nr_migrate_break
,
4472 cpumask_copy(cpus
, cpu_active_mask
);
4473 max_lb_iterations
= cpumask_weight(env
.dst_grpmask
);
4475 schedstat_inc(sd
, lb_count
[idle
]);
4478 group
= find_busiest_group(&env
, balance
);
4484 schedstat_inc(sd
, lb_nobusyg
[idle
]);
4488 busiest
= find_busiest_queue(&env
, group
);
4490 schedstat_inc(sd
, lb_nobusyq
[idle
]);
4494 BUG_ON(busiest
== env
.dst_rq
);
4496 schedstat_add(sd
, lb_imbalance
[idle
], env
.imbalance
);
4500 if (busiest
->nr_running
> 1) {
4502 * Attempt to move tasks. If find_busiest_group has found
4503 * an imbalance but busiest->nr_running <= 1, the group is
4504 * still unbalanced. ld_moved simply stays zero, so it is
4505 * correctly treated as an imbalance.
4507 env
.flags
|= LBF_ALL_PINNED
;
4508 env
.src_cpu
= busiest
->cpu
;
4509 env
.src_rq
= busiest
;
4510 env
.loop_max
= min(sysctl_sched_nr_migrate
, busiest
->nr_running
);
4512 update_h_load(env
.src_cpu
);
4514 local_irq_save(flags
);
4515 double_rq_lock(env
.dst_rq
, busiest
);
4518 * cur_ld_moved - load moved in current iteration
4519 * ld_moved - cumulative load moved across iterations
4521 cur_ld_moved
= move_tasks(&env
);
4522 ld_moved
+= cur_ld_moved
;
4523 double_rq_unlock(env
.dst_rq
, busiest
);
4524 local_irq_restore(flags
);
4526 if (env
.flags
& LBF_NEED_BREAK
) {
4527 env
.flags
&= ~LBF_NEED_BREAK
;
4532 * some other cpu did the load balance for us.
4534 if (cur_ld_moved
&& env
.dst_cpu
!= smp_processor_id())
4535 resched_cpu(env
.dst_cpu
);
4538 * Revisit (affine) tasks on src_cpu that couldn't be moved to
4539 * us and move them to an alternate dst_cpu in our sched_group
4540 * where they can run. The upper limit on how many times we
4541 * iterate on same src_cpu is dependent on number of cpus in our
4544 * This changes load balance semantics a bit on who can move
4545 * load to a given_cpu. In addition to the given_cpu itself
		 * (or an ilb_cpu acting on its behalf where given_cpu is
4547 * nohz-idle), we now have balance_cpu in a position to move
4548 * load to given_cpu. In rare situations, this may cause
4549 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
4550 * _independently_ and at _same_ time to move some load to
		 * given_cpu) causing excess load to be moved to given_cpu.
4552 * This however should not happen so much in practice and
4553 * moreover subsequent load balance cycles should correct the
4554 * excess load moved.
4556 if ((env
.flags
& LBF_SOME_PINNED
) && env
.imbalance
> 0 &&
4557 lb_iterations
++ < max_lb_iterations
) {
4559 env
.dst_rq
= cpu_rq(env
.new_dst_cpu
);
4560 env
.dst_cpu
= env
.new_dst_cpu
;
4561 env
.flags
&= ~LBF_SOME_PINNED
;
4563 env
.loop_break
= sched_nr_migrate_break
;
4565 * Go back to "more_balance" rather than "redo" since we
4566 * need to continue with same src_cpu.
4571 /* All tasks on this runqueue were pinned by CPU affinity */
4572 if (unlikely(env
.flags
& LBF_ALL_PINNED
)) {
4573 cpumask_clear_cpu(cpu_of(busiest
), cpus
);
4574 if (!cpumask_empty(cpus
)) {
4576 env
.loop_break
= sched_nr_migrate_break
;
4584 schedstat_inc(sd
, lb_failed
[idle
]);
4586 * Increment the failure counter only on periodic balance.
4587 * We do not want newidle balance, which can be very
4588 * frequent, pollute the failure counter causing
4589 * excessive cache_hot migrations and active balances.
4591 if (idle
!= CPU_NEWLY_IDLE
)
4592 sd
->nr_balance_failed
++;
4594 if (need_active_balance(&env
)) {
4595 raw_spin_lock_irqsave(&busiest
->lock
, flags
);
4597 /* don't kick the active_load_balance_cpu_stop,
4598 * if the curr task on busiest cpu can't be
4601 if (!cpumask_test_cpu(this_cpu
,
4602 tsk_cpus_allowed(busiest
->curr
))) {
4603 raw_spin_unlock_irqrestore(&busiest
->lock
,
4605 env
.flags
|= LBF_ALL_PINNED
;
4606 goto out_one_pinned
;
4610 * ->active_balance synchronizes accesses to
4611 * ->active_balance_work. Once set, it's cleared
4612 * only after active load balance is finished.
4614 if (!busiest
->active_balance
) {
4615 busiest
->active_balance
= 1;
4616 busiest
->push_cpu
= this_cpu
;
4619 raw_spin_unlock_irqrestore(&busiest
->lock
, flags
);
4621 if (active_balance
) {
4622 stop_one_cpu_nowait(cpu_of(busiest
),
4623 active_load_balance_cpu_stop
, busiest
,
4624 &busiest
->active_balance_work
);
4628 * We've kicked active balancing, reset the failure
4631 sd
->nr_balance_failed
= sd
->cache_nice_tries
+1;
4634 sd
->nr_balance_failed
= 0;
4636 if (likely(!active_balance
)) {
4637 /* We were unbalanced, so reset the balancing interval */
4638 sd
->balance_interval
= sd
->min_interval
;
4641 * If we've begun active balancing, start to back off. This
4642 * case may not be covered by the all_pinned logic if there
4643 * is only 1 task on the busy runqueue (because we don't call
4646 if (sd
->balance_interval
< sd
->max_interval
)
4647 sd
->balance_interval
*= 2;
4653 schedstat_inc(sd
, lb_balanced
[idle
]);
4655 sd
->nr_balance_failed
= 0;
4658 /* tune up the balancing interval */
4659 if (((env
.flags
& LBF_ALL_PINNED
) &&
4660 sd
->balance_interval
< MAX_PINNED_INTERVAL
) ||
4661 (sd
->balance_interval
< sd
->max_interval
))
4662 sd
->balance_interval
*= 2;
/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
void idle_balance(int this_cpu, struct rq *this_rq)
{
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;

	this_rq->idle_stamp = this_rq->clock;

	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		return;

	/*
	 * Drop the rq->lock, but keep IRQ/preempt disabled.
	 */
	raw_spin_unlock(&this_rq->lock);

	update_shares(this_cpu);
	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
		int balance = 1;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		if (sd->flags & SD_BALANCE_NEWIDLE) {
			/* If we've pulled tasks over stop searching: */
			pulled_task = load_balance(this_cpu, this_rq,
						   sd, CPU_NEWLY_IDLE, &balance);
		}

		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
			next_balance = sd->last_balance + interval;
		if (pulled_task) {
			this_rq->idle_stamp = 0;
			break;
		}
	}
	rcu_read_unlock();

	raw_spin_lock(&this_rq->lock);

	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
		/*
		 * We are going idle. next_balance may be set based on
		 * a busy processor. So reset next_balance.
		 */
		this_rq->next_balance = next_balance;
	}
}
/*
 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
 */
static int active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;
	int busiest_cpu = cpu_of(busiest_rq);
	int target_cpu = busiest_rq->push_cpu;
	struct rq *target_rq = cpu_rq(target_cpu);
	struct sched_domain *sd;

	raw_spin_lock_irq(&busiest_rq->lock);

	/* make sure the requested cpu hasn't gone down in the meantime */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))
		goto out_unlock;

	/* Is there any task to move? */
	if (busiest_rq->nr_running <= 1)
		goto out_unlock;

	/*
	 * This condition is "impossible"; if it occurs
	 * we need to fix it. Originally reported by
	 * Bjorn Helgaas on a 128-cpu setup.
	 */
	BUG_ON(busiest_rq == target_rq);

	/* move a task from busiest_rq to target_rq */
	double_lock_balance(busiest_rq, target_rq);

	/* Search for an sd spanning us and the target CPU. */
	rcu_read_lock();
	for_each_domain(target_cpu, sd) {
		if ((sd->flags & SD_LOAD_BALANCE) &&
		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
			break;
	}

	if (likely(sd)) {
		struct lb_env env = {
			.sd		= sd,
			.dst_cpu	= target_cpu,
			.dst_rq		= target_rq,
			.src_cpu	= busiest_rq->cpu,
			.src_rq		= busiest_rq,
		};

		schedstat_inc(sd, alb_count);

		if (move_one_task(&env))
			schedstat_inc(sd, alb_pushed);
		else
			schedstat_inc(sd, alb_failed);
	}
	rcu_read_unlock();
	double_unlock_balance(busiest_rq, target_rq);
out_unlock:
	busiest_rq->active_balance = 0;
	raw_spin_unlock_irq(&busiest_rq->lock);
	return 0;
}
#ifdef CONFIG_NO_HZ
/*
 * idle load balancing details
 * - When one of the busy CPUs notices that there may be an idle rebalancing
 *   needed, it will kick the idle load balancer, which then does idle
 *   load balancing for all the idle CPUs.
 */
static struct {
	cpumask_var_t idle_cpus_mask;
	atomic_t nr_cpus;
	unsigned long next_balance;	/* in jiffy units */
} nohz ____cacheline_aligned;
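/*
 * Pick the CPU that should run the next round of idle (nohz) balancing:
 * the first CPU in nohz.idle_cpus_mask that is still idle, or nr_cpu_ids
 * if no suitable CPU is found.
 */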
static inline int find_new_ilb(int call_cpu)
{
	int ilb = cpumask_first(nohz.idle_cpus_mask);

	if (ilb < nr_cpu_ids && idle_cpu(ilb))
		return ilb;

	return nr_cpu_ids;
}
/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 * nohz_load_balancer CPU (if there is one) otherwise fall back to any idle
 * CPU (if there is one).
 */
static void nohz_balancer_kick(int cpu)
{
	int ilb_cpu;

	nohz.next_balance++;

	ilb_cpu = find_new_ilb(cpu);

	if (ilb_cpu >= nr_cpu_ids)
		return;

	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
		return;
	/*
	 * Use smp_send_reschedule() instead of resched_cpu().
	 * This way we generate a sched IPI on the target cpu which
	 * is idle. And the softirq performing nohz idle load balance
	 * will be run before returning from the IPI.
	 */
	smp_send_reschedule(ilb_cpu);
}
static inline void nohz_balance_exit_idle(int cpu)
{
	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
		atomic_dec(&nohz.nr_cpus);
		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
	}
}
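/*
 * The NOHZ_IDLE flag and the sgp->nr_busy_cpus counters are kept in sync by
 * the two helpers below: set_cpu_sd_state_busy() runs on the first busy tick
 * after idle (see nohz_kick_needed()), set_cpu_sd_state_idle() from the idle
 * entry code, so each CPU is counted at most once per sched_group.
 */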
static inline void set_cpu_sd_state_busy(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
		return;
	clear_bit(NOHZ_IDLE, nohz_flags(cpu));

	rcu_read_lock();
	for_each_domain(cpu, sd)
		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
	rcu_read_unlock();
}
void set_cpu_sd_state_idle(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
		return;
	set_bit(NOHZ_IDLE, nohz_flags(cpu));

	rcu_read_lock();
	for_each_domain(cpu, sd)
		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
	rcu_read_unlock();
}
/*
 * Record that this cpu is going idle with its tick stopped.
 * This info will be used in performing idle load balancing in the future.
 */
void nohz_balance_enter_idle(int cpu)
{
	/*
	 * If this cpu is going down, then nothing needs to be done.
	 */
	if (!cpu_active(cpu))
		return;

	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
		return;

	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
	atomic_inc(&nohz.nr_cpus);
	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		nohz_balance_exit_idle(smp_processor_id());
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif /* CONFIG_NO_HZ */
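/*
 * 'balancing' (below) serializes load balancing for sched domains flagged
 * SD_SERIALIZE; rebalance_domains() only takes it with spin_trylock() and
 * skips the domain if another CPU already holds it.
 */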
static DEFINE_SPINLOCK(balancing);
/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}
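/*
 * For example, with HZ=1000 and 8 CPUs online this yields a maximum
 * balance interval of 1000*8/10 = 800 jiffies (800ms); the per-domain
 * interval is clamped to this value in rebalance_domains().
 */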
/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in arch_init_sched_domains.
 */
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
	int balance = 1;
	struct rq *rq = cpu_rq(cpu);
	unsigned long interval;
	struct sched_domain *sd;
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize;

	update_shares(cpu);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		interval = sd->balance_interval;
		if (idle != CPU_IDLE)
			interval *= sd->busy_factor;

		/* scale ms to jiffies */
		interval = msecs_to_jiffies(interval);
		interval = clamp(interval, 1UL, max_load_balance_interval);

		need_serialize = sd->flags & SD_SERIALIZE;

		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over, so we're no
				 * longer idle.
				 */
				idle = CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!balance)
			break;
	}
	rcu_read_unlock();

	/*
	 * next_balance will be updated only when there is a need.
	 * When the cpu is attached to a null domain, for example, it will
	 * not be updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}
#ifdef CONFIG_NO_HZ
/*
 * In the CONFIG_NO_HZ case, the CPU that was kicked does the idle
 * rebalancing on behalf of all the CPUs whose scheduler ticks are stopped.
 */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
	struct rq *this_rq = cpu_rq(this_cpu);
	struct rq *rq;
	int balance_cpu;

	if (idle != CPU_IDLE ||
	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
		goto end;

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. Next load
		 * balancing owner will pick it up.
		 */
		if (need_resched())
			break;

		rq = cpu_rq(balance_cpu);

		raw_spin_lock_irq(&rq->lock);
		update_rq_clock(rq);
		update_idle_cpu_load(rq);
		raw_spin_unlock_irq(&rq->lock);

		rebalance_domains(balance_cpu, CPU_IDLE);

		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
end:
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}
/*
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle cpu in the system:
 *   - This rq has more than one task.
 *   - At any scheduler domain level, this cpu's scheduler group has multiple
 *     busy CPUs exceeding the group's power.
 *   - For SD_ASYM_PACKING, if the lower numbered CPUs in the scheduler
 *     domain span are idle.
 */
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
	unsigned long now = jiffies;
	struct sched_domain *sd;

	if (unlikely(idle_cpu(cpu)))
		return 0;

	/*
	 * We may have recently been in ticked or tickless idle mode. At the
	 * first busy tick after returning from idle, we will update the
	 * busy stats.
	 */
	set_cpu_sd_state_busy();
	nohz_balance_exit_idle(cpu);

	/*
	 * None are in tickless mode and hence no need for NOHZ idle load
	 * balancing.
	 */
	if (likely(!atomic_read(&nohz.nr_cpus)))
		return 0;

	if (time_before(now, nohz.next_balance))
		return 0;

	if (rq->nr_running >= 2)
		goto need_kick;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		struct sched_group *sg = sd->groups;
		struct sched_group_power *sgp = sg->sgp;
		int nr_busy = atomic_read(&sgp->nr_busy_cpus);

		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
			goto need_kick_unlock;

		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
		    && (cpumask_first_and(nohz.idle_cpus_mask,
					  sched_domain_span(sd)) < cpu))
			goto need_kick_unlock;

		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
			break;
	}
	rcu_read_unlock();
	return 0;

need_kick_unlock:
	rcu_read_unlock();
need_kick:
	return 1;
}
#else	/* !CONFIG_NO_HZ */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif
/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_cpu, idle);
}
static inline int on_null_domain(int cpu)
{
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}
/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq, int cpu)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ
	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
		nohz_balancer_kick(cpu);
#endif
}
static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();

	/* Ensure any throttled groups are reachable by pick_next_task */
	unthrottle_offline_cfs_rqs(rq);
}

#endif /* CONFIG_SMP */
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}

	if (sched_feat_numa(NUMA))
		task_tick_numa(rq, curr);
}
/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;

	if (unlikely(task_cpu(p) != this_cpu)) {
		rcu_read_lock();
		__set_task_cpu(p, this_cpu);
		rcu_read_unlock();
	}

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->se.on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it's
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime; if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause an 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}
}
/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->se.on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}
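/*
 * Note: min_vruntime below is initialised to a value just short of the u64
 * wrap-around point (-(1LL << 20)), presumably so that vruntime wrapping is
 * exercised early rather than only after very long uptimes.
 */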
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep; sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	/*
	 * When !on_rq, vruntime of the task has usually NOT been normalized.
	 * But there are some cases where it has already been normalized:
	 *
	 * - Moving a forked child which is waiting for being woken up by
	 *   wake_up_new_task().
	 * - Moving a task which has been woken up by try_to_wake_up() and
	 *   waiting for actually being woken up by sched_ttwu_pending().
	 *
	 * To prevent boost or penalty in the new cfs_rq caused by delta
	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
	 */
	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
		on_rq = 1;

	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq)
		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}
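/*
 * Allocate and initialise the per-CPU cfs_rq and sched_entity arrays for a
 * new task group, hooking each entry into @parent. Returns 1 on success,
 * 0 on allocation failure.
 */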
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}
void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
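/*
 * Wire up one per-CPU cfs_rq and its group sched_entity for @tg on @cpu;
 * @parent is the group entity of the parent task group (NULL for the root).
 */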
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
	/* allow initial update_cfs_load() to truncate */
	cfs_rq->load_stamp = 1;
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	update_load_set(&se->load, 0);
	se->parent = parent;
}
static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */
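/*
 * Round-robin interval reported for SCHED_NORMAL/SCHED_BATCH tasks
 * (e.g. via sched_rr_get_interval()): the task's current CFS slice,
 * or 0 on an otherwise idle runqueue.
 */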
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}
/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif
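/*
 * Called once at boot: register the SCHED_SOFTIRQ handler and, when nohz
 * idle balancing is available, set up the nohz state and CPU notifier.
 */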
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ
	nohz.next_balance = jiffies;
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* CONFIG_SMP */
}