kernel/sched/sched.h
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

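/*
 * Worked example (illustrative; assumes the usual MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140): NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120 and
 * NICE_TO_PRIO(19) == 139, so the nice range maps onto static priorities
 * 100..139 and PRIO_TO_NICE() simply inverts that mapping.
 */
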
/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

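/*
 * Illustrative note (not part of the original source): with the increased
 * resolution disabled above, SCHED_LOAD_SCALE == 1 << 10 == 1024, so
 * NICE_0_LOAD matches the nice-0 entry of prio_to_weight[] further down;
 * with it enabled, scale_load(1024) == 1024 << 10 and scale_load_down()
 * undoes the shift.
 */
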
/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
	atomic64_t load_avg;
	atomic_t runnable_avg;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

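/*
 * Illustrative sketch (not part of the original header): a minimal visitor
 * that counts task groups.  'count_tg' and 'nr_groups' are hypothetical
 * names used only for this example; tg_nop() serves as the no-op @up hook.
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr_groups = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &nr_groups);
 *	rcu_read_unlock();
 */
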
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
/*
 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
 * removed when useful for applications beyond shares distribution (e.g.
 * load-balance).
 */
#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
	 * This allows for the description of both thread and group usage (in
	 * the FAIR_GROUP_SCHED case).
	 */
	u64 runnable_load_avg, blocked_load_avg;
	atomic64_t decay_counter, removed_load;
	u64 last_decay;
#endif /* CONFIG_FAIR_GROUP_SCHED */
/* These always depend on CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
	u32 tg_runnable_contrib;
	u64 tg_load_contrib;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must order its lock
 * acquisitions by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#ifdef CONFIG_SMP
	unsigned long h_load_throttle;
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

	struct sched_avg avg;
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_SMP

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

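/*
 * Illustrative usage sketch (not from this file): the per-cpu sd_llc
 * pointer declared below is the kind of thing this helper is used for,
 * e.g.
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *
 * which yields the highest domain whose CPUs still share a last-level
 * cache, assuming SD_SHARE_PKG_RESOURCES is set on such domains.
 */
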
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgp->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_subsys_state() and friends because the cgroup
 * subsystem changes that value before the cgroup_subsys::attach() method
 * is called, therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct static_key *key)
{
	return static_key_true(key); /* Not out of line branch. */
}

static __always_inline bool static_branch__false(struct static_key *key)
{
	return static_key_false(key); /* Out of line branch. */
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_branch__##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

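/*
 * Worked example (illustrative, using the usual defaults of
 * sched_rt_period_us == 1000000 and sched_rt_runtime_us == 950000):
 * global_rt_period() == 1s and global_rt_runtime() == 0.95s expressed in
 * nanoseconds, i.e. RT tasks may consume at most 95% of each period.
 * Writing -1 to sched_rt_runtime_us yields RUNTIME_INF (no throttling).
 */
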

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}


#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
	raw_spin_unlock(&rq->lock);
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
	local_irq_enable();
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

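/*
 * Worked example (illustrative): two CPU-bound tasks on one CPU, one at
 * nice 0 (weight 1024) and one at nice 5 (weight 335), split the CPU as
 * 1024 / (1024 + 335) ~= 75% versus ~25%, which is the cumulative
 * "roughly 10% per nice level" effect described above.
 */
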
/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif

#define DEQUEUE_SLEEP		1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

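/*
 * Illustrative sketch (not the actual core scheduler code): the class list
 * above is typically walked highest-priority class first to pick the next
 * task to run, roughly like so:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */
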
#ifdef CONFIG_SMP

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else	/* CONFIG_SMP */

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

extern struct cgroup_subsys cpuacct_subsys;
extern struct cpuacct root_cpuacct;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif

#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
	if (unlikely(steal > NSEC_PER_SEC))
		return div_u64(steal, TICK_NSEC);

	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

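/*
 * Illustrative usage sketch (not from this file), showing the interrupt
 * handling the comments above require from the caller:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	double_rq_lock(rq1, rq2);
 *	... migrate tasks between rq1 and rq2 ...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_restore(flags);
 */
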
#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */