kernel/sched_stats.h

#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = NR_CPUS/32 * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_both_empty,
		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_sched_info.cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len, sd->span);
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
			    " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	kfree(mask_str);
	return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
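
/*
 * Illustrative /proc/schedstat output (editorial note, not part of the
 * original file; the numbers are made up, fields follow the seq_printf()
 * formats above):
 *
 *	version 14
 *	timestamp 4294937296
 *	cpu0 0 0 0 2045 1234 10752 8707 3841 1902 620928794 90347896 10752
 *	domain0 3 887 887 0 0 0 0 0 887 ...
 *
 * One "cpu<N>" line is emitted per online cpu; on SMP it is followed by
 * one "domain<N>" line per scheduling domain of that cpu, carrying eight
 * load-balance counters per idle type plus twelve further counters.
 */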

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
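/*
 * Usage sketch (editorial note, not part of the original file; "gained"
 * is a hypothetical local variable):
 *
 *	schedstat_inc(rq, ttwu_count);		     -> rq->ttwu_count++
 *	schedstat_add(sd, lb_gained[itype], gained); -> sd->lb_gained[itype] += gained
 *
 * When CONFIG_SCHEDSTATS is off, the #else definitions below compile
 * these to empty statements, so call sites need no #ifdef guards.
 */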
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long the
 * task had to wait to reach the cpu.  Since the expired queue will
 * become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either.  It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can happen
 * in sched_yield(), set_user_nice(), and even load_balance() as it moves
 * tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	t->sched_info.cpu_time += delta;
	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
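/*
 * Context sketch (editorial note; the call site is assumed from mainline
 * kernels of the same vintage, not confirmed in this tree): the core
 * scheduler is expected to invoke this just before performing the switch,
 * with the runqueue lock held and prev != next:
 *
 *	sched_info_switch(prev, next);
 *	context_switch(rq, prev, next);
 */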
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct signal_struct *sig;

	sig = tsk->signal;
	if (unlikely(!sig))
		return;
	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->utime = cputime_add(times->utime, cputime);
		put_cpu_no_resched();
	}
}
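/*
 * Note (editorial): get_cpu() disables preemption and returns the current
 * CPU id, so the per-cpu totals entry cannot move out from under us while
 * it is updated; put_cpu_no_resched() re-enables preemption without an
 * immediate reschedule check.  The two accessors below repeat the same
 * pattern for stime and sum_exec_runtime.
 */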

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct signal_struct *sig;

	sig = tsk->signal;
	if (unlikely(!sig))
		return;
	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->stime = cputime_add(times->stime, cputime);
		put_cpu_no_resched();
	}
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct signal_struct *sig;

	sig = tsk->signal;
	if (unlikely(!sig))
		return;
	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->sum_exec_runtime += ns;
		put_cpu_no_resched();
	}
}
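
/*
 * Usage sketch (editorial note; caller names are assumed from mainline
 * kernels of the same vintage, not confirmed in this tree):
 * account_group_user_time() and account_group_system_time() are expected
 * to be called from the tick-time accounting paths (account_user_time()
 * and account_system_time()), and account_group_exec_runtime() from the
 * scheduler's update_curr(), e.g.:
 *
 *	account_group_exec_runtime(curtask, delta_exec);
 */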