[RAMEN9610-14454] [COMMON] sched: ems: Fix possibility of slab-out-of-bounds error
GitHub/moto-9609/android_kernel_motorola_exynos9610.git: kernel/sched/stats.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
#define schedstat_enabled()             static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)              do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)         do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)         do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)              (var)
#define schedstat_val_or_zero(var)      ((schedstat_enabled()) ? (var) : 0)
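
/*
 * Illustrative usage (editor's sketch, not part of the original header;
 * rq->yld_count and se.statistics.wait_start are fields these macros are
 * used with elsewhere in kernel/sched/): with the runqueue lock held,
 * callers account events through the static-branch guard, e.g.
 *
 *	schedstat_inc(rq->yld_count);
 *	schedstat_add(rq->rq_sched_info.run_delay, delta);
 *	schedstat_set(p->se.statistics.wait_start, rq_clock(rq));
 *
 * When sched_schedstats is disabled, static_branch_unlikely() reduces
 * each check to a patched no-op, so disabled accounting is nearly free.
 */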

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()             0
#define schedstat_inc(var)              do { } while (0)
#define schedstat_add(var, amt)         do { } while (0)
#define schedstat_set(var, val)         do { } while (0)
#define schedstat_val(var)              0
#define schedstat_val_or_zero(var)      0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}
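
/*
 * Worked example (illustrative, with made-up clock values): a task
 * stamped last_queued = 100 on CPU0 and dequeued there at now = 130
 * accumulates delta = 30 from CPU0's clock alone. If it is then
 * re-queued on CPU1 at 500 by CPU1's (skewed) clock and arrives at 520,
 * the second delta of 20 again uses a single cpu's clock, so run_delay
 * totals 50 and any constant cross-cpu clock skew cancels out of every
 * accumulated interval.
 */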

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}
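
/*
 * The fields updated here are what /proc/<pid>/schedstat reports under
 * CONFIG_SCHED_INFO: cumulative runtime, run_delay and pcount. A minimal
 * userspace reader (illustrative sketch, not kernel code; needs
 * <stdio.h>):
 *
 *	unsigned long long exec_ns, delay_ns, pcount;
 *	FILE *f = fopen("/proc/self/schedstat", "r");
 *
 *	if (f && fscanf(f, "%llu %llu %llu",
 *			&exec_ns, &delay_ns, &pcount) == 3)
 *		printf("waited %llu ns over %llu arrivals\n",
 *		       delay_ns, pcount);
 */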

/*
 * This function is called from enqueue_task() and, below, from
 * sched_info_depart(); it updates the timestamp only if it is not
 * already set. It's assumed that sched_info_dequeued() will clear
 * that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}
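
/*
 * Illustrative sequence: a task preempted at t = 130 is restamped via
 * sched_info_depart() below, giving last_queued = 130; should this
 * helper run again before the task gets back on a cpu, the stamp is
 * left untouched, so the wait measured at sched_info_arrive() still
 * spans the whole interval since preemption.
 */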

/*
 * Called when a process ceases being the active-running process
 * involuntarily, typically because it expired its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran. Also, if the process is still in the TASK_RUNNING
 * state, call sched_info_queued() to mark that it has now again started
 * waiting on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) -
                                        t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}
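
/*
 * Timeline sketch (illustrative, with made-up clock values): a task
 * that arrived at t = 100 and is preempted at t = 130 adds 30 to
 * rq->rq_cpu_time here; because it is still TASK_RUNNING,
 * sched_info_queued() restamps last_queued = 130, so its next
 * run_delay interval starts at the moment of preemption.
 */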

/*
 * Called when tasks are switched involuntarily, typically because they
 * expired their time slice. (This may also be called when switching to
 * or from the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
                    struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the cpu. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
                  struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}
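
/*
 * Call-site sketch (illustrative; see prepare_task_switch() in
 * kernel/sched/core.c): sched_info_switch(rq, prev, next) runs on
 * every context switch, so the sched_info_on() test above keeps the
 * per-switch cost to a single branch whenever the statistics are not
 * being collected.
 */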
#else
#define sched_info_queued(rq, t)                do { } while (0)
#define sched_info_reset_dequeued(t)            do { } while (0)
#define sched_info_dequeued(rq, t)              do { } while (0)
#define sched_info_depart(rq, t)                do { } while (0)
#define sched_info_arrive(rq, next)             do { } while (0)
#define sched_info_switch(rq, t, next)          do { } while (0)
#endif /* CONFIG_SCHED_INFO */