/*
 * Detect Hung Task
 *
 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>

#include <trace/events/sched.h>

/*
 * The number of tasks checked:
 */
int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;

/*
 * Limit number of tasks checked in a batch.
 *
 * This value controls the preemptibility of khungtaskd since preemption
 * is disabled during the critical section. It also controls the size of
 * the RCU grace period. So it needs to be upper-bound.
 */
#define HUNG_TASK_LOCK_BREAK (HZ / 10)

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;

int __read_mostly sysctl_hung_task_warnings = 10;

static int __read_mostly did_panic;
static bool hung_task_show_lock;
static bool hung_task_call_panic;

static struct task_struct *watchdog_task;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * hung task is detected:
 */
unsigned int __read_mostly sysctl_hung_task_panic =
				CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;

static int __init hung_task_panic_setup(char *str)
{
	int rc = kstrtouint(str, 0, &sysctl_hung_task_panic);

	if (rc)
		return rc;
	return 1;
}
__setup("hung_task_panic=", hung_task_panic_setup);
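/*
 * Configuration example (illustrative note, not part of the original file):
 * panic-on-hang can be enabled either at boot with "hung_task_panic=1" on
 * the kernel command line (parsed by the __setup() hook above) or at
 * runtime via the corresponding sysctl, e.g.
 * "echo 1 > /proc/sys/kernel/hung_task_panic".
 */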
static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = hung_task_panic,
};

static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	/*
	 * Ensure the task is not frozen.
	 * Also, skip vfork and any other user process that freezer should skip.
	 */
	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
		return;

	/*
	 * When a freshly created task is scheduled once and changes its state
	 * to TASK_UNINTERRUPTIBLE without having ever been switched out, it
	 * mustn't be checked.
	 */
	if (unlikely(!switch_count))
		return;

	if (switch_count != t->last_switch_count) {
		t->last_switch_count = switch_count;
		return;
	}

	trace_sched_process_hang(t);

	if (sysctl_hung_task_panic) {
		console_verbose();
		hung_task_show_lock = true;
		hung_task_call_panic = true;
	}

	/*
	 * Ok, the task did not get scheduled for more than 2 minutes,
	 * complain:
	 */
	if (sysctl_hung_task_warnings) {
		if (sysctl_hung_task_warnings > 0)
			sysctl_hung_task_warnings--;
		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
		       t->comm, t->pid, timeout);
		pr_err("      %s %s %.*s\n",
		       print_tainted(), init_utsname()->release,
		       (int)strcspn(init_utsname()->version, " "),
		       init_utsname()->version);
		pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
		       " disables this message.\n");
		sched_show_task(t);
		hung_task_show_lock = true;
	}

	touch_nmi_watchdog();
}

/*
 * To avoid extending the RCU grace period for an unbounded amount of time,
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
 * to exit the grace period. For classic RCU, a reschedule is required.
 */
static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}

/*
 * Check whether a TASK_UNINTERRUPTIBLE task does not get woken up for
 * a really long time (120 seconds by default). If that happens, print
 * out a warning.
 */
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
	int max_count = sysctl_hung_task_check_count;
	unsigned long last_break = jiffies;
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if (test_taint(TAINT_DIE) || did_panic)
		return;

	hung_task_show_lock = false;
	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (!max_count--)
			goto unlock;
		if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
			if (!rcu_lock_break(g, t))
				goto unlock;
			last_break = jiffies;
		}
		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
		if (t->state == TASK_UNINTERRUPTIBLE)
			check_hung_task(t, timeout);
	}
 unlock:
	rcu_read_unlock();
	if (hung_task_show_lock)
		debug_show_all_locks();
	if (hung_task_call_panic) {
		trigger_all_cpu_backtrace();
		panic("hung_task: blocked tasks");
	}
}

static long hung_timeout_jiffies(unsigned long last_checked,
				 unsigned long timeout)
{
	/* timeout of 0 will disable the watchdog */
	return timeout ? last_checked - jiffies + timeout * HZ :
		MAX_SCHEDULE_TIMEOUT;
}
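/*
 * Worked example (illustrative, not part of the original source): with
 * sysctl_hung_task_timeout_secs == 120 and a check that last ran 30
 * seconds ago, hung_timeout_jiffies() returns roughly 90 * HZ, so the
 * watchdog sleeps for about another 90 seconds:
 *
 *	last_checked = jiffies - 30 * HZ;
 *	t = hung_timeout_jiffies(last_checked, 120);
 *	// t == (jiffies - 30 * HZ) - jiffies + 120 * HZ == 90 * HZ
 *
 * Once the result drops to zero or below, the next scan is due.
 */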
/*
 * Process updating of timeout sysctl
 */
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		goto out;

	wake_up_process(watchdog_task);

 out:
	return ret;
}
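/*
 * Illustrative sketch (assumed, not part of this file): the handler above
 * is reached through a ctl_table entry registered elsewhere (the in-tree
 * entry lives in kernel/sysctl.c). A minimal entry would look roughly like
 * this; the field values are shown for illustration only:
 *
 *	{
 *		.procname	= "hung_task_timeout_secs",
 *		.data		= &sysctl_hung_task_timeout_secs,
 *		.maxlen		= sizeof(unsigned long),
 *		.mode		= 0644,
 *		.proc_handler	= proc_dohung_task_timeout_secs,
 *	},
 *
 * Writing to /proc/sys/kernel/hung_task_timeout_secs then updates the
 * variable via proc_doulongvec_minmax() and wakes khungtaskd so the new
 * timeout takes effect immediately.
 */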
static atomic_t reset_hung_task = ATOMIC_INIT(0);

void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);
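/*
 * Usage sketch (hypothetical caller, shown for illustration only): a driver
 * that legitimately keeps a task in uninterruptible sleep for a long stretch
 * can ask the detector to skip its next scan instead of raising a false
 * warning. Only reset_hung_task_detector() is real; the example_* names are
 * made up:
 *
 *	static int example_flash_firmware(struct example_dev *dev)
 *	{
 *		int ret;
 *
 *		ret = example_do_long_uninterruptible_io(dev);
 *		reset_hung_task_detector();	// skip the next scan
 *		return ret;
 *	}
 */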
static bool hung_detector_suspended;

static int hungtask_pm_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
	case PM_RESTORE_PREPARE:
		hung_detector_suspended = true;
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		hung_detector_suspended = false;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * kthread which checks for tasks stuck in D state
 */
static int watchdog(void *dummy)
{
	unsigned long hung_last_checked = jiffies;

	set_user_nice(current, 0);

	for ( ; ; ) {
		unsigned long timeout = sysctl_hung_task_timeout_secs;
		long t = hung_timeout_jiffies(hung_last_checked, timeout);

		if (t <= 0) {
			if (!atomic_xchg(&reset_hung_task, 0) &&
			    !hung_detector_suspended)
				check_hung_uninterruptible_tasks(timeout);
			hung_last_checked = jiffies;
			continue;
		}
		schedule_timeout_interruptible(t);
	}

	return 0;
}
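/*
 * Test sketch (hypothetical, not part of this file): the detector can be
 * exercised by parking a kernel thread in TASK_UNINTERRUPTIBLE for longer
 * than the configured timeout. After two scans with an unchanged context
 * switch count, check_hung_task() should emit the "blocked for more than
 * N seconds" warning for it. Names prefixed with demo_ are made up:
 *
 *	static int demo_hang_thread(void *unused)
 *	{
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		schedule_timeout(300 * HZ);	// well past the default 120s
 *		return 0;
 *	}
 *
 *	// kthread_run(demo_hang_thread, NULL, "demo_hang");
 */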
static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	/* Disable hung task detector on suspend */
	pm_notifier(hungtask_pm_notify, 0);

	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
}
subsys_initcall(hung_task_init);