[9610] usbpd change abnormal detection threshold/time
[GitHub/MotorolaMobilityLLC/kernel-slsi.git] / lib / debug-snapshot-utils.c
CommitLineData
dd101ca5
DC
1/*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Debug-SnapShot: Debug Framework for Ramdump based debugging method
6 * The original code is Exynos-Snapshot for Exynos SoC
7 *
8 * Author: Hosung Kim <hosung0.kim@samsung.com>
9 * Author: Changki Kim <changki.kim@samsung.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/io.h>
18#include <linux/notifier.h>
dd101ca5
DC
19#include <linux/delay.h>
20#include <linux/kallsyms.h>
21#include <linux/input.h>
22#include <linux/smc.h>
23#include <linux/bitops.h>
24#include <linux/sched/clock.h>
25#include <linux/sched/debug.h>
26#include <linux/nmi.h>
27#include <linux/init_task.h>
28#include <linux/ftrace.h>
29
30#include <asm/cputype.h>
31#include <asm/smp_plat.h>
32#include <asm/core_regs.h>
33#include <asm/cacheflush.h>
34
35#include "debug-snapshot-local.h"
36
/* Per-cpu pointers into the snapshot header area where each core's
 * register file and MMU register state are saved (set up in
 * dbg_snapshot_init_utils()). */
DEFINE_PER_CPU(struct pt_regs *, dss_core_reg);
DEFINE_PER_CPU(struct dbg_snapshot_mmu_reg *, dss_mmu_reg);

/*
 * Parameters handed to secure firmware by
 * dbg_snapshot_allcorelockup_detector_init() so it can drive the
 * all-core lockup dump: both fields are physical addresses.
 */
struct dbg_snapshot_allcorelockup_param {
	unsigned long last_pc_addr;	/* PA of the per-core last-PC array */
	unsigned long spin_pc_addr;	/* PA of dbg_snapshot_spin_func */
} dss_allcorelockup_param;
44
/*
 * Watchdog FIQ entry hook for hardlockup debugging.
 *
 * Decides whether the calling core is considered locked up and, if so,
 * serializes the cores' dump output via dss_desc.nmi_lock and replaces
 * the exception pc with the core's last recorded pc so the following
 * bug()/panic path points at the lockup site.
 *
 * @v_regs: exception register file (struct pt_regs *) of this core.
 */
void dbg_snapshot_hook_hardlockup_entry(void *v_regs)
{
	int cpu = raw_smp_processor_id();
	unsigned int val;

	if (!dss_base.enabled)
		return;

	if (!dss_desc.hardlockup_core_mask) {
		if (dss_desc.multistage_wdt_irq &&
			!dss_desc.allcorelockup_detected) {
			/* 1st FIQ trigger: a lockup signature just past the
			 * per-core last-PC array marks an all-core lockup. */
			val = readl(dbg_snapshot_get_base_vaddr() +
				DSS_OFFSET_CORE_LAST_PC + (DSS_NR_CPUS * sizeof(unsigned long)));
			if (val == DSS_SIGN_LOCKUP || val == (DSS_SIGN_LOCKUP + 1)) {
				dss_desc.allcorelockup_detected = true;
				/* Treat every core as locked up. */
				dss_desc.hardlockup_core_mask = GENMASK(DSS_NR_CPUS - 1, 0);
			} else {
				return;
			}
		}
	}

	/* re-check the cpu number which is lockup */
	if (dss_desc.hardlockup_core_mask & BIT(cpu)) {
		int ret;
		unsigned long last_pc;
		struct pt_regs *regs;
		unsigned long timeout = USEC_PER_SEC * 2;

		do {
			/*
			 * If one cpu is occurred to lockup,
			 * others are going to output its own information
			 * without side-effect.
			 */
			ret = do_raw_spin_trylock(&dss_desc.nmi_lock);
			if (!ret)
				udelay(1);
		} while (!ret && timeout--);
		/* NOTE(review): if the trylock never succeeds within ~2s we
		 * fall through and dump anyway — best-effort by design. */

		last_pc = dbg_snapshot_get_last_pc(cpu);

		regs = (struct pt_regs *)v_regs;

		/* Replace real pc value even if it is invalid */
		regs->pc = last_pc;

		/* Then, we expect bug() function works well */
		pr_emerg("\n--------------------------------------------------------------------------\n"
			" Debugging Information for Hardlockup core - CPU(%d), Mask:(0x%x)"
			"\n--------------------------------------------------------------------------\n\n",
			cpu, dss_desc.hardlockup_core_mask);
	}
}
100
101void dbg_snapshot_hook_hardlockup_exit(void)
102{
103 int cpu = raw_smp_processor_id();
104
105 if (!dss_base.enabled ||
27d0e459 106 !dss_desc.hardlockup_core_mask) {
dd101ca5
DC
107 return;
108 }
109
110 /* re-check the cpu number which is lockup */
111 if (dss_desc.hardlockup_core_mask & BIT(cpu)) {
112 /* clear bit to complete replace */
113 dss_desc.hardlockup_core_mask &= ~(BIT(cpu));
114 /*
115 * If this unlock function does not make a side-effect
116 * even it's not lock
117 */
118 do_raw_spin_unlock(&dss_desc.nmi_lock);
119 }
120}
121
122void dbg_snapshot_recall_hardlockup_core(void)
123{
124 int i;
125#ifdef SMC_CMD_KERNEL_PANIC_NOTICE
126 int ret;
127#endif
128 unsigned long cpu_mask = 0, tmp_bit = 0;
129 unsigned long last_pc_addr = 0, timeout;
130
131 if (dss_desc.allcorelockup_detected) {
132 pr_emerg("debug-snapshot: skip recall hardlockup for dump of each core\n");
133 goto out;
134 }
135
136 for (i = 0; i < DSS_NR_CPUS; i++) {
137 if (i == raw_smp_processor_id())
138 continue;
139 tmp_bit = cpu_online_mask->bits[DSS_NR_CPUS/SZ_64] & (1 << i);
140 if (tmp_bit)
141 cpu_mask |= tmp_bit;
142 }
143
144 if (!cpu_mask)
145 goto out;
146
147 last_pc_addr = dbg_snapshot_get_last_pc_paddr();
148
149 pr_emerg("debug-snapshot: core hardlockup mask information: 0x%lx\n", cpu_mask);
150 dss_desc.hardlockup_core_mask = cpu_mask;
151
152#ifdef SMC_CMD_KERNEL_PANIC_NOTICE
153 /* Setup for generating NMI interrupt to unstopped CPUs */
154 ret = dss_soc_ops->soc_smc_call(SMC_CMD_KERNEL_PANIC_NOTICE,
155 cpu_mask,
156 (unsigned long)dbg_snapshot_bug_func,
157 last_pc_addr);
158 if (ret) {
159 pr_emerg("debug-snapshot: failed to generate NMI, "
160 "not support to dump information of core\n");
161 dss_desc.hardlockup_core_mask = 0;
162 goto out;
163 }
164#endif
165 /* Wait up to 3 seconds for NMI interrupt */
166 timeout = USEC_PER_SEC * 3;
167 while (dss_desc.hardlockup_core_mask != 0 && timeout--)
168 udelay(1);
169out:
170 return;
171}
172
173void dbg_snapshot_save_system(void *unused)
174{
175 struct dbg_snapshot_mmu_reg *mmu_reg;
176
177 if (!dbg_snapshot_get_enable("header"))
178 return;
179
180 mmu_reg = per_cpu(dss_mmu_reg, raw_smp_processor_id());
6b666fd8
HK
181
182 dss_soc_ops->soc_save_system((void *)mmu_reg);
dd101ca5
DC
183}
184
/*
 * Trigger the SoC-specific debug information dump.
 *
 * Returns 0 unconditionally.
 */
int dbg_snapshot_dump(void)
{
	dss_soc_ops->soc_dump_info(NULL);
	return 0;
}
EXPORT_SYMBOL(dbg_snapshot_dump);
191
192int dbg_snapshot_save_core(void *v_regs)
193{
194 struct pt_regs *regs = (struct pt_regs *)v_regs;
195 struct pt_regs *core_reg =
196 per_cpu(dss_core_reg, smp_processor_id());
197
198 if(!dbg_snapshot_get_enable("header"))
199 return 0;
200
6b666fd8
HK
201 if (!regs)
202 dss_soc_ops->soc_save_core((void *)core_reg);
203 else
dd101ca5 204 memcpy(core_reg, regs, sizeof(struct user_pt_regs));
dd101ca5
DC
205
206 pr_emerg("debug-snapshot: core register saved(CPU:%d)\n",
207 smp_processor_id());
208 return 0;
209}
210EXPORT_SYMBOL(dbg_snapshot_save_core);
211
/*
 * Save the complete debugging context (system/MMU registers, core
 * registers, SoC dump) for the calling cpu, at most once per panic:
 * a per-core DSS_SIGN_PANIC stamp prevents a second save.
 *
 * @v_regs: exception registers to record, or NULL to capture the
 *          current context.
 * Returns 0 unconditionally.
 */
int dbg_snapshot_save_context(void *v_regs)
{
	int cpu;
	unsigned long flags;
	struct pt_regs *regs = (struct pt_regs *)v_regs;

	if (unlikely(!dss_base.enabled))
		return 0;

	/* SoC-specific prologue; exact semantics live in dss_soc_ops. */
	dss_soc_ops->soc_save_context_entry(NULL);

	cpu = smp_processor_id();
	/* ctrl_lock serializes the save sequence across cores. */
	raw_spin_lock_irqsave(&dss_desc.ctrl_lock, flags);

	/* If it was already saved the context information, it should be skipped */
	if (dbg_snapshot_get_core_panic_stat(cpu) != DSS_SIGN_PANIC) {
		dbg_snapshot_save_system(NULL);
		dbg_snapshot_save_core(regs);
		dbg_snapshot_dump();
		/* Stamp this core so a re-entry skips the save. */
		dbg_snapshot_set_core_panic_stat(DSS_SIGN_PANIC, cpu);
		pr_emerg("debug-snapshot: context saved(CPU:%d)\n", cpu);
	} else
		pr_emerg("debug-snapshot: skip context saved(CPU:%d)\n", cpu);

	raw_spin_unlock_irqrestore(&dss_desc.ctrl_lock, flags);

	dss_soc_ops->soc_save_context_exit(NULL);
	return 0;
}
EXPORT_SYMBOL(dbg_snapshot_save_context);
242
243static void dbg_snapshot_dump_one_task_info(struct task_struct *tsk, bool is_main)
244{
24701dd6 245 char state_array[] = {'R', 'S', 'D', 'T', 't', 'X', 'Z', 'P', 'x', 'K', 'W', 'I', 'N'};
dd101ca5 246 unsigned char idx = 0;
dd9b8a1b 247 unsigned long state;
dd101ca5
DC
248 unsigned long wchan;
249 unsigned long pc = 0;
250 char symname[KSYM_NAME_LEN];
251
24701dd6 252 if ((tsk == NULL) || !try_get_task_stack(tsk))
dd101ca5 253 return;
24701dd6 254 state = tsk->state | tsk->exit_state;
dd101ca5
DC
255
256 pc = KSTK_EIP(tsk);
257 wchan = get_wchan(tsk);
258 if (lookup_symbol_name(wchan, symname) < 0)
259 snprintf(symname, KSYM_NAME_LEN, "%lu", wchan);
260
261 while (state) {
262 idx++;
263 state >>= 1;
264 }
265
266 /*
267 * kick watchdog to prevent unexpected reset during panic sequence
268 * and it prevents the hang during panic sequence by watchedog
269 */
270 touch_softlockup_watchdog();
271 dss_soc_ops->soc_kick_watchdog(NULL);
272
273 pr_info("%8d %8d %8d %16lld %c(%d) %3d %16zx %16zx %16zx %c %16s [%s]\n",
274 tsk->pid, (int)(tsk->utime), (int)(tsk->stime),
275 tsk->se.exec_start, state_array[idx], (int)(tsk->state),
276 task_cpu(tsk), wchan, pc, (unsigned long)tsk,
277 is_main ? '*' : ' ', tsk->comm, symname);
278
279 if (tsk->state == TASK_RUNNING
280 || tsk->state == TASK_UNINTERRUPTIBLE
281 || tsk->mm == NULL) {
dd101ca5
DC
282 show_stack(tsk, NULL);
283 pr_info("\n");
284 }
285}
286
/*
 * Return the next task on @tsk's thread_group list.  The list is
 * circular, so callers must detect wrap-around themselves.
 */
static inline struct task_struct *get_next_thread(struct task_struct *tsk)
{
	return container_of(tsk->thread_group.next,
			struct task_struct,
			thread_group);
}
293
/*
 * Print a table of every process (and each of its threads) in the
 * system, starting from init_task.  Both the task list and each
 * thread_group list are circular, so iteration stops when the walk
 * returns to its starting point.
 *
 * NOTE(review): walks the task lists without tasklist_lock/RCU —
 * presumably only safe because this runs from the panic path; confirm
 * with callers.
 */
void dbg_snapshot_dump_task_info(void)
{
	struct task_struct *frst_tsk;
	struct task_struct *curr_tsk;
	struct task_struct *frst_thr;
	struct task_struct *curr_thr;

	pr_info("\n");
	pr_info(" current proc : %d %s\n", current->pid, current->comm);
	pr_info(" ----------------------------------------------------------------------------------------------------------------------------\n");
	pr_info("     pid      uTime    sTime      exec(ns)  stat  cpu       wchan           user_pc        task_struct       comm   sym_wchan\n");
	pr_info(" ----------------------------------------------------------------------------------------------------------------------------\n");

	/* processes */
	frst_tsk = &init_task;
	curr_tsk = frst_tsk;
	while (curr_tsk != NULL) {
		dbg_snapshot_dump_one_task_info(curr_tsk, true);
		/* threads */
		if (curr_tsk->thread_group.next != NULL) {
			frst_thr = get_next_thread(curr_tsk);
			curr_thr = frst_thr;
			/* Skip when the group leader is its only member. */
			if (frst_thr != curr_tsk) {
				while (curr_thr != NULL) {
					dbg_snapshot_dump_one_task_info(curr_thr, false);
					curr_thr = get_next_thread(curr_thr);
					/* Wrapped back to the leader: done. */
					if (curr_thr == curr_tsk)
						break;
				}
			}
		}
		curr_tsk = container_of(curr_tsk->tasks.next,
					struct task_struct, tasks);
		/* Wrapped back to init_task: done. */
		if (curr_tsk == frst_tsk)
			break;
	}
	pr_info(" ----------------------------------------------------------------------------------------------------------------------------\n");
}
332
#ifdef CONFIG_DEBUG_SNAPSHOT_CRASH_KEY
/*
 * Input-event hook for the "crash key" combo: hold volume-down (without
 * volume-up) and press power twice to force a panic ("forced upload").
 *
 * @code:  input key code.
 * @value: non-zero on press, zero on release.
 */
void dbg_snapshot_check_crash_key(unsigned int code, int value)
{
	static bool volup_p;
	static bool voldown_p;
	static int loopcount;

	static const unsigned int VOLUME_UP = KEY_VOLUMEUP;
	static const unsigned int VOLUME_DOWN = KEY_VOLUMEDOWN;

	if (code == KEY_POWER)
		pr_info("debug-snapshot: POWER-KEY %s\n", value ? "pressed" : "released");

	/* Enter Forced Upload
	 * Hold volume down key first
	 * and then press power key twice
	 * and volume up key should not be pressed
	 */
	if (!value) {
		/* Release events reset the tracked key state. */
		if (code == VOLUME_UP)
			volup_p = false;
		if (code == VOLUME_DOWN) {
			loopcount = 0;
			voldown_p = false;
		}
		return;
	}

	if (code == VOLUME_UP)
		volup_p = true;
	else if (code == VOLUME_DOWN)
		voldown_p = true;
	else if (code == KEY_POWER && voldown_p && !volup_p) {
		pr_info("debug-snapshot: count for entering forced upload [%d]\n",
			++loopcount);
		if (loopcount == 2)
			panic("Crash Key");
	}
}
#endif
376
/*
 * Register the all-core lockup detector with secure firmware: hand it
 * the physical addresses of the last-PC array and the spin function so
 * it can redirect locked-up cores (multistage watchdog IRQ required).
 *
 * When SMC_CMD_LOCKUP_NOTICE is not defined, ret stays -1 and the log
 * below reports "failed".
 */
void __init dbg_snapshot_allcorelockup_detector_init(void)
{
	int ret = -1;

	if (!dss_desc.multistage_wdt_irq)
		return;

	dss_allcorelockup_param.last_pc_addr = dbg_snapshot_get_last_pc_paddr();
	dss_allcorelockup_param.spin_pc_addr = __pa_symbol(dbg_snapshot_spin_func);

	/* Make the parameters visible to firmware before the SMC call. */
	__flush_dcache_area((void *)&dss_allcorelockup_param,
			sizeof(struct dbg_snapshot_allcorelockup_param));

#ifdef SMC_CMD_LOCKUP_NOTICE
	/* Setup for generating NMI interrupt to unstopped CPUs */
	ret = dss_soc_ops->soc_smc_call(SMC_CMD_LOCKUP_NOTICE,
					(unsigned long)dbg_snapshot_bug_func,
					dss_desc.multistage_wdt_irq,
					(unsigned long)(virt_to_phys)(&dss_allcorelockup_param));
#endif

	pr_emerg("debug-snapshot: %s to register all-core lockup detector - ret: %d\n",
			ret == 0 ? "success" : "failed", ret);
}
401
1abf4739 402void __init dbg_snapshot_init_utils(void)
dd101ca5
DC
403{
404 size_t vaddr;
405 int i;
406
407 vaddr = dss_items[dss_desc.header_num].entry.vaddr;
408
409 for (i = 0; i < DSS_NR_CPUS; i++) {
410 per_cpu(dss_mmu_reg, i) = (struct dbg_snapshot_mmu_reg *)
411 (vaddr + DSS_HEADER_SZ +
412 i * DSS_MMU_REG_OFFSET);
413 per_cpu(dss_core_reg, i) = (struct pt_regs *)
414 (vaddr + DSS_HEADER_SZ + DSS_MMU_REG_SZ +
415 i * DSS_CORE_REG_OFFSET);
416 }
417
418 /* hardlockup_detector function should be called before secondary booting */
419 dbg_snapshot_allcorelockup_detector_init();
420}
421
/*
 * Initcall: capture system/MMU register state for every cpu once at
 * boot — remote cpus via IPI (waited on), then the local cpu directly.
 */
static int __init dbg_snapshot_utils_save_systems_all(void)
{
	smp_call_function(dbg_snapshot_save_system, NULL, 1);
	dbg_snapshot_save_system(NULL);

	return 0;
}
postcore_initcall(dbg_snapshot_utils_save_systems_all);