/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Debug-SnapShot: Debug Framework for Ramdump based debugging method
 * The original code is Exynos-Snapshot for Exynos SoC
 *
 * Author: Hosung Kim <hosung0.kim@samsung.com>
 * Author: Changki Kim <changki.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
16 #include <linux/kernel.h>
18 #include <linux/notifier.h>
19 #include <linux/delay.h>
20 #include <linux/kallsyms.h>
21 #include <linux/input.h>
22 #include <linux/smc.h>
23 #include <linux/bitops.h>
24 #include <linux/sched/clock.h>
25 #include <linux/sched/debug.h>
26 #include <linux/nmi.h>
27 #include <linux/init_task.h>
28 #include <linux/ftrace.h>
30 #include <asm/cputype.h>
31 #include <asm/smp_plat.h>
32 #include <asm/core_regs.h>
33 #include <asm/cacheflush.h>
35 #include "debug-snapshot-local.h"
/*
 * Per-cpu snapshot buffers: each CPU gets a pointer into the reserved
 * snapshot region (filled in by dbg_snapshot_init_utils()) where its
 * core registers and MMU registers are saved on panic/lockup.
 */
DEFINE_PER_CPU(struct pt_regs *, dss_core_reg);
DEFINE_PER_CPU(struct dbg_snapshot_mmu_reg *, dss_mmu_reg);
/*
 * Parameters handed to the secure world (see SMC_CMD_LOCKUP_NOTICE in
 * dbg_snapshot_allcorelockup_detector_init()) so EL3 knows where each
 * core's last PC lives and which function lockup cores should spin in.
 */
struct dbg_snapshot_allcorelockup_param {
	unsigned long last_pc_addr;	/* phys addr of per-core last-PC array */
	unsigned long spin_pc_addr;	/* phys addr of dbg_snapshot_spin_func */
} dss_allcorelockup_param;
45 void dbg_snapshot_hook_hardlockup_entry(void *v_regs
)
47 int cpu
= raw_smp_processor_id();
50 if (!dss_base
.enabled
)
53 if (!dss_desc
.hardlockup_core_mask
) {
54 if (dss_desc
.multistage_wdt_irq
&&
55 !dss_desc
.allcorelockup_detected
) {
57 val
= readl(dbg_snapshot_get_base_vaddr() +
58 DSS_OFFSET_CORE_LAST_PC
+ (DSS_NR_CPUS
* sizeof(unsigned long)));
59 if (val
== DSS_SIGN_LOCKUP
|| val
== (DSS_SIGN_LOCKUP
+ 1)) {
60 dss_desc
.allcorelockup_detected
= true;
61 dss_desc
.hardlockup_core_mask
= GENMASK(DSS_NR_CPUS
- 1, 0);
68 /* re-check the cpu number which is lockup */
69 if (dss_desc
.hardlockup_core_mask
& BIT(cpu
)) {
71 unsigned long last_pc
;
73 unsigned long timeout
= USEC_PER_SEC
* 2;
77 * If one cpu is occurred to lockup,
78 * others are going to output its own information
79 * without side-effect.
81 ret
= do_raw_spin_trylock(&dss_desc
.nmi_lock
);
84 } while (!ret
&& timeout
--);
86 last_pc
= dbg_snapshot_get_last_pc(cpu
);
88 regs
= (struct pt_regs
*)v_regs
;
90 /* Replace real pc value even if it is invalid */
93 /* Then, we expect bug() function works well */
94 pr_emerg("\n--------------------------------------------------------------------------\n"
95 " Debugging Information for Hardlockup core - CPU(%d), Mask:(0x%x)"
96 "\n--------------------------------------------------------------------------\n\n",
97 cpu
, dss_desc
.hardlockup_core_mask
);
101 void dbg_snapshot_hook_hardlockup_exit(void)
103 int cpu
= raw_smp_processor_id();
105 if (!dss_base
.enabled
||
106 !dss_desc
.hardlockup_core_mask
) {
110 /* re-check the cpu number which is lockup */
111 if (dss_desc
.hardlockup_core_mask
& BIT(cpu
)) {
112 /* clear bit to complete replace */
113 dss_desc
.hardlockup_core_mask
&= ~(BIT(cpu
));
115 * If this unlock function does not make a side-effect
118 do_raw_spin_unlock(&dss_desc
.nmi_lock
);
122 void dbg_snapshot_recall_hardlockup_core(void)
125 #ifdef SMC_CMD_KERNEL_PANIC_NOTICE
128 unsigned long cpu_mask
= 0, tmp_bit
= 0;
129 unsigned long last_pc_addr
= 0, timeout
;
131 if (dss_desc
.allcorelockup_detected
) {
132 pr_emerg("debug-snapshot: skip recall hardlockup for dump of each core\n");
136 for (i
= 0; i
< DSS_NR_CPUS
; i
++) {
137 if (i
== raw_smp_processor_id())
139 tmp_bit
= cpu_online_mask
->bits
[DSS_NR_CPUS
/SZ_64
] & (1 << i
);
147 last_pc_addr
= dbg_snapshot_get_last_pc_paddr();
149 pr_emerg("debug-snapshot: core hardlockup mask information: 0x%lx\n", cpu_mask
);
150 dss_desc
.hardlockup_core_mask
= cpu_mask
;
152 #ifdef SMC_CMD_KERNEL_PANIC_NOTICE
153 /* Setup for generating NMI interrupt to unstopped CPUs */
154 ret
= dss_soc_ops
->soc_smc_call(SMC_CMD_KERNEL_PANIC_NOTICE
,
156 (unsigned long)dbg_snapshot_bug_func
,
159 pr_emerg("debug-snapshot: failed to generate NMI, "
160 "not support to dump information of core\n");
161 dss_desc
.hardlockup_core_mask
= 0;
165 /* Wait up to 3 seconds for NMI interrupt */
166 timeout
= USEC_PER_SEC
* 3;
167 while (dss_desc
.hardlockup_core_mask
!= 0 && timeout
--)
173 void dbg_snapshot_save_system(void *unused
)
175 struct dbg_snapshot_mmu_reg
*mmu_reg
;
177 if (!dbg_snapshot_get_enable("header"))
180 mmu_reg
= per_cpu(dss_mmu_reg
, raw_smp_processor_id());
182 dss_soc_ops
->soc_save_system((void *)mmu_reg
);
185 int dbg_snapshot_dump(void)
187 dss_soc_ops
->soc_dump_info(NULL
);
190 EXPORT_SYMBOL(dbg_snapshot_dump
);
192 int dbg_snapshot_save_core(void *v_regs
)
194 struct pt_regs
*regs
= (struct pt_regs
*)v_regs
;
195 struct pt_regs
*core_reg
=
196 per_cpu(dss_core_reg
, smp_processor_id());
198 if(!dbg_snapshot_get_enable("header"))
202 dss_soc_ops
->soc_save_core((void *)core_reg
);
204 memcpy(core_reg
, regs
, sizeof(struct user_pt_regs
));
206 pr_emerg("debug-snapshot: core register saved(CPU:%d)\n",
210 EXPORT_SYMBOL(dbg_snapshot_save_core
);
212 int dbg_snapshot_save_context(void *v_regs
)
216 struct pt_regs
*regs
= (struct pt_regs
*)v_regs
;
218 if (unlikely(!dss_base
.enabled
))
221 dss_soc_ops
->soc_save_context_entry(NULL
);
223 cpu
= smp_processor_id();
224 raw_spin_lock_irqsave(&dss_desc
.ctrl_lock
, flags
);
226 /* If it was already saved the context information, it should be skipped */
227 if (dbg_snapshot_get_core_panic_stat(cpu
) != DSS_SIGN_PANIC
) {
228 dbg_snapshot_save_system(NULL
);
229 dbg_snapshot_save_core(regs
);
231 dbg_snapshot_set_core_panic_stat(DSS_SIGN_PANIC
, cpu
);
232 pr_emerg("debug-snapshot: context saved(CPU:%d)\n", cpu
);
234 pr_emerg("debug-snapshot: skip context saved(CPU:%d)\n", cpu
);
236 raw_spin_unlock_irqrestore(&dss_desc
.ctrl_lock
, flags
);
238 dss_soc_ops
->soc_save_context_exit(NULL
);
241 EXPORT_SYMBOL(dbg_snapshot_save_context
);
243 static void dbg_snapshot_dump_one_task_info(struct task_struct
*tsk
, bool is_main
)
245 char state_array
[] = {'R', 'S', 'D', 'T', 't', 'X', 'Z', 'P', 'x', 'K', 'W', 'I', 'N'};
246 unsigned char idx
= 0;
249 unsigned long pc
= 0;
250 char symname
[KSYM_NAME_LEN
];
252 if ((tsk
== NULL
) || !try_get_task_stack(tsk
))
254 state
= tsk
->state
| tsk
->exit_state
;
257 wchan
= get_wchan(tsk
);
258 if (lookup_symbol_name(wchan
, symname
) < 0)
259 snprintf(symname
, KSYM_NAME_LEN
, "%lu", wchan
);
267 * kick watchdog to prevent unexpected reset during panic sequence
268 * and it prevents the hang during panic sequence by watchedog
270 touch_softlockup_watchdog();
271 dss_soc_ops
->soc_kick_watchdog(NULL
);
273 pr_info("%8d %8d %8d %16lld %c(%d) %3d %16zx %16zx %16zx %c %16s [%s]\n",
274 tsk
->pid
, (int)(tsk
->utime
), (int)(tsk
->stime
),
275 tsk
->se
.exec_start
, state_array
[idx
], (int)(tsk
->state
),
276 task_cpu(tsk
), wchan
, pc
, (unsigned long)tsk
,
277 is_main
? '*' : ' ', tsk
->comm
, symname
);
279 if (tsk
->state
== TASK_RUNNING
280 || tsk
->state
== TASK_UNINTERRUPTIBLE
281 || tsk
->mm
== NULL
) {
282 show_stack(tsk
, NULL
);
287 static inline struct task_struct
*get_next_thread(struct task_struct
*tsk
)
289 return container_of(tsk
->thread_group
.next
,
294 void dbg_snapshot_dump_task_info(void)
296 struct task_struct
*frst_tsk
;
297 struct task_struct
*curr_tsk
;
298 struct task_struct
*frst_thr
;
299 struct task_struct
*curr_thr
;
302 pr_info(" current proc : %d %s\n", current
->pid
, current
->comm
);
303 pr_info(" ----------------------------------------------------------------------------------------------------------------------------\n");
304 pr_info(" pid uTime sTime exec(ns) stat cpu wchan user_pc task_struct comm sym_wchan\n");
305 pr_info(" ----------------------------------------------------------------------------------------------------------------------------\n");
308 frst_tsk
= &init_task
;
310 while (curr_tsk
!= NULL
) {
311 dbg_snapshot_dump_one_task_info(curr_tsk
, true);
313 if (curr_tsk
->thread_group
.next
!= NULL
) {
314 frst_thr
= get_next_thread(curr_tsk
);
316 if (frst_thr
!= curr_tsk
) {
317 while (curr_thr
!= NULL
) {
318 dbg_snapshot_dump_one_task_info(curr_thr
, false);
319 curr_thr
= get_next_thread(curr_thr
);
320 if (curr_thr
== curr_tsk
)
325 curr_tsk
= container_of(curr_tsk
->tasks
.next
,
326 struct task_struct
, tasks
);
327 if (curr_tsk
== frst_tsk
)
330 pr_info(" ----------------------------------------------------------------------------------------------------------------------------\n");
#ifdef CONFIG_DEBUG_SNAPSHOT_CRASH_KEY
/*
 * Crash-key detector fed from the input path: holding volume-down (with
 * volume-up NOT held) and pressing power twice triggers a forced upload
 * (panic). State is kept in function-local statics across key events.
 * @code:  input key code (KEY_*).
 * @value: non-zero on press, zero on release.
 *
 * NOTE(review): the volup_p declaration, the press/release branch
 * skeleton, and the trigger action were dropped by the extraction; the
 * trigger is reconstructed as panic("Crash Key") — confirm against the
 * original file.
 */
void dbg_snapshot_check_crash_key(unsigned int code, int value)
{
	static bool volup_p;
	static bool voldown_p;
	static int loopcount;

	static const unsigned int VOLUME_UP = KEY_VOLUMEUP;
	static const unsigned int VOLUME_DOWN = KEY_VOLUMEDOWN;

	if (code == KEY_POWER)
		pr_info("debug-snapshot: POWER-KEY %s\n", value ? "pressed" : "released");

	/* Enter Forced Upload
	 *  Hold volume down key first
	 *  and then press power key twice
	 *  and volume up key should not be pressed
	 */
	if (value) {
		if (code == VOLUME_UP)
			volup_p = true;
		if (code == VOLUME_DOWN)
			voldown_p = true;
		if (!volup_p && voldown_p) {
			if (code == KEY_POWER) {
				pr_info
				    ("debug-snapshot: count for entering forced upload [%d]\n",
				     ++loopcount);
				if (loopcount == 2) {
					panic("Crash Key");
				}
			}
		}
	} else {
		if (code == VOLUME_UP)
			volup_p = false;
		if (code == VOLUME_DOWN) {
			voldown_p = false;
			loopcount = 0;
		}
	}
}
#endif
377 void __init
dbg_snapshot_allcorelockup_detector_init(void)
381 if (!dss_desc
.multistage_wdt_irq
)
384 dss_allcorelockup_param
.last_pc_addr
= dbg_snapshot_get_last_pc_paddr();
385 dss_allcorelockup_param
.spin_pc_addr
= __pa_symbol(dbg_snapshot_spin_func
);
387 __flush_dcache_area((void *)&dss_allcorelockup_param
,
388 sizeof(struct dbg_snapshot_allcorelockup_param
));
390 #ifdef SMC_CMD_LOCKUP_NOTICE
391 /* Setup for generating NMI interrupt to unstopped CPUs */
392 ret
= dss_soc_ops
->soc_smc_call(SMC_CMD_LOCKUP_NOTICE
,
393 (unsigned long)dbg_snapshot_bug_func
,
394 dss_desc
.multistage_wdt_irq
,
395 (unsigned long)(virt_to_phys
)(&dss_allcorelockup_param
));
398 pr_emerg("debug-snapshot: %s to register all-core lockup detector - ret: %d\n",
399 ret
== 0 ? "success" : "failed", ret
);
402 void __init
dbg_snapshot_init_utils(void)
407 vaddr
= dss_items
[dss_desc
.header_num
].entry
.vaddr
;
409 for (i
= 0; i
< DSS_NR_CPUS
; i
++) {
410 per_cpu(dss_mmu_reg
, i
) = (struct dbg_snapshot_mmu_reg
*)
411 (vaddr
+ DSS_HEADER_SZ
+
412 i
* DSS_MMU_REG_OFFSET
);
413 per_cpu(dss_core_reg
, i
) = (struct pt_regs
*)
414 (vaddr
+ DSS_HEADER_SZ
+ DSS_MMU_REG_SZ
+
415 i
* DSS_CORE_REG_OFFSET
);
418 /* hardlockup_detector function should be called before secondary booting */
419 dbg_snapshot_allcorelockup_detector_init();
422 static int __init
dbg_snapshot_utils_save_systems_all(void)
424 smp_call_function(dbg_snapshot_save_system
, NULL
, 1);
425 dbg_snapshot_save_system(NULL
);
429 postcore_initcall(dbg_snapshot_utils_save_systems_all
);