/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Debug-SnapShot: Debug Framework for Ramdump based debugging method
 * The original code is Exynos-Snapshot for Exynos SoC
 *
 * Author: Hosung Kim <hosung0.kim@samsung.com>
 * Author: Changki Kim <changki.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/module.h>
19 #include <linux/ktime.h>
20 #include <linux/kallsyms.h>
21 #include <linux/platform_device.h>
22 #include <linux/clk-provider.h>
23 #include <linux/pstore_ram.h>
24 #include <linux/sched/clock.h>
25 #include <linux/ftrace.h>
27 #include "debug-snapshot-local.h"
29 #include <asm/traps.h>
30 #include <asm/hardirq.h>
31 #include <asm/stacktrace.h>
32 #include <linux/debug-snapshot.h>
33 #include <linux/kernel_stat.h>
34 #include <linux/irqnr.h>
35 #include <linux/irq.h>
36 #include <linux/irqdesc.h>
38 struct dbg_snapshot_lastinfo
{
39 #ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
40 atomic_t freq_last_idx
[DSS_FLAG_END
];
42 char log
[DSS_NR_CPUS
][SZ_1K
];
43 char *last_p
[DSS_NR_CPUS
];
55 enum dss_kevent_flag
{
77 struct dbg_snapshot_log_idx
{
78 atomic_t task_log_idx
[DSS_NR_CPUS
];
79 atomic_t work_log_idx
[DSS_NR_CPUS
];
80 atomic_t cpuidle_log_idx
[DSS_NR_CPUS
];
81 atomic_t suspend_log_idx
;
82 atomic_t irq_log_idx
[DSS_NR_CPUS
];
83 #ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
84 atomic_t spinlock_log_idx
[DSS_NR_CPUS
];
86 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
87 atomic_t irqs_disabled_log_idx
[DSS_NR_CPUS
];
89 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
90 atomic_t irq_exit_log_idx
[DSS_NR_CPUS
];
92 #ifdef CONFIG_DEBUG_SNAPSHOT_REG
93 atomic_t reg_log_idx
[DSS_NR_CPUS
];
95 #ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
96 atomic_t hrtimer_log_idx
[DSS_NR_CPUS
];
98 #ifdef CONFIG_DEBUG_SNAPSHOT_CLK
101 #ifdef CONFIG_DEBUG_SNAPSHOT_PMU
102 atomic_t pmu_log_idx
;
104 #ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
105 atomic_t freq_log_idx
;
107 #ifdef CONFIG_DEBUG_SNAPSHOT_DM
110 #ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
111 atomic_t regulator_log_idx
;
113 #ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
114 atomic_t thermal_log_idx
;
116 #ifdef CONFIG_DEBUG_SNAPSHOT_I2C
117 atomic_t i2c_log_idx
;
119 #ifdef CONFIG_DEBUG_SNAPSHOT_SPI
120 atomic_t spi_log_idx
;
122 #ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
123 atomic_t binder_log_idx
;
125 #ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
126 atomic_t clockevent_log_idx
[DSS_NR_CPUS
];
127 atomic_t printkl_log_idx
;
128 atomic_t printk_log_idx
;
130 #ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
131 atomic_t acpm_log_idx
;
135 int dbg_snapshot_log_size
= sizeof(struct dbg_snapshot_log
);
137 * including or excluding options
138 * if you want to except some interrupt, it should be written in this array
140 int dss_irqlog_exlist
[DSS_EX_MAX_NUM
] = {
141 /* interrupt number ex) 152, 153, 154, */
145 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
146 int dss_irqexit_exlist
[DSS_EX_MAX_NUM
] = {
147 /* interrupt number ex) 152, 153, 154, */
151 unsigned int dss_irqexit_threshold
=
152 CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT_THRESHOLD
;
155 #ifdef CONFIG_DEBUG_SNAPSHOT_REG
156 struct dss_reg_list
{
161 static struct dss_reg_list dss_reg_exlist
[] = {
163 * if it wants to reduce effect enabled reg feautre to system,
164 * you must add these registers - mct, serial
165 * because they are called very often.
166 * physical address, size ex) {0x10C00000, 0x1000},
168 {DSS_REG_MCT_ADDR
, DSS_REG_MCT_SIZE
},
169 {DSS_REG_UART_ADDR
, DSS_REG_UART_SIZE
},
180 #ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
181 static char *dss_freq_name
[] = {
182 "LITTLE", "BIG", "INT", "MIF", "ISP", "DISP", "INTCAM", "AUD", "IVA", "SCORE", "FSYS0",
186 /* Internal interface variable */
187 static struct dbg_snapshot_log_idx dss_idx
;
188 static struct dbg_snapshot_lastinfo dss_lastinfo
;
190 void __init
dbg_snapshot_init_log_idx(void)
194 #ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
195 atomic_set(&(dss_idx
.printk_log_idx
), -1);
196 atomic_set(&(dss_idx
.printkl_log_idx
), -1);
198 #ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
199 atomic_set(&(dss_idx
.regulator_log_idx
), -1);
201 #ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
202 atomic_set(&(dss_idx
.thermal_log_idx
), -1);
204 #ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
205 atomic_set(&(dss_idx
.freq_log_idx
), -1);
207 #ifdef CONFIG_DEBUG_SNAPSHOT_DM
208 atomic_set(&(dss_idx
.dm_log_idx
), -1);
210 #ifdef CONFIG_DEBUG_SNAPSHOT_CLK
211 atomic_set(&(dss_idx
.clk_log_idx
), -1);
213 #ifdef CONFIG_DEBUG_SNAPSHOT_PMU
214 atomic_set(&(dss_idx
.pmu_log_idx
), -1);
216 #ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
217 atomic_set(&(dss_idx
.acpm_log_idx
), -1);
219 #ifdef CONFIG_DEBUG_SNAPSHOT_I2C
220 atomic_set(&(dss_idx
.i2c_log_idx
), -1);
222 #ifdef CONFIG_DEBUG_SNAPSHOT_SPI
223 atomic_set(&(dss_idx
.spi_log_idx
), -1);
225 #ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
226 atomic_set(&(dss_idx
.binder_log_idx
), -1);
228 atomic_set(&(dss_idx
.suspend_log_idx
), -1);
230 for (i
= 0; i
< DSS_NR_CPUS
; i
++) {
231 atomic_set(&(dss_idx
.task_log_idx
[i
]), -1);
232 atomic_set(&(dss_idx
.work_log_idx
[i
]), -1);
233 #ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
234 atomic_set(&(dss_idx
.clockevent_log_idx
[i
]), -1);
236 atomic_set(&(dss_idx
.cpuidle_log_idx
[i
]), -1);
237 atomic_set(&(dss_idx
.irq_log_idx
[i
]), -1);
238 #ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
239 atomic_set(&(dss_idx
.spinlock_log_idx
[i
]), -1);
241 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
242 atomic_set(&(dss_idx
.irqs_disabled_log_idx
[i
]), -1);
244 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
245 atomic_set(&(dss_idx
.irq_exit_log_idx
[i
]), -1);
247 #ifdef CONFIG_DEBUG_SNAPSHOT_REG
248 atomic_set(&(dss_idx
.reg_log_idx
[i
]), -1);
250 #ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
251 atomic_set(&(dss_idx
.hrtimer_log_idx
[i
]), -1);
256 bool dbg_snapshot_dumper_one(void *v_dumper
, char *line
, size_t size
, size_t *len
)
260 unsigned int cpu
, items
;
261 unsigned long rem_nsec
;
263 struct dss_dumper
*dumper
= (struct dss_dumper
*)v_dumper
;
265 if (!line
|| size
< SZ_128
||
266 dumper
->cur_cpu
>= NR_CPUS
)
269 if (dumper
->active
) {
270 if (dumper
->init_idx
== dumper
->cur_idx
)
274 cpu
= dumper
->cur_cpu
;
275 idx
= dumper
->cur_idx
;
276 items
= dumper
->items
;
281 struct task_struct
*task
;
282 array_size
= ARRAY_SIZE(dss_log
->task
[0]) - 1;
283 if (!dumper
->active
) {
284 idx
= (atomic_read(&dss_idx
.task_log_idx
[0]) + 1) & array_size
;
285 dumper
->init_idx
= idx
;
286 dumper
->active
= true;
288 ts
= dss_log
->task
[cpu
][idx
].time
;
289 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
290 task
= dss_log
->task
[cpu
][idx
].task
;
292 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, "
293 "task:0x%16p, stack:0x%16p, exec_start:%16llu\n",
294 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
295 task
->comm
, task
, task
->stack
,
296 task
->se
.exec_start
);
301 char work_fn
[KSYM_NAME_LEN
] = {0,};
305 array_size
= ARRAY_SIZE(dss_log
->work
[0]) - 1;
306 if (!dumper
->active
) {
307 idx
= (atomic_read(&dss_idx
.work_log_idx
[0]) + 1) & array_size
;
308 dumper
->init_idx
= idx
;
309 dumper
->active
= true;
311 ts
= dss_log
->work
[cpu
][idx
].time
;
312 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
313 lookup_symbol_name((unsigned long)dss_log
->work
[cpu
][idx
].fn
, work_fn
);
314 task_comm
= dss_log
->work
[cpu
][idx
].task_comm
;
315 en
= dss_log
->work
[cpu
][idx
].en
;
318 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, work_fn:%32s, %3s\n",
319 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
321 en
== DSS_FLAG_IN
? "IN" : "OUT");
324 case DSS_FLAG_CPUIDLE
:
327 int state
, num_cpus
, en
;
330 array_size
= ARRAY_SIZE(dss_log
->cpuidle
[0]) - 1;
331 if (!dumper
->active
) {
332 idx
= (atomic_read(&dss_idx
.cpuidle_log_idx
[0]) + 1) & array_size
;
333 dumper
->init_idx
= idx
;
334 dumper
->active
= true;
336 ts
= dss_log
->cpuidle
[cpu
][idx
].time
;
337 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
339 index
= dss_log
->cpuidle
[cpu
][idx
].modes
;
340 en
= dss_log
->cpuidle
[cpu
][idx
].en
;
341 state
= dss_log
->cpuidle
[cpu
][idx
].state
;
342 num_cpus
= dss_log
->cpuidle
[cpu
][idx
].num_online_cpus
;
343 delta
= dss_log
->cpuidle
[cpu
][idx
].delta
;
345 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] cpuidle: %s, "
346 "state:%d, num_online_cpus:%d, stay_time:%8u, %3s\n",
347 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
348 index
, state
, num_cpus
, delta
,
349 en
== DSS_FLAG_IN
? "IN" : "OUT");
352 case DSS_FLAG_SUSPEND
:
354 char suspend_fn
[KSYM_NAME_LEN
];
357 array_size
= ARRAY_SIZE(dss_log
->suspend
) - 1;
358 if (!dumper
->active
) {
359 idx
= (atomic_read(&dss_idx
.suspend_log_idx
) + 1) & array_size
;
360 dumper
->init_idx
= idx
;
361 dumper
->active
= true;
363 ts
= dss_log
->suspend
[idx
].time
;
364 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
366 lookup_symbol_name((unsigned long)dss_log
->suspend
[idx
].fn
, suspend_fn
);
367 en
= dss_log
->suspend
[idx
].en
;
369 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] suspend_fn:%s, %3s\n",
370 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
371 suspend_fn
, en
== DSS_FLAG_IN
? "IN" : "OUT");
376 char irq_fn
[KSYM_NAME_LEN
];
379 array_size
= ARRAY_SIZE(dss_log
->irq
[0]) - 1;
380 if (!dumper
->active
) {
381 idx
= (atomic_read(&dss_idx
.irq_log_idx
[0]) + 1) & array_size
;
382 dumper
->init_idx
= idx
;
383 dumper
->active
= true;
385 ts
= dss_log
->irq
[cpu
][idx
].time
;
386 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
388 lookup_symbol_name((unsigned long)dss_log
->irq
[cpu
][idx
].fn
, irq_fn
);
389 irq
= dss_log
->irq
[cpu
][idx
].irq
;
390 en
= dss_log
->irq
[cpu
][idx
].en
;
392 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] irq:%6d, irq_fn:%32s, %3s\n",
393 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
394 irq
, irq_fn
, en
== DSS_FLAG_IN
? "IN" : "OUT");
397 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
398 case DSS_FLAG_IRQ_EXIT
:
400 unsigned long end_time
, latency
;
403 array_size
= ARRAY_SIZE(dss_log
->irq_exit
[0]) - 1;
404 if (!dumper
->active
) {
405 idx
= (atomic_read(&dss_idx
.irq_exit_log_idx
[0]) + 1) & array_size
;
406 dumper
->init_idx
= idx
;
407 dumper
->active
= true;
409 ts
= dss_log
->irq_exit
[cpu
][idx
].time
;
410 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
412 end_time
= dss_log
->irq_exit
[cpu
][idx
].end_time
;
413 latency
= dss_log
->irq_exit
[cpu
][idx
].latency
;
414 irq
= dss_log
->irq_exit
[cpu
][idx
].irq
;
416 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] irq:%6d, "
417 "latency:%16zu, end_time:%16zu\n",
418 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
419 irq
, latency
, end_time
);
423 #ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
424 case DSS_FLAG_SPINLOCK
:
426 unsigned int jiffies_local
;
427 char callstack
[CONFIG_DEBUG_SNAPSHOT_CALLSTACK
][KSYM_NAME_LEN
];
431 array_size
= ARRAY_SIZE(dss_log
->spinlock
[0]) - 1;
432 if (!dumper
->active
) {
433 idx
= (atomic_read(&dss_idx
.spinlock_log_idx
[0]) + 1) & array_size
;
434 dumper
->init_idx
= idx
;
435 dumper
->active
= true;
437 ts
= dss_log
->spinlock
[cpu
][idx
].time
;
438 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
440 jiffies_local
= dss_log
->spinlock
[cpu
][idx
].jiffies
;
441 en
= dss_log
->spinlock
[cpu
][idx
].en
;
442 for (i
= 0; i
< CONFIG_DEBUG_SNAPSHOT_CALLSTACK
; i
++)
443 lookup_symbol_name((unsigned long)dss_log
->spinlock
[cpu
][idx
].caller
[i
],
446 next
= dss_log
->spinlock
[cpu
][idx
].next
;
447 owner
= dss_log
->spinlock
[cpu
][idx
].owner
;
449 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] next:%8x, owner:%8x jiffies:%12u, %3s\n"
454 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
455 next
, owner
, jiffies_local
,
456 en
== DSS_FLAG_IN
? "IN" : "OUT",
457 callstack
[0], callstack
[1], callstack
[2], callstack
[3]);
461 #ifdef CONFIG_DEBUG_SNAPSHOT_CLK
464 const char *clk_name
;
465 char clk_fn
[KSYM_NAME_LEN
];
469 array_size
= ARRAY_SIZE(dss_log
->clk
) - 1;
470 if (!dumper
->active
) {
471 idx
= (atomic_read(&dss_idx
.clk_log_idx
) + 1) & array_size
;
472 dumper
->init_idx
= idx
;
473 dumper
->active
= true;
475 ts
= dss_log
->clk
[idx
].time
;
476 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
478 clk
= (struct clk_hw
*)dss_log
->clk
[idx
].clk
;
479 clk_name
= clk_hw_get_name(clk
);
480 lookup_symbol_name((unsigned long)dss_log
->clk
[idx
].f_name
, clk_fn
);
481 en
= dss_log
->clk
[idx
].mode
;
483 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU] clk_name:%30s, clk_fn:%30s, "
485 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
,
486 clk_name
, clk_fn
, en
== DSS_FLAG_IN
? "IN" : "OUT");
490 #ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
495 unsigned long old_freq
, target_freq
;
498 array_size
= ARRAY_SIZE(dss_log
->freq
) - 1;
499 if (!dumper
->active
) {
500 idx
= (atomic_read(&dss_idx
.freq_log_idx
) + 1) & array_size
;
501 dumper
->init_idx
= idx
;
502 dumper
->active
= true;
504 ts
= dss_log
->freq
[idx
].time
;
505 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
507 freq_name
= dss_log
->freq
[idx
].freq_name
;
508 old_freq
= dss_log
->freq
[idx
].old_freq
;
509 target_freq
= dss_log
->freq
[idx
].target_freq
;
510 on_cpu
= dss_log
->freq
[idx
].cpu
;
511 en
= dss_log
->freq
[idx
].en
;
513 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] freq_name:%16s, "
514 "old_freq:%16lu, target_freq:%16lu, %3s\n",
515 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, on_cpu
,
516 freq_name
, old_freq
, target_freq
,
517 en
== DSS_FLAG_IN
? "IN" : "OUT");
521 #ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
522 case DSS_FLAG_PRINTK
:
525 char callstack
[CONFIG_DEBUG_SNAPSHOT_CALLSTACK
][KSYM_NAME_LEN
];
529 array_size
= ARRAY_SIZE(dss_log
->printk
) - 1;
530 if (!dumper
->active
) {
531 idx
= (atomic_read(&dss_idx
.printk_log_idx
) + 1) & array_size
;
532 dumper
->init_idx
= idx
;
533 dumper
->active
= true;
535 ts
= dss_log
->printk
[idx
].time
;
536 cpu
= dss_log
->printk
[idx
].cpu
;
537 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
538 log
= dss_log
->printk
[idx
].log
;
539 for (i
= 0; i
< CONFIG_DEBUG_SNAPSHOT_CALLSTACK
; i
++)
540 lookup_symbol_name((unsigned long)dss_log
->printk
[idx
].caller
[i
],
543 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] log:%s, callstack:%s, %s, %s, %s\n",
544 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
545 log
, callstack
[0], callstack
[1], callstack
[2], callstack
[3]);
548 case DSS_FLAG_PRINTKL
:
550 char callstack
[CONFIG_DEBUG_SNAPSHOT_CALLSTACK
][KSYM_NAME_LEN
];
555 array_size
= ARRAY_SIZE(dss_log
->printkl
) - 1;
556 if (!dumper
->active
) {
557 idx
= (atomic_read(&dss_idx
.printkl_log_idx
) + 1) & array_size
;
558 dumper
->init_idx
= idx
;
559 dumper
->active
= true;
561 ts
= dss_log
->printkl
[idx
].time
;
562 cpu
= dss_log
->printkl
[idx
].cpu
;
563 rem_nsec
= do_div(ts
, NSEC_PER_SEC
);
564 msg
= dss_log
->printkl
[idx
].msg
;
565 val
= dss_log
->printkl
[idx
].val
;
566 for (i
= 0; i
< CONFIG_DEBUG_SNAPSHOT_CALLSTACK
; i
++)
567 lookup_symbol_name((unsigned long)dss_log
->printkl
[idx
].caller
[i
],
570 *len
= snprintf(line
, size
, "[%8lu.%09lu][%04d:CPU%u] msg:%zx, val:%zx, callstack: %s, %s, %s, %s\n",
571 (unsigned long)ts
, rem_nsec
/ NSEC_PER_USEC
, idx
, cpu
,
572 msg
, val
, callstack
[0], callstack
[1], callstack
[2], callstack
[3]);
577 snprintf(line
, size
, "unsupported inforation to dump\n");
580 if (array_size
== idx
)
583 dumper
->cur_idx
= idx
+ 1;
591 static inline unsigned long pure_arch_local_irq_save(void)
596 "mrs %0, daif // arch_local_irq_save\n"
605 static inline void pure_arch_local_irq_restore(unsigned long flags
)
608 "msr daif, %0 // arch_local_irq_restore"
614 static inline unsigned long arch_local_irq_save(void)
619 " mrs %0, cpsr @ arch_local_irq_save\n"
621 : "=r" (flags
) : : "memory", "cc");
625 static inline void arch_local_irq_restore(unsigned long flags
)
628 " msr cpsr_c, %0 @ local_irq_restore"
635 void dbg_snapshot_task(int cpu
, void *v_task
)
637 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
639 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
642 unsigned long i
= atomic_inc_return(&dss_idx
.task_log_idx
[cpu
]) &
643 (ARRAY_SIZE(dss_log
->task
[0]) - 1);
645 dss_log
->task
[cpu
][i
].time
= cpu_clock(cpu
);
646 dss_log
->task
[cpu
][i
].sp
= (unsigned long)current_stack_pointer
;
647 dss_log
->task
[cpu
][i
].task
= (struct task_struct
*)v_task
;
648 dss_log
->task
[cpu
][i
].pid
= (int)((struct task_struct
*)v_task
)->pid
;
649 strncpy(dss_log
->task
[cpu
][i
].task_comm
,
650 dss_log
->task
[cpu
][i
].task
->comm
,
655 void dbg_snapshot_work(void *worker
, void *v_task
, void *fn
, int en
)
657 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
659 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
663 int cpu
= raw_smp_processor_id();
664 unsigned long i
= atomic_inc_return(&dss_idx
.work_log_idx
[cpu
]) &
665 (ARRAY_SIZE(dss_log
->work
[0]) - 1);
666 struct task_struct
*task
= (struct task_struct
*)v_task
;
667 dss_log
->work
[cpu
][i
].time
= cpu_clock(cpu
);
668 dss_log
->work
[cpu
][i
].sp
= (unsigned long) current_stack_pointer
;
669 dss_log
->work
[cpu
][i
].worker
= (struct worker
*)worker
;
670 strncpy(dss_log
->work
[cpu
][i
].task_comm
, task
->comm
, TASK_COMM_LEN
- 1);
671 dss_log
->work
[cpu
][i
].fn
= (work_func_t
)fn
;
672 dss_log
->work
[cpu
][i
].en
= en
;
676 void dbg_snapshot_cpuidle(char *modes
, unsigned state
, int diff
, int en
)
678 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
680 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
683 int cpu
= raw_smp_processor_id();
684 unsigned long i
= atomic_inc_return(&dss_idx
.cpuidle_log_idx
[cpu
]) &
685 (ARRAY_SIZE(dss_log
->cpuidle
[0]) - 1);
687 dss_log
->cpuidle
[cpu
][i
].time
= cpu_clock(cpu
);
688 dss_log
->cpuidle
[cpu
][i
].modes
= modes
;
689 dss_log
->cpuidle
[cpu
][i
].state
= state
;
690 dss_log
->cpuidle
[cpu
][i
].sp
= (unsigned long) current_stack_pointer
;
691 dss_log
->cpuidle
[cpu
][i
].num_online_cpus
= num_online_cpus();
692 dss_log
->cpuidle
[cpu
][i
].delta
= diff
;
693 dss_log
->cpuidle
[cpu
][i
].en
= en
;
697 void dbg_snapshot_suspend(void *fn
, void *dev
, int en
)
699 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
701 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
704 int cpu
= raw_smp_processor_id();
705 unsigned long i
= atomic_inc_return(&dss_idx
.suspend_log_idx
) &
706 (ARRAY_SIZE(dss_log
->suspend
) - 1);
708 dss_log
->suspend
[i
].time
= cpu_clock(cpu
);
709 dss_log
->suspend
[i
].sp
= (unsigned long) current_stack_pointer
;
710 dss_log
->suspend
[i
].fn
= fn
;
711 dss_log
->suspend
[i
].dev
= (struct device
*)dev
;
712 dss_log
->suspend
[i
].core
= cpu
;
713 dss_log
->suspend
[i
].en
= en
;
717 static void dbg_snapshot_print_calltrace(void)
721 pr_info("\n<Call trace>\n");
722 for (i
= 0; i
< DSS_NR_CPUS
; i
++) {
723 pr_info("CPU ID: %d -----------------------------------------------\n", i
);
724 pr_info("%s", dss_lastinfo
.log
[i
]);
728 void dbg_snapshot_save_log(int cpu
, unsigned long where
)
730 if (dss_lastinfo
.last_p
[cpu
] == NULL
)
731 dss_lastinfo
.last_p
[cpu
] = &dss_lastinfo
.log
[cpu
][0];
733 if (dss_lastinfo
.last_p
[cpu
] > &dss_lastinfo
.log
[cpu
][SZ_1K
- SZ_128
])
736 *(unsigned long *)&(dss_lastinfo
.last_p
[cpu
]) += sprintf(dss_lastinfo
.last_p
[cpu
],
737 "[<%p>] %pS\n", (void *)where
, (void *)where
);
741 static void dbg_snapshot_get_sec(unsigned long long ts
, unsigned long *sec
, unsigned long *msec
)
743 *sec
= ts
/ NSEC_PER_SEC
;
744 *msec
= (ts
% NSEC_PER_SEC
) / USEC_PER_MSEC
;
747 static void dbg_snapshot_print_last_irq(int cpu
)
749 unsigned long idx
, sec
, msec
;
750 char fn_name
[KSYM_NAME_LEN
];
752 idx
= atomic_read(&dss_idx
.irq_log_idx
[cpu
]) & (ARRAY_SIZE(dss_log
->irq
[0]) - 1);
753 dbg_snapshot_get_sec(dss_log
->irq
[cpu
][idx
].time
, &sec
, &msec
);
754 lookup_symbol_name((unsigned long)dss_log
->irq
[cpu
][idx
].fn
, fn_name
);
756 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %8d, %10s: %2d, %s\n",
757 ">>> last irq", idx
, sec
, msec
,
759 "irq", dss_log
->irq
[cpu
][idx
].irq
,
760 "en", dss_log
->irq
[cpu
][idx
].en
,
761 (dss_log
->irq
[cpu
][idx
].en
== 1) ? "[Missmatch]" : "");
764 static void dbg_snapshot_print_last_task(int cpu
)
766 unsigned long idx
, sec
, msec
;
767 struct task_struct
*task
;
769 idx
= atomic_read(&dss_idx
.task_log_idx
[cpu
]) & (ARRAY_SIZE(dss_log
->task
[0]) - 1);
770 dbg_snapshot_get_sec(dss_log
->task
[cpu
][idx
].time
, &sec
, &msec
);
771 task
= dss_log
->task
[cpu
][idx
].task
;
773 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: 0x%-16p, %10s: %16llu\n",
774 ">>> last task", idx
, sec
, msec
,
775 "task_comm", (task
) ? task
->comm
: "NULL",
777 "exec_start", (task
) ? task
->se
.exec_start
: 0);
780 static void dbg_snapshot_print_last_work(int cpu
)
782 unsigned long idx
, sec
, msec
;
783 char fn_name
[KSYM_NAME_LEN
];
785 idx
= atomic_read(&dss_idx
.work_log_idx
[cpu
]) & (ARRAY_SIZE(dss_log
->work
[0]) - 1);
786 dbg_snapshot_get_sec(dss_log
->work
[cpu
][idx
].time
, &sec
, &msec
);
787 lookup_symbol_name((unsigned long)dss_log
->work
[cpu
][idx
].fn
, fn_name
);
789 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %20s, %3s: %3d %s\n",
790 ">>> last work", idx
, sec
, msec
,
791 "task_name", dss_log
->work
[cpu
][idx
].task_comm
,
793 "en", dss_log
->work
[cpu
][idx
].en
,
794 (dss_log
->work
[cpu
][idx
].en
== 1) ? "[Missmatch]" : "");
797 static void dbg_snapshot_print_last_cpuidle(int cpu
)
799 unsigned long idx
, sec
, msec
;
801 idx
= atomic_read(&dss_idx
.cpuidle_log_idx
[cpu
]) & (ARRAY_SIZE(dss_log
->cpuidle
[0]) - 1);
802 dbg_snapshot_get_sec(dss_log
->cpuidle
[cpu
][idx
].time
, &sec
, &msec
);
804 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24d, %8s: %4s, %6s: %3d, %12s: %2d, %3s: %3d %s\n",
805 ">>> last cpuidle", idx
, sec
, msec
,
806 "stay time", dss_log
->cpuidle
[cpu
][idx
].delta
,
807 "modes", dss_log
->cpuidle
[cpu
][idx
].modes
,
808 "state", dss_log
->cpuidle
[cpu
][idx
].state
,
809 "online_cpus", dss_log
->cpuidle
[cpu
][idx
].num_online_cpus
,
810 "en", dss_log
->cpuidle
[cpu
][idx
].en
,
811 (dss_log
->cpuidle
[cpu
][idx
].en
== 1) ? "[Missmatch]" : "");
814 static void dbg_snapshot_print_lastinfo(void)
818 pr_info("<last info>\n");
819 for (cpu
= 0; cpu
< DSS_NR_CPUS
; cpu
++) {
820 pr_info("CPU ID: %d -----------------------------------------------\n", cpu
);
821 dbg_snapshot_print_last_task(cpu
);
822 dbg_snapshot_print_last_work(cpu
);
823 dbg_snapshot_print_last_irq(cpu
);
824 dbg_snapshot_print_last_cpuidle(cpu
);
828 #ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
829 void dbg_snapshot_regulator(unsigned long long timestamp
, char* f_name
, unsigned int addr
, unsigned int volt
, unsigned int rvolt
, int en
)
831 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
833 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
836 int cpu
= raw_smp_processor_id();
837 unsigned long i
= atomic_inc_return(&dss_idx
.regulator_log_idx
) &
838 (ARRAY_SIZE(dss_log
->regulator
) - 1);
839 int size
= strlen(f_name
);
842 dss_log
->regulator
[i
].time
= cpu_clock(cpu
);
843 dss_log
->regulator
[i
].cpu
= cpu
;
844 dss_log
->regulator
[i
].acpm_time
= timestamp
;
845 strncpy(dss_log
->regulator
[i
].name
, f_name
, size
);
846 dss_log
->regulator
[i
].reg
= addr
;
847 dss_log
->regulator
[i
].en
= en
;
848 dss_log
->regulator
[i
].voltage
= volt
;
849 dss_log
->regulator
[i
].raw_volt
= rvolt
;
854 #ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
855 void dbg_snapshot_thermal(void *data
, unsigned int temp
, char *name
, unsigned int max_cooling
)
857 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
859 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
862 int cpu
= raw_smp_processor_id();
863 unsigned long i
= atomic_inc_return(&dss_idx
.thermal_log_idx
) &
864 (ARRAY_SIZE(dss_log
->thermal
) - 1);
866 dss_log
->thermal
[i
].time
= cpu_clock(cpu
);
867 dss_log
->thermal
[i
].cpu
= cpu
;
868 dss_log
->thermal
[i
].data
= (struct exynos_tmu_platform_data
*)data
;
869 dss_log
->thermal
[i
].temp
= temp
;
870 dss_log
->thermal
[i
].cooling_device
= name
;
871 dss_log
->thermal
[i
].cooling_state
= max_cooling
;
876 void dbg_snapshot_irq(int irq
, void *fn
, void *val
, int en
)
878 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
881 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
884 flags
= pure_arch_local_irq_save();
886 int cpu
= raw_smp_processor_id();
889 for (i
= 0; i
< ARRAY_SIZE(dss_irqlog_exlist
); i
++) {
890 if (irq
== dss_irqlog_exlist
[i
]) {
891 pure_arch_local_irq_restore(flags
);
895 i
= atomic_inc_return(&dss_idx
.irq_log_idx
[cpu
]) &
896 (ARRAY_SIZE(dss_log
->irq
[0]) - 1);
898 dss_log
->irq
[cpu
][i
].time
= cpu_clock(cpu
);
899 dss_log
->irq
[cpu
][i
].sp
= (unsigned long) current_stack_pointer
;
900 dss_log
->irq
[cpu
][i
].irq
= irq
;
901 dss_log
->irq
[cpu
][i
].fn
= (void *)fn
;
902 dss_log
->irq
[cpu
][i
].action
= (struct irqaction
*)val
;
903 dss_log
->irq
[cpu
][i
].en
= en
;
905 pure_arch_local_irq_restore(flags
);
908 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
909 void dbg_snapshot_irq_exit(unsigned int irq
, unsigned long long start_time
)
911 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
914 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
917 for (i
= 0; i
< ARRAY_SIZE(dss_irqexit_exlist
); i
++)
918 if (irq
== dss_irqexit_exlist
[i
])
921 int cpu
= raw_smp_processor_id();
922 unsigned long long time
, latency
;
924 i
= atomic_inc_return(&dss_idx
.irq_exit_log_idx
[cpu
]) &
925 (ARRAY_SIZE(dss_log
->irq_exit
[0]) - 1);
927 time
= cpu_clock(cpu
);
928 latency
= time
- start_time
;
930 if (unlikely(latency
>
931 (dss_irqexit_threshold
* 1000))) {
932 dss_log
->irq_exit
[cpu
][i
].latency
= latency
;
933 dss_log
->irq_exit
[cpu
][i
].sp
= (unsigned long) current_stack_pointer
;
934 dss_log
->irq_exit
[cpu
][i
].end_time
= time
;
935 dss_log
->irq_exit
[cpu
][i
].time
= start_time
;
936 dss_log
->irq_exit
[cpu
][i
].irq
= irq
;
938 atomic_dec(&dss_idx
.irq_exit_log_idx
[cpu
]);
943 #ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
944 void dbg_snapshot_spinlock(void *v_lock
, int en
)
946 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
948 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
951 int cpu
= raw_smp_processor_id();
952 unsigned index
= atomic_inc_return(&dss_idx
.spinlock_log_idx
[cpu
]);
953 unsigned long j
, i
= index
& (ARRAY_SIZE(dss_log
->spinlock
[0]) - 1);
954 raw_spinlock_t
*lock
= (raw_spinlock_t
*)v_lock
;
955 #ifdef CONFIG_ARM_ARCH_TIMER
956 dss_log
->spinlock
[cpu
][i
].time
= cpu_clock(cpu
);
958 dss_log
->spinlock
[cpu
][i
].time
= index
;
960 dss_log
->spinlock
[cpu
][i
].sp
= (unsigned long) current_stack_pointer
;
961 dss_log
->spinlock
[cpu
][i
].jiffies
= jiffies_64
;
962 #ifdef CONFIG_DEBUG_SPINLOCK
963 dss_log
->spinlock
[cpu
][i
].lock
= lock
;
964 dss_log
->spinlock
[cpu
][i
].next
= lock
->raw_lock
.next
;
965 dss_log
->spinlock
[cpu
][i
].owner
= lock
->raw_lock
.owner
;
967 dss_log
->spinlock
[cpu
][i
].en
= en
;
969 for (j
= 0; j
< dss_desc
.callstack
; j
++) {
970 dss_log
->spinlock
[cpu
][i
].caller
[j
] =
971 (void *)((size_t)return_address(j
+ 1));
977 #ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
978 void dbg_snapshot_irqs_disabled(unsigned long flags
)
980 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
981 int cpu
= raw_smp_processor_id();
983 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
986 if (unlikely(flags
)) {
987 unsigned j
, local_flags
= pure_arch_local_irq_save();
989 /* If flags has one, it shows interrupt enable status */
990 atomic_set(&dss_idx
.irqs_disabled_log_idx
[cpu
], -1);
991 dss_log
->irqs_disabled
[cpu
][0].time
= 0;
992 dss_log
->irqs_disabled
[cpu
][0].index
= 0;
993 dss_log
->irqs_disabled
[cpu
][0].task
= NULL
;
994 dss_log
->irqs_disabled
[cpu
][0].task_comm
= NULL
;
996 for (j
= 0; j
< dss_desc
.callstack
; j
++) {
997 dss_log
->irqs_disabled
[cpu
][0].caller
[j
] = NULL
;
1000 pure_arch_local_irq_restore(local_flags
);
1002 unsigned index
= atomic_inc_return(&dss_idx
.irqs_disabled_log_idx
[cpu
]);
1003 unsigned long j
, i
= index
% ARRAY_SIZE(dss_log
->irqs_disabled
[0]);
1005 dss_log
->irqs_disabled
[cpu
][0].time
= jiffies_64
;
1006 dss_log
->irqs_disabled
[cpu
][i
].index
= index
;
1007 dss_log
->irqs_disabled
[cpu
][i
].task
= get_current();
1008 dss_log
->irqs_disabled
[cpu
][i
].task_comm
= get_current()->comm
;
1010 for (j
= 0; j
< dss_desc
.callstack
; j
++) {
1011 dss_log
->irqs_disabled
[cpu
][i
].caller
[j
] =
1012 (void *)((size_t)return_address(j
+ 1));
1018 #ifdef CONFIG_DEBUG_SNAPSHOT_CLK
1019 void dbg_snapshot_clk(void *clock
, const char *func_name
, unsigned long arg
, int mode
)
1021 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
1023 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
1026 int cpu
= raw_smp_processor_id();
1027 unsigned long i
= atomic_inc_return(&dss_idx
.clk_log_idx
) &
1028 (ARRAY_SIZE(dss_log
->clk
) - 1);
1030 dss_log
->clk
[i
].time
= cpu_clock(cpu
);
1031 dss_log
->clk
[i
].mode
= mode
;
1032 dss_log
->clk
[i
].arg
= arg
;
1033 dss_log
->clk
[i
].clk
= (struct clk_hw
*)clock
;
1034 dss_log
->clk
[i
].f_name
= func_name
;
1039 #ifdef CONFIG_DEBUG_SNAPSHOT_PMU
1040 void dbg_snapshot_pmu(int id
, const char *func_name
, int mode
)
1042 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
1044 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
1047 int cpu
= raw_smp_processor_id();
1048 unsigned long i
= atomic_inc_return(&dss_idx
.pmu_log_idx
) &
1049 (ARRAY_SIZE(dss_log
->pmu
) - 1);
1051 dss_log
->pmu
[i
].time
= cpu_clock(cpu
);
1052 dss_log
->pmu
[i
].mode
= mode
;
1053 dss_log
->pmu
[i
].id
= id
;
1054 dss_log
->pmu
[i
].f_name
= func_name
;
1059 static struct notifier_block
**dss_should_check_nl
[] = {
1060 (struct notifier_block
**)(&panic_notifier_list
.head
),
1061 (struct notifier_block
**)(&reboot_notifier_list
.head
),
1062 (struct notifier_block
**)(&restart_handler_list
.head
),
1063 #ifdef CONFIG_EXYNOS_ITMON
1064 (struct notifier_block
**)(&itmon_notifier_list
.head
),
1068 void dbg_snapshot_print_notifier_call(void **nl
, unsigned long func
, int en
)
1070 struct notifier_block
**nl_org
= (struct notifier_block
**)nl
;
1071 char notifier_name
[KSYM_NAME_LEN
];
1072 char notifier_func_name
[KSYM_NAME_LEN
];
1075 for (i
= 0; i
< ARRAY_SIZE(dss_should_check_nl
); i
++) {
1076 if (nl_org
== dss_should_check_nl
[i
]) {
1077 lookup_symbol_name((unsigned long)nl_org
, notifier_name
);
1078 lookup_symbol_name((unsigned long)func
, notifier_func_name
);
1080 pr_info("debug-snapshot: %s -> %s call %s\n",
1083 en
== DSS_FLAG_IN
? "+" : "-");
1089 #ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
1090 static void dbg_snapshot_print_freqinfo(void)
1092 unsigned long idx
, sec
, msec
;
1095 unsigned long old_freq
, target_freq
;
1097 pr_info("\n<freq info>\n");
1099 for (i
= 0; i
< DSS_FLAG_END
; i
++) {
1100 idx
= atomic_read(&dss_lastinfo
.freq_last_idx
[i
]) & (ARRAY_SIZE(dss_log
->freq
) - 1);
1101 freq_name
= dss_log
->freq
[idx
].freq_name
;
1102 if ((!freq_name
) || strncmp(freq_name
, dss_freq_name
[i
], strlen(dss_freq_name
[i
]))) {
1103 pr_info("%10s: no infomation\n", dss_freq_name
[i
]);
1107 dbg_snapshot_get_sec(dss_log
->freq
[idx
].time
, &sec
, &msec
);
1108 old_freq
= dss_log
->freq
[idx
].old_freq
;
1109 target_freq
= dss_log
->freq
[idx
].target_freq
;
1110 pr_info("%10s: [%4lu] %10lu.%06lu sec, %12s: %6luMhz, %12s: %6luMhz, %3s: %3d %s\n",
1111 freq_name
, idx
, sec
, msec
,
1112 "old_freq", old_freq
/1000,
1113 "target_freq", target_freq
/1000,
1114 "en", dss_log
->freq
[idx
].en
,
1115 (dss_log
->freq
[idx
].en
== 1) ? "[Missmatch]" : "");
1119 void dbg_snapshot_freq(int type
, unsigned long old_freq
, unsigned long target_freq
, int en
)
1121 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
1123 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
1126 int cpu
= raw_smp_processor_id();
1127 unsigned long i
= atomic_inc_return(&dss_idx
.freq_log_idx
) &
1128 (ARRAY_SIZE(dss_log
->freq
) - 1);
1130 if (atomic_read(&dss_idx
.freq_log_idx
) > atomic_read(&dss_lastinfo
.freq_last_idx
[type
]))
1131 atomic_set(&dss_lastinfo
.freq_last_idx
[type
], atomic_read(&dss_idx
.freq_log_idx
));
1133 dss_log
->freq
[i
].time
= cpu_clock(cpu
);
1134 dss_log
->freq
[i
].cpu
= cpu
;
1135 dss_log
->freq
[i
].freq_name
= dss_freq_name
[type
];
1136 dss_log
->freq
[i
].type
= type
;
1137 dss_log
->freq
[i
].old_freq
= old_freq
;
1138 dss_log
->freq
[i
].target_freq
= target_freq
;
1139 dss_log
->freq
[i
].en
= en
;
1144 #ifndef arch_irq_stat
1145 #define arch_irq_stat() 0
1148 static void dbg_snapshot_print_irq(void)
1153 for_each_possible_cpu(i
) {
1154 sum
+= kstat_cpu_irqs_sum(i
);
1155 sum
+= arch_irq_stat_cpu(i
);
1157 sum
+= arch_irq_stat();
1159 pr_info("\n<irq info>\n");
1160 pr_info("------------------------------------------------------------------\n");
1162 pr_info("sum irq : %llu", (unsigned long long)sum
);
1163 pr_info("------------------------------------------------------------------\n");
1165 for_each_irq_nr(j
) {
1166 unsigned int irq_stat
= kstat_irqs(j
);
1169 struct irq_desc
*desc
= irq_to_desc(j
);
1172 name
= desc
->action
? (desc
->action
->name
? desc
->action
->name
: "???") : "???";
1173 pr_info("irq-%-4d : %8u %s\n", j
, irq_stat
, name
);
/*
 * Emit the consolidated panic report: last-info, optional freq info,
 * the call trace, and the IRQ statistics, framed by separator lines.
 */
void dbg_snapshot_print_panic_report(void)
{
	pr_info("============================================================\n");
	pr_info("Panic Report\n");
	pr_info("============================================================\n");
	dbg_snapshot_print_lastinfo();
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
	dbg_snapshot_print_freqinfo();
#endif
	dbg_snapshot_print_calltrace();
	dbg_snapshot_print_irq();
	pr_info("============================================================\n");
}
#ifdef CONFIG_DEBUG_SNAPSHOT_DM
/*
 * Record one DVFS-manager (DM) event.
 * @type:   DM domain number
 * @min:    requested minimum frequency
 * @max:    requested maximum frequency
 * @wait_t: time spent waiting on the DM transition
 * @t:      time spent performing the DM transition
 */
void dbg_snapshot_dm(int type, unsigned long min, unsigned long max, s32 wait_t, s32 t)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long i;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	i = atomic_inc_return(&dss_idx.dm_log_idx) &
		(ARRAY_SIZE(dss_log->dm) - 1);

	dss_log->dm[i].time = cpu_clock(cpu);
	dss_log->dm[i].cpu = cpu;
	dss_log->dm[i].dm_num = type;
	dss_log->dm[i].min_freq = min;
	dss_log->dm[i].max_freq = max;
	dss_log->dm[i].wait_dmt = wait_t;
	dss_log->dm[i].do_dmt = t;
}
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
/*
 * Record one hrtimer event in the current CPU's per-CPU ring.
 * @timer: the struct hrtimer being processed
 * @now:   pointer to the current time value (ns)
 * @fn:    the timer callback
 * @en:    DSS_FLAG_IN / DSS_FLAG_OUT marker
 */
void dbg_snapshot_hrtimer(void *timer, s64 *now, void *fn, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long i;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	i = atomic_inc_return(&dss_idx.hrtimer_log_idx[cpu]) &
		(ARRAY_SIZE(dss_log->hrtimers[0]) - 1);

	dss_log->hrtimers[cpu][i].time = cpu_clock(cpu);
	dss_log->hrtimers[cpu][i].now = *now;
	dss_log->hrtimers[cpu][i].timer = (struct hrtimer *)timer;
	dss_log->hrtimers[cpu][i].fn = fn;
	dss_log->hrtimers[cpu][i].en = en;
}
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
/*
 * Record one I2C transfer event.
 * @adap: adapter performing the transfer
 * @msgs: message array of the transfer (pointer is stored, not copied)
 * @num:  number of messages in @msgs
 * @en:   DSS_FLAG_IN / DSS_FLAG_OUT marker
 */
void dbg_snapshot_i2c(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long i;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	i = atomic_inc_return(&dss_idx.i2c_log_idx) &
		(ARRAY_SIZE(dss_log->i2c) - 1);

	dss_log->i2c[i].time = cpu_clock(cpu);
	dss_log->i2c[i].cpu = cpu;
	dss_log->i2c[i].adap = adap;
	dss_log->i2c[i].msgs = msgs;
	dss_log->i2c[i].num = num;
	dss_log->i2c[i].en = en;
}
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
/*
 * Record one SPI transfer event.
 * @ctlr:    controller performing the transfer
 * @cur_msg: message currently in flight (pointer is stored, not copied)
 * @en:      DSS_FLAG_IN / DSS_FLAG_OUT marker
 */
void dbg_snapshot_spi(struct spi_controller *ctlr, struct spi_message *cur_msg, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long i;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	i = atomic_inc_return(&dss_idx.spi_log_idx) &
		(ARRAY_SIZE(dss_log->spi) - 1);

	dss_log->spi[i].time = cpu_clock(cpu);
	dss_log->spi[i].cpu = cpu;
	dss_log->spi[i].ctlr = ctlr;
	dss_log->spi[i].cur_msg = cur_msg;
	dss_log->spi[i].en = en;
}
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
/*
 * Record one binder transaction (or transaction-error) event.
 * @base:        common transaction info; required — nothing is logged
 *               without it
 * @transaction: optional detailed transaction info (may be NULL)
 * @error:       optional error info (may be NULL)
 *
 * Fix: the pointers are guarded before dereferencing; when an optional
 * part is absent its fields in the ring slot are zeroed so stale data
 * from a previous record cannot leak into this one.
 */
void dbg_snapshot_binder(struct trace_binder_transaction_base *base,
			 struct trace_binder_transaction *transaction,
			 struct trace_binder_transaction_error *error)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long i;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;
	if (base == NULL)
		return;

	cpu = raw_smp_processor_id();
	i = atomic_inc_return(&dss_idx.binder_log_idx) &
		(ARRAY_SIZE(dss_log->binder) - 1);

	dss_log->binder[i].time = cpu_clock(cpu);
	dss_log->binder[i].cpu = cpu;
	dss_log->binder[i].base = *base;

	if (transaction) {
		dss_log->binder[i].transaction = *transaction;
	} else {
		dss_log->binder[i].transaction.to_node_id = 0;
		dss_log->binder[i].transaction.reply = 0;
		dss_log->binder[i].transaction.flags = 0;
		dss_log->binder[i].transaction.code = 0;
	}

	if (error) {
		dss_log->binder[i].error = *error;
	} else {
		dss_log->binder[i].error.return_error = 0;
		dss_log->binder[i].error.return_error_param = 0;
		dss_log->binder[i].error.return_error_line = 0;
	}
}
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
/*
 * Record one ACPM (power-management firmware) log event.
 * @timestamp: timestamp supplied by the ACPM firmware
 * @log:       message text; copied into the slot, truncated to fit
 * @data:      raw payload word associated with the message
 *
 * Fix: the copy length is clamped to the slot size.  Previously
 * strncpy() was given strlen(log) unchecked, so a long message would
 * overflow the fixed-size log field (and the explicit terminator write
 * would land out of bounds as well).
 */
void dbg_snapshot_acpm(unsigned long long timestamp, const char *log, unsigned int data)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long i;
	size_t len;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	i = atomic_inc_return(&dss_idx.acpm_log_idx) &
		(ARRAY_SIZE(dss_log->acpm) - 1);

	/* Leave room for the NUL terminator written below. */
	len = strlen(log);
	if (len > sizeof(dss_log->acpm[i].log) - 1)
		len = sizeof(dss_log->acpm[i].log) - 1;

	dss_log->acpm[i].time = cpu_clock(cpu);
	dss_log->acpm[i].acpm_time = timestamp;
	strncpy(dss_log->acpm[i].log, log, len);
	dss_log->acpm[i].log[len] = '\0';
	dss_log->acpm[i].data = data;
}
#endif
1344 #ifdef CONFIG_DEBUG_SNAPSHOT_REG
1345 static phys_addr_t
virt_to_phys_high(size_t vaddr
)
1347 phys_addr_t paddr
= 0;
1352 if (virt_addr_valid((void *) vaddr
)) {
1353 paddr
= virt_to_phys((void *) vaddr
);
1357 pgd
= pgd_offset_k(vaddr
);
1358 if (pgd_none(*pgd
) || unlikely(pgd_bad(*pgd
)))
1361 if (pgd_val(*pgd
) & 2) {
1362 paddr
= pgd_val(*pgd
) & SECTION_MASK
;
1366 pmd
= pmd_offset((pud_t
*)pgd
, vaddr
);
1367 if (pmd_none_or_clear_bad(pmd
))
1370 pte
= pte_offset_kernel(pmd
, vaddr
);
1374 paddr
= pte_val(*pte
) & PAGE_MASK
;
1377 return paddr
| (vaddr
& UL(SZ_4K
- 1));
1380 void dbg_snapshot_reg(unsigned int read
, size_t val
, size_t reg
, int en
)
1382 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
1383 int cpu
= raw_smp_processor_id();
1385 size_t phys_reg
, start_addr
, end_addr
;
1387 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
1390 if (dss_reg_exlist
[0].addr
== 0)
1393 phys_reg
= virt_to_phys_high(reg
);
1394 if (unlikely(!phys_reg
))
1397 for (j
= 0; j
< ARRAY_SIZE(dss_reg_exlist
); j
++) {
1398 if (dss_reg_exlist
[j
].addr
== 0)
1400 start_addr
= dss_reg_exlist
[j
].addr
;
1401 end_addr
= start_addr
+ dss_reg_exlist
[j
].size
;
1402 if (start_addr
<= phys_reg
&& phys_reg
<= end_addr
)
1406 i
= atomic_inc_return(&dss_idx
.reg_log_idx
[cpu
]) &
1407 (ARRAY_SIZE(dss_log
->reg
[0]) - 1);
1409 dss_log
->reg
[cpu
][i
].time
= cpu_clock(cpu
);
1410 dss_log
->reg
[cpu
][i
].read
= read
;
1411 dss_log
->reg
[cpu
][i
].val
= val
;
1412 dss_log
->reg
[cpu
][i
].reg
= phys_reg
;
1413 dss_log
->reg
[cpu
][i
].en
= en
;
1415 for (j
= 0; j
< dss_desc
.callstack
; j
++) {
1416 dss_log
->reg
[cpu
][i
].caller
[j
] =
1417 (void *)((size_t)return_address(j
+ 1));
1422 #ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
1423 void dbg_snapshot_clockevent(unsigned long long clc
, int64_t delta
, void *next_event
)
1425 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
1427 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
1430 int cpu
= raw_smp_processor_id();
1431 unsigned long j
, i
= atomic_inc_return(&dss_idx
.clockevent_log_idx
[cpu
]) &
1432 (ARRAY_SIZE(dss_log
->clockevent
[0]) - 1);
1434 dss_log
->clockevent
[cpu
][i
].time
= cpu_clock(cpu
);
1435 dss_log
->clockevent
[cpu
][i
].mct_cycle
= clc
;
1436 dss_log
->clockevent
[cpu
][i
].delta_ns
= delta
;
1437 dss_log
->clockevent
[cpu
][i
].next_event
= *((ktime_t
*)next_event
);
1439 for (j
= 0; j
< dss_desc
.callstack
; j
++) {
1440 dss_log
->clockevent
[cpu
][i
].caller
[j
] =
1441 (void *)((size_t)return_address(j
+ 1));
1446 void dbg_snapshot_printk(const char *fmt
, ...)
1448 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
1450 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
1453 int cpu
= raw_smp_processor_id();
1456 unsigned long j
, i
= atomic_inc_return(&dss_idx
.printk_log_idx
) &
1457 (ARRAY_SIZE(dss_log
->printk
) - 1);
1459 va_start(args
, fmt
);
1460 ret
= vsnprintf(dss_log
->printk
[i
].log
,
1461 sizeof(dss_log
->printk
[i
].log
), fmt
, args
);
1464 dss_log
->printk
[i
].time
= cpu_clock(cpu
);
1465 dss_log
->printk
[i
].cpu
= cpu
;
1467 for (j
= 0; j
< dss_desc
.callstack
; j
++) {
1468 dss_log
->printk
[i
].caller
[j
] =
1469 (void *)((size_t)return_address(j
));
1474 void dbg_snapshot_printkl(size_t msg
, size_t val
)
1476 struct dbg_snapshot_item
*item
= &dss_items
[dss_desc
.kevents_num
];
1478 if (unlikely(!dss_base
.enabled
|| !item
->entry
.enabled
))
1481 int cpu
= raw_smp_processor_id();
1482 unsigned long j
, i
= atomic_inc_return(&dss_idx
.printkl_log_idx
) &
1483 (ARRAY_SIZE(dss_log
->printkl
) - 1);
1485 dss_log
->printkl
[i
].time
= cpu_clock(cpu
);
1486 dss_log
->printkl
[i
].cpu
= cpu
;
1487 dss_log
->printkl
[i
].msg
= msg
;
1488 dss_log
->printkl
[i
].val
= val
;
1490 for (j
= 0; j
< dss_desc
.callstack
; j
++) {
1491 dss_log
->printkl
[i
].caller
[j
] =
1492 (void *)((size_t)return_address(j
));