lib/debug-snapshot-log.c (MotorolaMobilityLLC/kernel-slsi)
/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Debug-SnapShot: Debug Framework for Ramdump based debugging method
 * The original code is Exynos-Snapshot for Exynos SoC
 *
 * Author: Hosung Kim <hosung0.kim@samsung.com>
 * Author: Changki Kim <changki.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/kallsyms.h>
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/pstore_ram.h>
#include <linux/sched/clock.h>
#include <linux/ftrace.h>

#include "debug-snapshot-local.h"
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/hardirq.h>
#include <asm/stacktrace.h>
#include <asm/arch_timer.h>

#include <linux/debug-snapshot.h>
#include <linux/kernel_stat.h>
#include <linux/irqnr.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

struct dbg_snapshot_lastinfo {
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
        atomic_t freq_last_idx[DSS_FLAG_END];
#endif
        char log[DSS_NR_CPUS][SZ_1K];
        char *last_p[DSS_NR_CPUS];
};

struct dss_dumper {
        bool active;
        u32 items;
        int init_idx;
        int cur_idx;
        u32 cur_cpu;
        u32 step;
};

enum dss_kevent_flag {
        DSS_FLAG_TASK = 1,
        DSS_FLAG_WORK,
        DSS_FLAG_CPUIDLE,
        DSS_FLAG_SUSPEND,
        DSS_FLAG_IRQ,
        DSS_FLAG_IRQ_EXIT,
        DSS_FLAG_SPINLOCK,
        DSS_FLAG_IRQ_DISABLE,
        DSS_FLAG_CLK,
        DSS_FLAG_FREQ,
        DSS_FLAG_REG,
        DSS_FLAG_HRTIMER,
        DSS_FLAG_REGULATOR,
        DSS_FLAG_THERMAL,
        DSS_FLAG_MAILBOX,
        DSS_FLAG_CLOCKEVENT,
        DSS_FLAG_PRINTK,
        DSS_FLAG_PRINTKL,
        DSS_FLAG_KEVENT,
};

struct dbg_snapshot_log_idx {
        atomic_t task_log_idx[DSS_NR_CPUS];
        atomic_t work_log_idx[DSS_NR_CPUS];
        atomic_t cpuidle_log_idx[DSS_NR_CPUS];
        atomic_t suspend_log_idx;
        atomic_t irq_log_idx[DSS_NR_CPUS];
#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
        atomic_t spinlock_log_idx[DSS_NR_CPUS];
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
        atomic_t irqs_disabled_log_idx[DSS_NR_CPUS];
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_REG
        atomic_t reg_log_idx[DSS_NR_CPUS];
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
        atomic_t hrtimer_log_idx[DSS_NR_CPUS];
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
        atomic_t clk_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
        atomic_t pmu_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
        atomic_t freq_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_DM
        atomic_t dm_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
        atomic_t regulator_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
        atomic_t thermal_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
        atomic_t i2c_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
        atomic_t spi_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
        atomic_t binder_log_idx;
#endif
#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
        atomic_t printkl_log_idx;
        atomic_t printk_log_idx;
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
        atomic_t acpm_log_idx;
#endif
};

int dbg_snapshot_log_size = sizeof(struct dbg_snapshot_log);
/*
 * Interrupt include/exclude options.
 * If you want to exclude a particular interrupt from logging,
 * add its number to this array.
 */
int dss_irqlog_exlist[DSS_EX_MAX_NUM] = {
/* interrupt number ex) 152, 153, 154, */
        -1,
};

#ifdef CONFIG_DEBUG_SNAPSHOT_REG
struct dss_reg_list {
        size_t addr;
        size_t size;
};

static struct dss_reg_list dss_reg_exlist[] = {
/*
 * To reduce the impact of the register-logging feature on the system,
 * add registers that are accessed very often here - e.g. MCT, serial (UART).
 * physical address, size ex) {0x10C00000, 0x1000},
 */
        {DSS_REG_MCT_ADDR, DSS_REG_MCT_SIZE},
        {DSS_REG_UART_ADDR, DSS_REG_UART_SIZE},
        {0, 0},
        {0, 0},
        {0, 0},
        {0, 0},
        {0, 0},
        {0, 0},
        {0, 0},
};
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
static char *dss_freq_name[] = {
        "LITTLE", "BIG", "INT", "MIF", "ISP", "DISP", "INTCAM", "AUD", "IVA", "SCORE", "FSYS0",
};
#endif

/* Internal interface variable */
static struct dbg_snapshot_log_idx dss_idx;
static struct dbg_snapshot_lastinfo dss_lastinfo;

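/*
 * Reset every per-event ring-buffer index to -1 so that the first
 * atomic_inc_return() of each logger lands on slot 0.
 */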
void __init dbg_snapshot_init_log_idx(void)
{
        int i;

#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
        atomic_set(&(dss_idx.printk_log_idx), -1);
        atomic_set(&(dss_idx.printkl_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
        atomic_set(&(dss_idx.regulator_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
        atomic_set(&(dss_idx.thermal_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
        atomic_set(&(dss_idx.freq_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_DM
        atomic_set(&(dss_idx.dm_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
        atomic_set(&(dss_idx.clk_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
        atomic_set(&(dss_idx.pmu_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
        atomic_set(&(dss_idx.acpm_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
        atomic_set(&(dss_idx.i2c_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
        atomic_set(&(dss_idx.spi_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
        atomic_set(&(dss_idx.binder_log_idx), -1);
#endif
        atomic_set(&(dss_idx.suspend_log_idx), -1);

        for (i = 0; i < DSS_NR_CPUS; i++) {
                atomic_set(&(dss_idx.task_log_idx[i]), -1);
                atomic_set(&(dss_idx.work_log_idx[i]), -1);
                atomic_set(&(dss_idx.cpuidle_log_idx[i]), -1);
                atomic_set(&(dss_idx.irq_log_idx[i]), -1);
#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
                atomic_set(&(dss_idx.spinlock_log_idx[i]), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
                atomic_set(&(dss_idx.irqs_disabled_log_idx[i]), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_REG
                atomic_set(&(dss_idx.reg_log_idx[i]), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
                atomic_set(&(dss_idx.hrtimer_log_idx[i]), -1);
#endif
        }
}

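/*
 * Format one entry of the selected kevent ring buffer (task, work,
 * cpuidle, suspend, irq, ...) into 'line'.  The dss_dumper cursor
 * passed via v_dumper keeps the cpu/index/item state between calls;
 * returns false on bad input or once the walk wraps back to the
 * starting index.
 */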
bool dbg_snapshot_dumper_one(void *v_dumper, char *line, size_t size, size_t *len)
{
        bool ret = false;
        int idx, array_size;
        unsigned int cpu, items;
        unsigned long rem_nsec;
        u64 ts;
        struct dss_dumper *dumper = (struct dss_dumper *)v_dumper;

        if (!line || size < SZ_128 ||
                dumper->cur_cpu >= NR_CPUS)
                goto out;

        if (dumper->active) {
                if (dumper->init_idx == dumper->cur_idx)
                        goto out;
        }

        cpu = dumper->cur_cpu;
        idx = dumper->cur_idx;
        items = dumper->items;

        switch (items) {
        case DSS_FLAG_TASK:
        {
                struct task_struct *task;
                array_size = ARRAY_SIZE(dss_log->task[0]) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.task_log_idx[0]) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->task[cpu][idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);
                task = dss_log->task[cpu][idx].task;

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, "
                                        "task:0x%16p, stack:0x%16p, exec_start:%16llu\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                task->comm, task, task->stack,
                                task->se.exec_start);
                break;
        }
        case DSS_FLAG_WORK:
        {
                char work_fn[KSYM_NAME_LEN] = {0,};
                char *task_comm;
                int en;

                array_size = ARRAY_SIZE(dss_log->work[0]) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.work_log_idx[0]) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->work[cpu][idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);
                lookup_symbol_name((unsigned long)dss_log->work[cpu][idx].fn, work_fn);
                task_comm = dss_log->work[cpu][idx].task_comm;
                en = dss_log->work[cpu][idx].en;

                dumper->step = 6;
                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, work_fn:%32s, %3s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                task_comm, work_fn,
                                en == DSS_FLAG_IN ? "IN" : "OUT");
                break;
        }
        case DSS_FLAG_CPUIDLE:
        {
                unsigned int delta;
                int state, num_cpus, en;
                char *index;

                array_size = ARRAY_SIZE(dss_log->cpuidle[0]) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.cpuidle_log_idx[0]) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->cpuidle[cpu][idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);

                index = dss_log->cpuidle[cpu][idx].modes;
                en = dss_log->cpuidle[cpu][idx].en;
                state = dss_log->cpuidle[cpu][idx].state;
                num_cpus = dss_log->cpuidle[cpu][idx].num_online_cpus;
                delta = dss_log->cpuidle[cpu][idx].delta;

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] cpuidle: %s, "
                                        "state:%d, num_online_cpus:%d, stay_time:%8u, %3s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                index, state, num_cpus, delta,
                                en == DSS_FLAG_IN ? "IN" : "OUT");
                break;
        }
        case DSS_FLAG_SUSPEND:
        {
                char suspend_fn[KSYM_NAME_LEN];
                int en;

                array_size = ARRAY_SIZE(dss_log->suspend) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.suspend_log_idx) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->suspend[idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);

                lookup_symbol_name((unsigned long)dss_log->suspend[idx].fn, suspend_fn);
                en = dss_log->suspend[idx].en;

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] suspend_fn:%s, %3s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                suspend_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
                break;
        }
        case DSS_FLAG_IRQ:
        {
                char irq_fn[KSYM_NAME_LEN];
                int en, irq;

                array_size = ARRAY_SIZE(dss_log->irq[0]) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.irq_log_idx[0]) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->irq[cpu][idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);

                lookup_symbol_name((unsigned long)dss_log->irq[cpu][idx].fn, irq_fn);
                irq = dss_log->irq[cpu][idx].irq;
                en = dss_log->irq[cpu][idx].en;

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] irq:%6d, irq_fn:%32s, %3s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                irq, irq_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
                break;
        }
#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
        case DSS_FLAG_SPINLOCK:
        {
                unsigned int jiffies_local;
                char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
                int en, i;
                u16 next, owner;

                array_size = ARRAY_SIZE(dss_log->spinlock[0]) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.spinlock_log_idx[0]) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->spinlock[cpu][idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);

                jiffies_local = dss_log->spinlock[cpu][idx].jiffies;
                en = dss_log->spinlock[cpu][idx].en;
                for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
                        lookup_symbol_name((unsigned long)dss_log->spinlock[cpu][idx].caller[i],
                                        callstack[i]);

                next = dss_log->spinlock[cpu][idx].next;
                owner = dss_log->spinlock[cpu][idx].owner;

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] next:%8x, owner:%8x jiffies:%12u, %3s\n"
                                        "callstack: %s\n"
                                        " %s\n"
                                        " %s\n"
                                        " %s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                next, owner, jiffies_local,
                                en == DSS_FLAG_IN ? "IN" : "OUT",
                                callstack[0], callstack[1], callstack[2], callstack[3]);
                break;
        }
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
        case DSS_FLAG_CLK:
        {
                const char *clk_name;
                char clk_fn[KSYM_NAME_LEN];
                struct clk_hw *clk;
                int en;

                array_size = ARRAY_SIZE(dss_log->clk) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.clk_log_idx) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->clk[idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);

                clk = (struct clk_hw *)dss_log->clk[idx].clk;
                clk_name = clk_hw_get_name(clk);
                lookup_symbol_name((unsigned long)dss_log->clk[idx].f_name, clk_fn);
                en = dss_log->clk[idx].mode;

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU] clk_name:%30s, clk_fn:%30s, "
                                        ", %s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx,
                                clk_name, clk_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
                break;
        }
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
        case DSS_FLAG_FREQ:
        {
                char *freq_name;
                unsigned int on_cpu;
                unsigned long old_freq, target_freq;
                int en;

                array_size = ARRAY_SIZE(dss_log->freq) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.freq_log_idx) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->freq[idx].time;
                rem_nsec = do_div(ts, NSEC_PER_SEC);

                freq_name = dss_log->freq[idx].freq_name;
                old_freq = dss_log->freq[idx].old_freq;
                target_freq = dss_log->freq[idx].target_freq;
                on_cpu = dss_log->freq[idx].cpu;
                en = dss_log->freq[idx].en;

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] freq_name:%16s, "
                                        "old_freq:%16lu, target_freq:%16lu, %3s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, on_cpu,
                                freq_name, old_freq, target_freq,
                                en == DSS_FLAG_IN ? "IN" : "OUT");
                break;
        }
#endif
#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
        case DSS_FLAG_PRINTK:
        {
                char *log;
                char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
                unsigned int cpu;
                int i;

                array_size = ARRAY_SIZE(dss_log->printk) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.printk_log_idx) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->printk[idx].time;
                cpu = dss_log->printk[idx].cpu;
                rem_nsec = do_div(ts, NSEC_PER_SEC);
                log = dss_log->printk[idx].log;
                for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
                        lookup_symbol_name((unsigned long)dss_log->printk[idx].caller[i],
                                        callstack[i]);

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] log:%s, callstack:%s, %s, %s, %s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                log, callstack[0], callstack[1], callstack[2], callstack[3]);
                break;
        }
        case DSS_FLAG_PRINTKL:
        {
                char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
                size_t msg, val;
                unsigned int cpu;
                int i;

                array_size = ARRAY_SIZE(dss_log->printkl) - 1;
                if (!dumper->active) {
                        idx = (atomic_read(&dss_idx.printkl_log_idx) + 1) & array_size;
                        dumper->init_idx = idx;
                        dumper->active = true;
                }
                ts = dss_log->printkl[idx].time;
                cpu = dss_log->printkl[idx].cpu;
                rem_nsec = do_div(ts, NSEC_PER_SEC);
                msg = dss_log->printkl[idx].msg;
                val = dss_log->printkl[idx].val;
                for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
                        lookup_symbol_name((unsigned long)dss_log->printkl[idx].caller[i],
                                        callstack[i]);

                *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] msg:%zx, val:%zx, callstack: %s, %s, %s, %s\n",
                                (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
                                msg, val, callstack[0], callstack[1], callstack[2], callstack[3]);
                break;
        }
#endif
        default:
                snprintf(line, size, "unsupported information to dump\n");
                goto out;
        }
        if (array_size == idx)
                dumper->cur_idx = 0;
        else
                dumper->cur_idx = idx + 1;

        ret = true;
out:
        return ret;
}

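/*
 * Raw interrupt mask save/restore helpers used by the loggers below.
 * They touch DAIF/CPSR directly instead of going through the generic
 * local_irq_save()/restore() wrappers, so logging an irq event does
 * not recurse into the instrumented irqflags paths.
 */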
#ifdef CONFIG_ARM64
static inline unsigned long pure_arch_local_irq_save(void)
{
        unsigned long flags;

        asm volatile(
                "mrs %0, daif // arch_local_irq_save\n"
                "msr daifset, #2"
                : "=r" (flags)
                :
                : "memory");

        return flags;
}

static inline void pure_arch_local_irq_restore(unsigned long flags)
{
        asm volatile(
                "msr daif, %0 // arch_local_irq_restore"
                :
                : "r" (flags)
                : "memory");
}
#else
static inline unsigned long pure_arch_local_irq_save(void)
{
        unsigned long flags;

        asm volatile(
                " mrs %0, cpsr @ arch_local_irq_save\n"
                " cpsid i"
                : "=r" (flags) : : "memory", "cc");
        return flags;
}

static inline void pure_arch_local_irq_restore(unsigned long flags)
{
        asm volatile(
                " msr cpsr_c, %0 @ local_irq_restore"
                :
                : "r" (flags)
                : "memory", "cc");
}
#endif

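/*
 * Record the task being switched in on this cpu: timestamp, stack
 * pointer, task_struct pointer, pid and comm.
 */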
void dbg_snapshot_task(int cpu, void *v_task)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                unsigned long i = atomic_inc_return(&dss_idx.task_log_idx[cpu]) &
                                (ARRAY_SIZE(dss_log->task[0]) - 1);

                dss_log->task[cpu][i].time = cpu_clock(cpu);
                dss_log->task[cpu][i].sp = (unsigned long)current_stack_pointer;
                dss_log->task[cpu][i].task = (struct task_struct *)v_task;
                dss_log->task[cpu][i].pid = (int)((struct task_struct *)v_task)->pid;
                strncpy(dss_log->task[cpu][i].task_comm,
                        dss_log->task[cpu][i].task->comm,
                        TASK_COMM_LEN - 1);
        }
}

void dbg_snapshot_work(void *worker, void *v_task, void *fn, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;

        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.work_log_idx[cpu]) &
                                (ARRAY_SIZE(dss_log->work[0]) - 1);
                struct task_struct *task = (struct task_struct *)v_task;
                dss_log->work[cpu][i].time = cpu_clock(cpu);
                dss_log->work[cpu][i].sp = (unsigned long) current_stack_pointer;
                dss_log->work[cpu][i].worker = (struct worker *)worker;
                strncpy(dss_log->work[cpu][i].task_comm, task->comm, TASK_COMM_LEN - 1);
                dss_log->work[cpu][i].fn = (work_func_t)fn;
                dss_log->work[cpu][i].en = en;
        }
}

void dbg_snapshot_cpuidle(char *modes, unsigned state, int diff, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.cpuidle_log_idx[cpu]) &
                                (ARRAY_SIZE(dss_log->cpuidle[0]) - 1);

                dss_log->cpuidle[cpu][i].time = cpu_clock(cpu);
                dss_log->cpuidle[cpu][i].modes = modes;
                dss_log->cpuidle[cpu][i].state = state;
                dss_log->cpuidle[cpu][i].sp = (unsigned long) current_stack_pointer;
                dss_log->cpuidle[cpu][i].num_online_cpus = num_online_cpus();
                dss_log->cpuidle[cpu][i].delta = diff;
                dss_log->cpuidle[cpu][i].en = en;
        }
}

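/*
 * Log a suspend/resume callback: copies the optional tag string into
 * the entry and records the callback pointer, device, cpu and the
 * IN/OUT direction.
 */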
void dbg_snapshot_suspend(char *log, void *fn, void *dev, int state, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int len;
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.suspend_log_idx) &
                                (ARRAY_SIZE(dss_log->suspend) - 1);

                dss_log->suspend[i].time = cpu_clock(cpu);
                dss_log->suspend[i].sp = (unsigned long) current_stack_pointer;

                if (log) {
                        len = strlen(log);
                        memcpy(dss_log->suspend[i].log, log,
                                        len < DSS_LOG_GEN_LEN ?
                                        len : DSS_LOG_GEN_LEN - 1);
                } else {
                        memset(dss_log->suspend[i].log, 0, DSS_LOG_GEN_LEN - 1);
                }

                dss_log->suspend[i].fn = fn;
                dss_log->suspend[i].dev = (struct device *)dev;
                dss_log->suspend[i].core = cpu;
                dss_log->suspend[i].en = en;
        }
}

static void dbg_snapshot_print_calltrace(void)
{
        int i;

        pr_info("\n<Call trace>\n");
        for (i = 0; i < DSS_NR_CPUS; i++) {
                pr_info("CPU ID: %d -----------------------------------------------\n", i);
                pr_info("%s", dss_lastinfo.log[i]);
        }
}

void dbg_snapshot_save_log(int cpu, unsigned long where)
{
        if (dss_lastinfo.last_p[cpu] == NULL)
                dss_lastinfo.last_p[cpu] = &dss_lastinfo.log[cpu][0];

        if (dss_lastinfo.last_p[cpu] > &dss_lastinfo.log[cpu][SZ_1K - SZ_128])
                return;

        *(unsigned long *)&(dss_lastinfo.last_p[cpu]) += sprintf(dss_lastinfo.last_p[cpu],
                        "[<%p>] %pS\n", (void *)where, (void *)where);

}

static void dbg_snapshot_get_sec(unsigned long long ts, unsigned long *sec, unsigned long *msec)
{
        *sec = ts / NSEC_PER_SEC;
        *msec = (ts % NSEC_PER_SEC) / USEC_PER_MSEC;
}

static void dbg_snapshot_print_last_irq(int cpu)
{
        unsigned long idx, sec, msec;
        char fn_name[KSYM_NAME_LEN];

        idx = atomic_read(&dss_idx.irq_log_idx[cpu]) & (ARRAY_SIZE(dss_log->irq[0]) - 1);
        dbg_snapshot_get_sec(dss_log->irq[cpu][idx].time, &sec, &msec);
        lookup_symbol_name((unsigned long)dss_log->irq[cpu][idx].fn, fn_name);

        pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %8d, %10s: %2d, %s\n",
                        ">>> last irq", idx, sec, msec,
                        "handler", fn_name,
                        "irq", dss_log->irq[cpu][idx].irq,
                        "en", dss_log->irq[cpu][idx].en,
                        (dss_log->irq[cpu][idx].en == 1) ? "[Mismatch]" : "");
}

static void dbg_snapshot_print_last_task(int cpu)
{
        unsigned long idx, sec, msec;
        struct task_struct *task;

        idx = atomic_read(&dss_idx.task_log_idx[cpu]) & (ARRAY_SIZE(dss_log->task[0]) - 1);
        dbg_snapshot_get_sec(dss_log->task[cpu][idx].time, &sec, &msec);
        task = dss_log->task[cpu][idx].task;

        pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: 0x%-16p, %10s: %16llu\n",
                        ">>> last task", idx, sec, msec,
                        "task_comm", (task) ? task->comm : "NULL",
                        "task", task,
                        "exec_start", (task) ? task->se.exec_start : 0);
}

static void dbg_snapshot_print_last_work(int cpu)
{
        unsigned long idx, sec, msec;
        char fn_name[KSYM_NAME_LEN];

        idx = atomic_read(&dss_idx.work_log_idx[cpu]) & (ARRAY_SIZE(dss_log->work[0]) - 1);
        dbg_snapshot_get_sec(dss_log->work[cpu][idx].time, &sec, &msec);
        lookup_symbol_name((unsigned long)dss_log->work[cpu][idx].fn, fn_name);

        pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %20s, %3s: %3d %s\n",
                        ">>> last work", idx, sec, msec,
                        "task_name", dss_log->work[cpu][idx].task_comm,
                        "work_fn", fn_name,
                        "en", dss_log->work[cpu][idx].en,
                        (dss_log->work[cpu][idx].en == 1) ? "[Mismatch]" : "");
}

static void dbg_snapshot_print_last_cpuidle(int cpu)
{
        unsigned long idx, sec, msec;

        idx = atomic_read(&dss_idx.cpuidle_log_idx[cpu]) & (ARRAY_SIZE(dss_log->cpuidle[0]) - 1);
        dbg_snapshot_get_sec(dss_log->cpuidle[cpu][idx].time, &sec, &msec);

        pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24d, %8s: %4s, %6s: %3d, %12s: %2d, %3s: %3d %s\n",
                        ">>> last cpuidle", idx, sec, msec,
                        "stay time", dss_log->cpuidle[cpu][idx].delta,
                        "modes", dss_log->cpuidle[cpu][idx].modes,
                        "state", dss_log->cpuidle[cpu][idx].state,
                        "online_cpus", dss_log->cpuidle[cpu][idx].num_online_cpus,
                        "en", dss_log->cpuidle[cpu][idx].en,
                        (dss_log->cpuidle[cpu][idx].en == 1) ? "[Mismatch]" : "");
}

static void dbg_snapshot_print_lastinfo(void)
{
        int cpu;

        pr_info("<last info>\n");
        for (cpu = 0; cpu < DSS_NR_CPUS; cpu++) {
                pr_info("CPU ID: %d -----------------------------------------------\n", cpu);
                dbg_snapshot_print_last_task(cpu);
                dbg_snapshot_print_last_work(cpu);
                dbg_snapshot_print_last_irq(cpu);
                dbg_snapshot_print_last_cpuidle(cpu);
        }
}

#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
void dbg_snapshot_regulator(unsigned long long timestamp, char *f_name, unsigned int addr, unsigned int volt, unsigned int rvolt, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.regulator_log_idx) &
                                (ARRAY_SIZE(dss_log->regulator) - 1);
                int size = strlen(f_name);
                if (size >= SZ_16)
                        size = SZ_16 - 1;
                dss_log->regulator[i].time = cpu_clock(cpu);
                dss_log->regulator[i].cpu = cpu;
                dss_log->regulator[i].acpm_time = timestamp;
                strncpy(dss_log->regulator[i].name, f_name, size);
                dss_log->regulator[i].reg = addr;
                dss_log->regulator[i].en = en;
                dss_log->regulator[i].voltage = volt;
                dss_log->regulator[i].raw_volt = rvolt;
        }
}
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
void dbg_snapshot_thermal(void *data, unsigned int temp, char *name, unsigned int max_cooling)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.thermal_log_idx) &
                                (ARRAY_SIZE(dss_log->thermal) - 1);

                dss_log->thermal[i].time = cpu_clock(cpu);
                dss_log->thermal[i].cpu = cpu;
                dss_log->thermal[i].data = (struct exynos_tmu_platform_data *)data;
                dss_log->thermal[i].temp = temp;
                dss_log->thermal[i].cooling_device = name;
                dss_log->thermal[i].cooling_state = max_cooling;
        }
}
#endif

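/*
 * Log interrupt entry/exit on the current cpu with interrupts masked
 * via the pure_* helpers; records the handler, irq descriptor and the
 * latency relative to start_time (a start_time of 0 means zero latency).
 */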
void dbg_snapshot_irq(int irq, void *fn, void *val, unsigned long long start_time, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
        unsigned long flags;

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;

        flags = pure_arch_local_irq_save();
        {
                int cpu = raw_smp_processor_id();
                unsigned long long time, latency;
                unsigned long i;

                time = cpu_clock(cpu);

                if (start_time == 0)
                        start_time = time;

                latency = time - start_time;
                i = atomic_inc_return(&dss_idx.irq_log_idx[cpu]) &
                                (ARRAY_SIZE(dss_log->irq[0]) - 1);

                dss_log->irq[cpu][i].time = time;
                dss_log->irq[cpu][i].sp = (unsigned long) current_stack_pointer;
                dss_log->irq[cpu][i].irq = irq;
                dss_log->irq[cpu][i].fn = (void *)fn;
                dss_log->irq[cpu][i].desc = (struct irq_desc *)val;
                dss_log->irq[cpu][i].latency = latency;
                dss_log->irq[cpu][i].en = en;
        }
        pure_arch_local_irq_restore(flags);
}

#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
void dbg_snapshot_spinlock(void *v_lock, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned index = atomic_inc_return(&dss_idx.spinlock_log_idx[cpu]);
                unsigned long j, i = index & (ARRAY_SIZE(dss_log->spinlock[0]) - 1);
                raw_spinlock_t *lock = (raw_spinlock_t *)v_lock;
#ifdef CONFIG_ARM_ARCH_TIMER
                dss_log->spinlock[cpu][i].time = cpu_clock(cpu);
#else
                dss_log->spinlock[cpu][i].time = index;
#endif
                dss_log->spinlock[cpu][i].sp = (unsigned long) current_stack_pointer;
                dss_log->spinlock[cpu][i].jiffies = jiffies_64;
#ifdef CONFIG_DEBUG_SPINLOCK
                dss_log->spinlock[cpu][i].lock = lock;
                dss_log->spinlock[cpu][i].next = lock->raw_lock.next;
                dss_log->spinlock[cpu][i].owner = lock->raw_lock.owner;
#endif
                dss_log->spinlock[cpu][i].en = en;

                for (j = 0; j < dss_desc.callstack; j++) {
                        dss_log->spinlock[cpu][i].caller[j] =
                                (void *)((size_t)return_address(j + 1));
                }
        }
}
#endif

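/*
 * Log the point where interrupts are disabled on this cpu.  A non-zero
 * flags value resets the per-cpu history; otherwise the current task,
 * its comm and the call stack are stored in the next ring-buffer slot.
 */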
#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
void dbg_snapshot_irqs_disabled(unsigned long flags)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
        int cpu = raw_smp_processor_id();

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;

        if (unlikely(flags)) {
                unsigned j, local_flags = pure_arch_local_irq_save();

                /* If flags is set, it carries the interrupt enable status */
                atomic_set(&dss_idx.irqs_disabled_log_idx[cpu], -1);
                dss_log->irqs_disabled[cpu][0].time = 0;
                dss_log->irqs_disabled[cpu][0].index = 0;
                dss_log->irqs_disabled[cpu][0].task = NULL;
                dss_log->irqs_disabled[cpu][0].task_comm = NULL;

                for (j = 0; j < dss_desc.callstack; j++) {
                        dss_log->irqs_disabled[cpu][0].caller[j] = NULL;
                }

                pure_arch_local_irq_restore(local_flags);
        } else {
                unsigned index = atomic_inc_return(&dss_idx.irqs_disabled_log_idx[cpu]);
                unsigned long j, i = index % ARRAY_SIZE(dss_log->irqs_disabled[0]);

                dss_log->irqs_disabled[cpu][i].time = jiffies_64;
                dss_log->irqs_disabled[cpu][i].index = index;
                dss_log->irqs_disabled[cpu][i].task = get_current();
                dss_log->irqs_disabled[cpu][i].task_comm = get_current()->comm;

                for (j = 0; j < dss_desc.callstack; j++) {
                        dss_log->irqs_disabled[cpu][i].caller[j] =
                                (void *)((size_t)return_address(j + 1));
                }
        }
}
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
void dbg_snapshot_clk(void *clock, const char *func_name, unsigned long arg, int mode)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.clk_log_idx) &
                                (ARRAY_SIZE(dss_log->clk) - 1);

                dss_log->clk[i].time = cpu_clock(cpu);
                dss_log->clk[i].mode = mode;
                dss_log->clk[i].arg = arg;
                dss_log->clk[i].clk = (struct clk_hw *)clock;
                dss_log->clk[i].f_name = func_name;
        }
}
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
void dbg_snapshot_pmu(int id, const char *func_name, int mode)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.pmu_log_idx) &
                                (ARRAY_SIZE(dss_log->pmu) - 1);

                dss_log->pmu[i].time = cpu_clock(cpu);
                dss_log->pmu[i].mode = mode;
                dss_log->pmu[i].id = id;
                dss_log->pmu[i].f_name = func_name;
        }
}
#endif

static struct notifier_block **dss_should_check_nl[] = {
        (struct notifier_block **)(&panic_notifier_list.head),
        (struct notifier_block **)(&reboot_notifier_list.head),
        (struct notifier_block **)(&restart_handler_list.head),
#ifdef CONFIG_PM_SLEEP
        (struct notifier_block **)(&pm_chain_head.head),
#endif
#ifdef CONFIG_EXYNOS_ITMON
        (struct notifier_block **)(&itmon_notifier_list.head),
#endif
};

void dbg_snapshot_print_notifier_call(void **nl, unsigned long func, int en)
{
        struct notifier_block **nl_org = (struct notifier_block **)nl;
        char notifier_name[KSYM_NAME_LEN];
        char notifier_func_name[KSYM_NAME_LEN];
        int i;

        for (i = 0; i < ARRAY_SIZE(dss_should_check_nl); i++) {
                if (nl_org == dss_should_check_nl[i]) {
                        lookup_symbol_name((unsigned long)nl_org, notifier_name);
                        lookup_symbol_name((unsigned long)func, notifier_func_name);

                        pr_info("debug-snapshot: %s -> %s call %s\n",
                                notifier_name,
                                notifier_func_name,
                                en == DSS_FLAG_IN ? "+" : "-");
                        break;
                }
        }
}

#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
static void dbg_snapshot_print_freqinfo(void)
{
        unsigned long idx, sec, msec;
        char *freq_name;
        unsigned int i;
        unsigned long old_freq, target_freq;

        pr_info("\n<freq info>\n");

        for (i = 0; i < DSS_FLAG_END; i++) {
                idx = atomic_read(&dss_lastinfo.freq_last_idx[i]) & (ARRAY_SIZE(dss_log->freq) - 1);
                freq_name = dss_log->freq[idx].freq_name;
                if ((!freq_name) || strncmp(freq_name, dss_freq_name[i], strlen(dss_freq_name[i]))) {
                        pr_info("%10s: no information\n", dss_freq_name[i]);
                        continue;
                }

                dbg_snapshot_get_sec(dss_log->freq[idx].time, &sec, &msec);
                old_freq = dss_log->freq[idx].old_freq;
                target_freq = dss_log->freq[idx].target_freq;
                pr_info("%10s: [%4lu] %10lu.%06lu sec, %12s: %6luMhz, %12s: %6luMhz, %3s: %3d %s\n",
                                freq_name, idx, sec, msec,
                                "old_freq", old_freq/1000,
                                "target_freq", target_freq/1000,
                                "en", dss_log->freq[idx].en,
                                (dss_log->freq[idx].en == 1) ? "[Mismatch]" : "");
        }
}

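/*
 * Log a DVFS transition for the domain selected by 'type' (an index
 * into dss_freq_name): old and requested frequency plus IN/OUT state,
 * and remember the newest log index per domain for the panic-time
 * summary printed by dbg_snapshot_print_freqinfo().
 */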
void dbg_snapshot_freq(int type, unsigned long old_freq, unsigned long target_freq, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.freq_log_idx) &
                                (ARRAY_SIZE(dss_log->freq) - 1);

                if (atomic_read(&dss_idx.freq_log_idx) > atomic_read(&dss_lastinfo.freq_last_idx[type]))
                        atomic_set(&dss_lastinfo.freq_last_idx[type], atomic_read(&dss_idx.freq_log_idx));

                dss_log->freq[i].time = cpu_clock(cpu);
                dss_log->freq[i].cpu = cpu;
                dss_log->freq[i].freq_name = dss_freq_name[type];
                dss_log->freq[i].type = type;
                dss_log->freq[i].old_freq = old_freq;
                dss_log->freq[i].target_freq = target_freq;
                dss_log->freq[i].en = en;
        }
}
#endif

#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif

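/*
 * Dump system-wide interrupt statistics: the summed per-cpu irq counts
 * plus one line per active irq with its handler name.
 */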
static void dbg_snapshot_print_irq(void)
{
        int i, j;
        u64 sum = 0;

        for_each_possible_cpu(i) {
                sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);
        }
        sum += arch_irq_stat();

        pr_info("\n<irq info>\n");
        pr_info("------------------------------------------------------------------\n");
        pr_info("\n");
        pr_info("sum irq : %llu", (unsigned long long)sum);
        pr_info("------------------------------------------------------------------\n");

        for_each_irq_nr(j) {
                unsigned int irq_stat = kstat_irqs(j);

                if (irq_stat) {
                        struct irq_desc *desc = irq_to_desc(j);
                        const char *name;

                        name = desc->action ? (desc->action->name ? desc->action->name : "???") : "???";
                        pr_info("irq-%-4d : %8u %s\n", j, irq_stat, name);
                }
        }
}

void dbg_snapshot_print_panic_report(void)
{
        pr_info("============================================================\n");
        pr_info("Panic Report\n");
        pr_info("============================================================\n");
        dbg_snapshot_print_lastinfo();
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
        dbg_snapshot_print_freqinfo();
#endif
        dbg_snapshot_print_calltrace();
        dbg_snapshot_print_irq();
        pr_info("============================================================\n");
}

#ifdef CONFIG_DEBUG_SNAPSHOT_DM
void dbg_snapshot_dm(int type, unsigned long min, unsigned long max, s32 wait_t, s32 t)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.dm_log_idx) &
                                (ARRAY_SIZE(dss_log->dm) - 1);

                dss_log->dm[i].time = cpu_clock(cpu);
                dss_log->dm[i].cpu = cpu;
                dss_log->dm[i].dm_num = type;
                dss_log->dm[i].min_freq = min;
                dss_log->dm[i].max_freq = max;
                dss_log->dm[i].wait_dmt = wait_t;
                dss_log->dm[i].do_dmt = t;
        }
}
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
void dbg_snapshot_hrtimer(void *timer, s64 *now, void *fn, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.hrtimer_log_idx[cpu]) &
                                (ARRAY_SIZE(dss_log->hrtimers[0]) - 1);

                dss_log->hrtimers[cpu][i].time = cpu_clock(cpu);
                dss_log->hrtimers[cpu][i].now = *now;
                dss_log->hrtimers[cpu][i].timer = (struct hrtimer *)timer;
                dss_log->hrtimers[cpu][i].fn = fn;
                dss_log->hrtimers[cpu][i].en = en;
        }
}
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
void dbg_snapshot_i2c(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.i2c_log_idx) &
                                (ARRAY_SIZE(dss_log->i2c) - 1);

                dss_log->i2c[i].time = cpu_clock(cpu);
                dss_log->i2c[i].cpu = cpu;
                dss_log->i2c[i].adap = adap;
                dss_log->i2c[i].msgs = msgs;
                dss_log->i2c[i].num = num;
                dss_log->i2c[i].en = en;
        }
}
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
void dbg_snapshot_spi(struct spi_controller *ctlr, struct spi_message *cur_msg, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.spi_log_idx) &
                                (ARRAY_SIZE(dss_log->spi) - 1);

                dss_log->spi[i].time = cpu_clock(cpu);
                dss_log->spi[i].cpu = cpu;
                dss_log->spi[i].ctlr = ctlr;
                dss_log->spi[i].cur_msg = cur_msg;
                dss_log->spi[i].en = en;
        }
}
#endif

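/*
 * Log a binder transaction (and optional error record) from the binder
 * trace hooks; missing transaction/error data is zero-filled.
 */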
#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
void dbg_snapshot_binder(struct trace_binder_transaction_base *base,
                         struct trace_binder_transaction *transaction,
                         struct trace_binder_transaction_error *error)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
        int cpu;
        unsigned long i;

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        if (base == NULL)
                return;

        cpu = raw_smp_processor_id();
        i = atomic_inc_return(&dss_idx.binder_log_idx) &
                        (ARRAY_SIZE(dss_log->binder) - 1);

        dss_log->binder[i].time = cpu_clock(cpu);
        dss_log->binder[i].cpu = cpu;
        dss_log->binder[i].base = *base;

        if (transaction) {
                dss_log->binder[i].transaction = *transaction;
        } else {
                dss_log->binder[i].transaction.to_node_id = 0;
                dss_log->binder[i].transaction.reply = 0;
                dss_log->binder[i].transaction.flags = 0;
                dss_log->binder[i].transaction.code = 0;
        }
        if (error) {
                dss_log->binder[i].error = *error;
        } else {
                dss_log->binder[i].error.return_error = 0;
                dss_log->binder[i].error.return_error_param = 0;
                dss_log->binder[i].error.return_error_line = 0;
        }
}
#endif

#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
void dbg_snapshot_acpm(unsigned long long timestamp, const char *log, unsigned int data)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long i = atomic_inc_return(&dss_idx.acpm_log_idx) &
                                (ARRAY_SIZE(dss_log->acpm) - 1);
                int len = strlen(log);

                if (len >= 8)
                        len = 8;

                dss_log->acpm[i].time = cpu_clock(cpu);
                dss_log->acpm[i].acpm_time = timestamp;
                strncpy(dss_log->acpm[i].log, log, len);
                dss_log->acpm[i].log[len] = '\0';
                dss_log->acpm[i].data = data;
        }
}
#endif

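/*
 * Translate a kernel virtual address to a physical address, walking the
 * page tables by hand for addresses outside the linear mapping, so that
 * register accesses can be matched against dss_reg_exlist.
 */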
#ifdef CONFIG_DEBUG_SNAPSHOT_REG
static phys_addr_t virt_to_phys_high(size_t vaddr)
{
        phys_addr_t paddr = 0;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        if (virt_addr_valid((void *) vaddr)) {
                paddr = virt_to_phys((void *) vaddr);
                goto out;
        }

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                goto out;

        if (pgd_val(*pgd) & 2) {
                paddr = pgd_val(*pgd) & SECTION_MASK;
                goto out;
        }

        pmd = pmd_offset((pud_t *)pgd, vaddr);
        if (pmd_none_or_clear_bad(pmd))
                goto out;

        pte = pte_offset_kernel(pmd, vaddr);
        if (pte_none(*pte))
                goto out;

        paddr = pte_val(*pte) & PAGE_MASK;

out:
        return paddr | (vaddr & UL(SZ_4K - 1));
}

void dbg_snapshot_reg(unsigned int read, size_t val, size_t reg, int en)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
        int cpu = raw_smp_processor_id();
        unsigned long i, j;
        size_t phys_reg, start_addr, end_addr;

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;

        if (dss_reg_exlist[0].addr == 0)
                return;

        phys_reg = virt_to_phys_high(reg);
        if (unlikely(!phys_reg))
                return;

        for (j = 0; j < ARRAY_SIZE(dss_reg_exlist); j++) {
                if (dss_reg_exlist[j].addr == 0)
                        break;
                start_addr = dss_reg_exlist[j].addr;
                end_addr = start_addr + dss_reg_exlist[j].size;
                if (start_addr <= phys_reg && phys_reg <= end_addr)
                        return;
        }

        i = atomic_inc_return(&dss_idx.reg_log_idx[cpu]) &
                (ARRAY_SIZE(dss_log->reg[0]) - 1);

        dss_log->reg[cpu][i].time = cpu_clock(cpu);
        dss_log->reg[cpu][i].read = read;
        dss_log->reg[cpu][i].val = val;
        dss_log->reg[cpu][i].reg = phys_reg;
        dss_log->reg[cpu][i].en = en;

        for (j = 0; j < dss_desc.callstack; j++) {
                dss_log->reg[cpu][i].caller[j] =
                        (void *)((size_t)return_address(j + 1));
        }
}
#endif

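/*
 * Text-logging helpers (unavailable in minimized mode):
 * dbg_snapshot_printk stores a formatted string and dbg_snapshot_printkl
 * a message/value pair, both together with the caller's call stack.
 */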
#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
void dbg_snapshot_printk(const char *fmt, ...)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                va_list args;
                int ret;
                unsigned long j, i = atomic_inc_return(&dss_idx.printk_log_idx) &
                                (ARRAY_SIZE(dss_log->printk) - 1);

                va_start(args, fmt);
                ret = vsnprintf(dss_log->printk[i].log,
                                sizeof(dss_log->printk[i].log), fmt, args);
                va_end(args);

                dss_log->printk[i].time = cpu_clock(cpu);
                dss_log->printk[i].cpu = cpu;

                for (j = 0; j < dss_desc.callstack; j++) {
                        dss_log->printk[i].caller[j] =
                                (void *)((size_t)return_address(j));
                }
        }
}

void dbg_snapshot_printkl(size_t msg, size_t val)
{
        struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

        if (unlikely(!dss_base.enabled || !item->entry.enabled))
                return;
        {
                int cpu = raw_smp_processor_id();
                unsigned long j, i = atomic_inc_return(&dss_idx.printkl_log_idx) &
                                (ARRAY_SIZE(dss_log->printkl) - 1);

                dss_log->printkl[i].time = cpu_clock(cpu);
                dss_log->printkl[i].cpu = cpu;
                dss_log->printkl[i].msg = msg;
                dss_log->printkl[i].val = val;

                for (j = 0; j < dss_desc.callstack; j++) {
                        dss_log->printkl[i].caller[j] =
                                (void *)((size_t)return_address(j));
                }
        }
}
#endif