[ERD][NEUS7920-76] [COMMON] lib: dss: support to output notifier call functions
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / lib / debug-snapshot-log.c
CommitLineData
dd101ca5
DC
1/*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Debug-SnapShot: Debug Framework for Ramdump based debugging method
6 * The original code is Exynos-Snapshot for Exynos SoC
7 *
8 * Author: Hosung Kim <hosung0.kim@samsung.com>
9 * Author: Changki Kim <changki.kim@samsung.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/module.h>
19#include <linux/ktime.h>
20#include <linux/kallsyms.h>
21#include <linux/platform_device.h>
22#include <linux/clk-provider.h>
23#include <linux/pstore_ram.h>
24#include <linux/sched/clock.h>
25#include <linux/ftrace.h>
26
27#include "debug-snapshot-local.h"
28#include <asm/irq.h>
29#include <asm/traps.h>
30#include <asm/hardirq.h>
31#include <asm/stacktrace.h>
32#include <linux/debug-snapshot.h>
33#include <linux/kernel_stat.h>
34#include <linux/irqnr.h>
35#include <linux/irq.h>
36#include <linux/irqdesc.h>
37
/*
 * Per-cpu "last known state" text buffers, filled by
 * dbg_snapshot_save_log() and printed by dbg_snapshot_print_calltrace().
 */
struct dbg_snapshot_lastinfo {
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
	atomic_t freq_last_idx[DSS_FLAG_END];
#endif
	char log[DSS_NR_CPUS][SZ_1K];	/* per-cpu call-trace text */
	char *last_p[DSS_NR_CPUS];	/* write cursor into log[cpu] */
};
45
/* Iteration state for dbg_snapshot_dumper_one(). */
struct dss_dumper {
	bool active;	/* true once the start slot has been latched */
	u32 items;	/* DSS_FLAG_* event class being dumped */
	int init_idx;	/* first slot visited; walk ends when reached again */
	int cur_idx;	/* next slot to format */
	u32 cur_cpu;	/* cpu whose ring buffer is being walked */
	u32 step;
};
54
/*
 * Event classes selectable through dss_dumper.items and dispatched on by
 * dbg_snapshot_dumper_one().  Values start at 1, the rest are consecutive.
 */
enum dss_kevent_flag {
	DSS_FLAG_TASK = 1,
	DSS_FLAG_WORK,
	DSS_FLAG_CPUIDLE,
	DSS_FLAG_SUSPEND,
	DSS_FLAG_IRQ,
	DSS_FLAG_IRQ_EXIT,
	DSS_FLAG_SPINLOCK,
	DSS_FLAG_IRQ_DISABLE,
	DSS_FLAG_CLK,
	DSS_FLAG_FREQ,
	DSS_FLAG_REG,
	DSS_FLAG_HRTIMER,
	DSS_FLAG_REGULATOR,
	DSS_FLAG_THERMAL,
	DSS_FLAG_MAILBOX,
	DSS_FLAG_CLOCKEVENT,
	DSS_FLAG_PRINTK,
	DSS_FLAG_PRINTKL,
	DSS_FLAG_KEVENT,
};
76
77struct dbg_snapshot_log_idx {
78 atomic_t task_log_idx[DSS_NR_CPUS];
79 atomic_t work_log_idx[DSS_NR_CPUS];
80 atomic_t cpuidle_log_idx[DSS_NR_CPUS];
81 atomic_t suspend_log_idx;
82 atomic_t irq_log_idx[DSS_NR_CPUS];
83#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
84 atomic_t spinlock_log_idx[DSS_NR_CPUS];
85#endif
86#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
87 atomic_t irqs_disabled_log_idx[DSS_NR_CPUS];
88#endif
89#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
90 atomic_t irq_exit_log_idx[DSS_NR_CPUS];
91#endif
92#ifdef CONFIG_DEBUG_SNAPSHOT_REG
93 atomic_t reg_log_idx[DSS_NR_CPUS];
94#endif
95#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
96 atomic_t hrtimer_log_idx[DSS_NR_CPUS];
97#endif
98#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
99 atomic_t clk_log_idx;
100#endif
101#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
102 atomic_t pmu_log_idx;
103#endif
104#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
105 atomic_t freq_log_idx;
106#endif
107#ifdef CONFIG_DEBUG_SNAPSHOT_DM
108 atomic_t dm_log_idx;
109#endif
110#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
111 atomic_t regulator_log_idx;
112#endif
113#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
114 atomic_t thermal_log_idx;
115#endif
116#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
117 atomic_t i2c_log_idx;
118#endif
119#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
120 atomic_t spi_log_idx;
121#endif
ae09f559
YK
122#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
123 atomic_t binder_log_idx;
124#endif
dd101ca5
DC
125#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
126 atomic_t clockevent_log_idx[DSS_NR_CPUS];
127 atomic_t printkl_log_idx;
128 atomic_t printk_log_idx;
129#endif
130#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
131 atomic_t acpm_log_idx;
132#endif
133};
134
/* Size of the whole event-log area, exported for the dump tooling. */
int dbg_snapshot_log_size = sizeof(struct dbg_snapshot_log);
/*
 * including or excluding options
 * if you want to except some interrupt, it should be written in this array
 */
int dss_irqlog_exlist[DSS_EX_MAX_NUM] = {
/* interrupt number ex) 152, 153, 154, */
	-1,
};

#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
/* IRQ numbers excluded from exit-latency logging. */
int dss_irqexit_exlist[DSS_EX_MAX_NUM] = {
/* interrupt number ex) 152, 153, 154, */
	-1,
};

/*
 * Latency threshold in microseconds (compared against ns * 1000 in
 * dbg_snapshot_irq_exit()); only slower irq exits are recorded.
 */
unsigned int dss_irqexit_threshold =
		CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT_THRESHOLD;
#endif
154
#ifdef CONFIG_DEBUG_SNAPSHOT_REG
/* One physical register range excluded from register-access logging. */
struct dss_reg_list {
	size_t addr;
	size_t size;
};

static struct dss_reg_list dss_reg_exlist[] = {
/*
 * if you want to reduce the impact of the enabled reg feature on the
 * system, you must add these registers - mct, serial
 * because they are called very often.
 * physical address, size ex) {0x10C00000, 0x1000},
 */
	{DSS_REG_MCT_ADDR, DSS_REG_MCT_SIZE},
	{DSS_REG_UART_ADDR, DSS_REG_UART_SIZE},
	{0, 0},
	{0, 0},
	{0, 0},
	{0, 0},
	{0, 0},
	{0, 0},
	{0, 0},
};
#endif
179
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
/* Human-readable names of the frequency domains, indexed by domain id. */
static char *dss_freq_name[] = {
	"LITTLE", "BIG", "INT", "MIF", "ISP", "DISP", "INTCAM", "AUD", "IVA", "SCORE", "FSYS0",
};
#endif

/* Internal interface variable */
static struct dbg_snapshot_log_idx dss_idx;
static struct dbg_snapshot_lastinfo dss_lastinfo;
189
/*
 * Reset every log write-cursor to -1 so the first atomic_inc_return()
 * on each cursor lands on ring-buffer slot 0.  Called once at boot.
 */
void __init dbg_snapshot_init_log_idx(void)
{
	int i;

#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
	atomic_set(&(dss_idx.printk_log_idx), -1);
	atomic_set(&(dss_idx.printkl_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
	atomic_set(&(dss_idx.regulator_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
	atomic_set(&(dss_idx.thermal_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
	atomic_set(&(dss_idx.freq_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_DM
	atomic_set(&(dss_idx.dm_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
	atomic_set(&(dss_idx.clk_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
	atomic_set(&(dss_idx.pmu_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
	atomic_set(&(dss_idx.acpm_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
	atomic_set(&(dss_idx.i2c_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
	atomic_set(&(dss_idx.spi_log_idx), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
	atomic_set(&(dss_idx.binder_log_idx), -1);
#endif
	atomic_set(&(dss_idx.suspend_log_idx), -1);

	/* per-cpu cursors */
	for (i = 0; i < DSS_NR_CPUS; i++) {
		atomic_set(&(dss_idx.task_log_idx[i]), -1);
		atomic_set(&(dss_idx.work_log_idx[i]), -1);
#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
		atomic_set(&(dss_idx.clockevent_log_idx[i]), -1);
#endif
		atomic_set(&(dss_idx.cpuidle_log_idx[i]), -1);
		atomic_set(&(dss_idx.irq_log_idx[i]), -1);
#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
		atomic_set(&(dss_idx.spinlock_log_idx[i]), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
		atomic_set(&(dss_idx.irqs_disabled_log_idx[i]), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
		atomic_set(&(dss_idx.irq_exit_log_idx[i]), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_REG
		atomic_set(&(dss_idx.reg_log_idx[i]), -1);
#endif
#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
		atomic_set(&(dss_idx.hrtimer_log_idx[i]), -1);
#endif
	}
}
255
256bool dbg_snapshot_dumper_one(void *v_dumper, char *line, size_t size, size_t *len)
257{
258 bool ret = false;
259 int idx, array_size;
260 unsigned int cpu, items;
261 unsigned long rem_nsec;
262 u64 ts;
263 struct dss_dumper *dumper = (struct dss_dumper *)v_dumper;
264
265 if (!line || size < SZ_128 ||
266 dumper->cur_cpu >= NR_CPUS)
267 goto out;
268
269 if (dumper->active) {
270 if (dumper->init_idx == dumper->cur_idx)
271 goto out;
272 }
273
274 cpu = dumper->cur_cpu;
275 idx = dumper->cur_idx;
276 items = dumper->items;
277
278 switch(items) {
279 case DSS_FLAG_TASK:
280 {
281 struct task_struct *task;
282 array_size = ARRAY_SIZE(dss_log->task[0]) - 1;
283 if (!dumper->active) {
284 idx = (atomic_read(&dss_idx.task_log_idx[0]) + 1) & array_size;
285 dumper->init_idx = idx;
286 dumper->active = true;
287 }
288 ts = dss_log->task[cpu][idx].time;
289 rem_nsec = do_div(ts, NSEC_PER_SEC);
290 task = dss_log->task[cpu][idx].task;
291
292 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, "
293 "task:0x%16p, stack:0x%16p, exec_start:%16llu\n",
294 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
295 task->comm, task, task->stack,
296 task->se.exec_start);
297 break;
298 }
299 case DSS_FLAG_WORK:
300 {
301 char work_fn[KSYM_NAME_LEN] = {0,};
302 char *task_comm;
303 int en;
304
305 array_size = ARRAY_SIZE(dss_log->work[0]) - 1;
306 if (!dumper->active) {
307 idx = (atomic_read(&dss_idx.work_log_idx[0]) + 1) & array_size;
308 dumper->init_idx = idx;
309 dumper->active = true;
310 }
311 ts = dss_log->work[cpu][idx].time;
312 rem_nsec = do_div(ts, NSEC_PER_SEC);
313 lookup_symbol_name((unsigned long)dss_log->work[cpu][idx].fn, work_fn);
314 task_comm = dss_log->work[cpu][idx].task_comm;
315 en = dss_log->work[cpu][idx].en;
316
317 dumper->step = 6;
318 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, work_fn:%32s, %3s\n",
319 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
320 task_comm, work_fn,
321 en == DSS_FLAG_IN ? "IN" : "OUT");
322 break;
323 }
324 case DSS_FLAG_CPUIDLE:
325 {
326 unsigned int delta;
327 int state, num_cpus, en;
328 char *index;
329
330 array_size = ARRAY_SIZE(dss_log->cpuidle[0]) - 1;
331 if (!dumper->active) {
332 idx = (atomic_read(&dss_idx.cpuidle_log_idx[0]) + 1) & array_size;
333 dumper->init_idx = idx;
334 dumper->active = true;
335 }
336 ts = dss_log->cpuidle[cpu][idx].time;
337 rem_nsec = do_div(ts, NSEC_PER_SEC);
338
339 index = dss_log->cpuidle[cpu][idx].modes;
340 en = dss_log->cpuidle[cpu][idx].en;
341 state = dss_log->cpuidle[cpu][idx].state;
342 num_cpus = dss_log->cpuidle[cpu][idx].num_online_cpus;
343 delta = dss_log->cpuidle[cpu][idx].delta;
344
345 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] cpuidle: %s, "
346 "state:%d, num_online_cpus:%d, stay_time:%8u, %3s\n",
347 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
348 index, state, num_cpus, delta,
349 en == DSS_FLAG_IN ? "IN" : "OUT");
350 break;
351 }
352 case DSS_FLAG_SUSPEND:
353 {
354 char suspend_fn[KSYM_NAME_LEN];
355 int en;
356
357 array_size = ARRAY_SIZE(dss_log->suspend) - 1;
358 if (!dumper->active) {
359 idx = (atomic_read(&dss_idx.suspend_log_idx) + 1) & array_size;
360 dumper->init_idx = idx;
361 dumper->active = true;
362 }
363 ts = dss_log->suspend[idx].time;
364 rem_nsec = do_div(ts, NSEC_PER_SEC);
365
366 lookup_symbol_name((unsigned long)dss_log->suspend[idx].fn, suspend_fn);
367 en = dss_log->suspend[idx].en;
368
369 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] suspend_fn:%s, %3s\n",
370 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
371 suspend_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
372 break;
373 }
374 case DSS_FLAG_IRQ:
375 {
376 char irq_fn[KSYM_NAME_LEN];
ddad4f4d 377 int en, irq;
dd101ca5
DC
378
379 array_size = ARRAY_SIZE(dss_log->irq[0]) - 1;
380 if (!dumper->active) {
381 idx = (atomic_read(&dss_idx.irq_log_idx[0]) + 1) & array_size;
382 dumper->init_idx = idx;
383 dumper->active = true;
384 }
385 ts = dss_log->irq[cpu][idx].time;
386 rem_nsec = do_div(ts, NSEC_PER_SEC);
387
388 lookup_symbol_name((unsigned long)dss_log->irq[cpu][idx].fn, irq_fn);
389 irq = dss_log->irq[cpu][idx].irq;
dd101ca5
DC
390 en = dss_log->irq[cpu][idx].en;
391
ddad4f4d 392 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] irq:%6d, irq_fn:%32s, %3s\n",
dd101ca5 393 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
ddad4f4d 394 irq, irq_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
dd101ca5
DC
395 break;
396 }
397#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
398 case DSS_FLAG_IRQ_EXIT:
399 {
400 unsigned long end_time, latency;
401 int irq;
402
403 array_size = ARRAY_SIZE(dss_log->irq_exit[0]) - 1;
404 if (!dumper->active) {
405 idx = (atomic_read(&dss_idx.irq_exit_log_idx[0]) + 1) & array_size;
406 dumper->init_idx = idx;
407 dumper->active = true;
408 }
409 ts = dss_log->irq_exit[cpu][idx].time;
410 rem_nsec = do_div(ts, NSEC_PER_SEC);
411
412 end_time = dss_log->irq_exit[cpu][idx].end_time;
413 latency = dss_log->irq_exit[cpu][idx].latency;
414 irq = dss_log->irq_exit[cpu][idx].irq;
415
416 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] irq:%6d, "
417 "latency:%16zu, end_time:%16zu\n",
418 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
419 irq, latency, end_time);
420 break;
421 }
422#endif
423#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
424 case DSS_FLAG_SPINLOCK:
425 {
426 unsigned int jiffies_local;
427 char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
428 int en, i;
429 u16 next, owner;
430
431 array_size = ARRAY_SIZE(dss_log->spinlock[0]) - 1;
432 if (!dumper->active) {
433 idx = (atomic_read(&dss_idx.spinlock_log_idx[0]) + 1) & array_size;
434 dumper->init_idx = idx;
435 dumper->active = true;
436 }
437 ts = dss_log->spinlock[cpu][idx].time;
438 rem_nsec = do_div(ts, NSEC_PER_SEC);
439
440 jiffies_local = dss_log->spinlock[cpu][idx].jiffies;
441 en = dss_log->spinlock[cpu][idx].en;
442 for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
443 lookup_symbol_name((unsigned long)dss_log->spinlock[cpu][idx].caller[i],
444 callstack[i]);
445
446 next = dss_log->spinlock[cpu][idx].next;
447 owner = dss_log->spinlock[cpu][idx].owner;
448
449 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] next:%8x, owner:%8x jiffies:%12u, %3s\n"
450 "callstack: %s\n"
451 " %s\n"
452 " %s\n"
453 " %s\n",
454 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
455 next, owner, jiffies_local,
456 en == DSS_FLAG_IN ? "IN" : "OUT",
457 callstack[0], callstack[1], callstack[2], callstack[3]);
458 break;
459 }
460#endif
461#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
462 case DSS_FLAG_CLK:
463 {
464 const char *clk_name;
465 char clk_fn[KSYM_NAME_LEN];
466 struct clk_hw *clk;
467 int en;
468
469 array_size = ARRAY_SIZE(dss_log->clk) - 1;
470 if (!dumper->active) {
471 idx = (atomic_read(&dss_idx.clk_log_idx) + 1) & array_size;
472 dumper->init_idx = idx;
473 dumper->active = true;
474 }
475 ts = dss_log->clk[idx].time;
476 rem_nsec = do_div(ts, NSEC_PER_SEC);
477
478 clk = (struct clk_hw *)dss_log->clk[idx].clk;
479 clk_name = clk_hw_get_name(clk);
480 lookup_symbol_name((unsigned long)dss_log->clk[idx].f_name, clk_fn);
481 en = dss_log->clk[idx].mode;
482
483 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU] clk_name:%30s, clk_fn:%30s, "
484 ", %s\n",
485 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx,
486 clk_name, clk_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
487 break;
488 }
489#endif
490#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
491 case DSS_FLAG_FREQ:
492 {
493 char *freq_name;
494 unsigned int on_cpu;
495 unsigned long old_freq, target_freq;
496 int en;
497
498 array_size = ARRAY_SIZE(dss_log->freq) - 1;
499 if (!dumper->active) {
500 idx = (atomic_read(&dss_idx.freq_log_idx) + 1) & array_size;
501 dumper->init_idx = idx;
502 dumper->active = true;
503 }
504 ts = dss_log->freq[idx].time;
505 rem_nsec = do_div(ts, NSEC_PER_SEC);
506
507 freq_name = dss_log->freq[idx].freq_name;
508 old_freq = dss_log->freq[idx].old_freq;
509 target_freq = dss_log->freq[idx].target_freq;
510 on_cpu = dss_log->freq[idx].cpu;
511 en = dss_log->freq[idx].en;
512
513 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] freq_name:%16s, "
514 "old_freq:%16lu, target_freq:%16lu, %3s\n",
515 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, on_cpu,
516 freq_name, old_freq, target_freq,
517 en == DSS_FLAG_IN ? "IN" : "OUT");
518 break;
519 }
520#endif
521#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
522 case DSS_FLAG_PRINTK:
523 {
524 char *log;
525 char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
526 unsigned int cpu;
527 int i;
528
529 array_size = ARRAY_SIZE(dss_log->printk) - 1;
530 if (!dumper->active) {
531 idx = (atomic_read(&dss_idx.printk_log_idx) + 1) & array_size;
532 dumper->init_idx = idx;
533 dumper->active = true;
534 }
535 ts = dss_log->printk[idx].time;
536 cpu = dss_log->printk[idx].cpu;
537 rem_nsec = do_div(ts, NSEC_PER_SEC);
538 log = dss_log->printk[idx].log;
539 for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
540 lookup_symbol_name((unsigned long)dss_log->printk[idx].caller[i],
541 callstack[i]);
542
543 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] log:%s, callstack:%s, %s, %s, %s\n",
544 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
545 log, callstack[0], callstack[1], callstack[2], callstack[3]);
546 break;
547 }
548 case DSS_FLAG_PRINTKL:
549 {
550 char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
551 size_t msg, val;
552 unsigned int cpu;
553 int i;
554
555 array_size = ARRAY_SIZE(dss_log->printkl) - 1;
556 if (!dumper->active) {
557 idx = (atomic_read(&dss_idx.printkl_log_idx) + 1) & array_size;
558 dumper->init_idx = idx;
559 dumper->active = true;
560 }
561 ts = dss_log->printkl[idx].time;
562 cpu = dss_log->printkl[idx].cpu;
563 rem_nsec = do_div(ts, NSEC_PER_SEC);
564 msg = dss_log->printkl[idx].msg;
565 val = dss_log->printkl[idx].val;
566 for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
567 lookup_symbol_name((unsigned long)dss_log->printkl[idx].caller[i],
568 callstack[i]);
569
570 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] msg:%zx, val:%zx, callstack: %s, %s, %s, %s\n",
571 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
572 msg, val, callstack[0], callstack[1], callstack[2], callstack[3]);
573 break;
574 }
575#endif
576 default:
577 snprintf(line, size, "unsupported inforation to dump\n");
578 goto out;
579 }
580 if (array_size == idx)
581 dumper->cur_idx = 0;
582 else
583 dumper->cur_idx = idx + 1;
584
585 ret = true;
586out:
587 return ret;
588}
589
590#ifdef CONFIG_ARM64
591static inline unsigned long pure_arch_local_irq_save(void)
592{
593 unsigned long flags;
594
595 asm volatile(
596 "mrs %0, daif // arch_local_irq_save\n"
597 "msr daifset, #2"
598 : "=r" (flags)
599 :
600 : "memory");
601
602 return flags;
603}
604
605static inline void pure_arch_local_irq_restore(unsigned long flags)
606{
607 asm volatile(
608 "msr daif, %0 // arch_local_irq_restore"
609 :
610 : "r" (flags)
611 : "memory");
612}
613#else
614static inline unsigned long arch_local_irq_save(void)
615{
616 unsigned long flags;
617
618 asm volatile(
619 " mrs %0, cpsr @ arch_local_irq_save\n"
620 " cpsid i"
621 : "=r" (flags) : : "memory", "cc");
622 return flags;
623}
624
625static inline void arch_local_irq_restore(unsigned long flags)
626{
627 asm volatile(
628 " msr cpsr_c, %0 @ local_irq_restore"
629 :
630 : "r" (flags)
631 : "memory", "cc");
632}
633#endif
634
635void dbg_snapshot_task(int cpu, void *v_task)
636{
637 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
638
639 if (unlikely(!dss_base.enabled || !item->entry.enabled))
640 return;
641 {
642 unsigned long i = atomic_inc_return(&dss_idx.task_log_idx[cpu]) &
643 (ARRAY_SIZE(dss_log->task[0]) - 1);
644
645 dss_log->task[cpu][i].time = cpu_clock(cpu);
ddad4f4d 646 dss_log->task[cpu][i].sp = (unsigned long)current_stack_pointer;
dd101ca5 647 dss_log->task[cpu][i].task = (struct task_struct *)v_task;
bf2bb2c5 648 dss_log->task[cpu][i].pid = (int)((struct task_struct *)v_task)->pid;
dd101ca5
DC
649 strncpy(dss_log->task[cpu][i].task_comm,
650 dss_log->task[cpu][i].task->comm,
651 TASK_COMM_LEN - 1);
652 }
653}
654
655void dbg_snapshot_work(void *worker, void *v_task, void *fn, int en)
656{
657 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
658
659 if (unlikely(!dss_base.enabled || !item->entry.enabled))
660 return;
661
662 {
663 int cpu = raw_smp_processor_id();
664 unsigned long i = atomic_inc_return(&dss_idx.work_log_idx[cpu]) &
665 (ARRAY_SIZE(dss_log->work[0]) - 1);
666 struct task_struct *task = (struct task_struct *)v_task;
667 dss_log->work[cpu][i].time = cpu_clock(cpu);
668 dss_log->work[cpu][i].sp = (unsigned long) current_stack_pointer;
669 dss_log->work[cpu][i].worker = (struct worker *)worker;
670 strncpy(dss_log->work[cpu][i].task_comm, task->comm, TASK_COMM_LEN - 1);
671 dss_log->work[cpu][i].fn = (work_func_t)fn;
672 dss_log->work[cpu][i].en = en;
673 }
674}
675
676void dbg_snapshot_cpuidle(char *modes, unsigned state, int diff, int en)
677{
678 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
679
680 if (unlikely(!dss_base.enabled || !item->entry.enabled))
681 return;
682 {
683 int cpu = raw_smp_processor_id();
684 unsigned long i = atomic_inc_return(&dss_idx.cpuidle_log_idx[cpu]) &
685 (ARRAY_SIZE(dss_log->cpuidle[0]) - 1);
686
687 dss_log->cpuidle[cpu][i].time = cpu_clock(cpu);
688 dss_log->cpuidle[cpu][i].modes = modes;
689 dss_log->cpuidle[cpu][i].state = state;
690 dss_log->cpuidle[cpu][i].sp = (unsigned long) current_stack_pointer;
691 dss_log->cpuidle[cpu][i].num_online_cpus = num_online_cpus();
692 dss_log->cpuidle[cpu][i].delta = diff;
693 dss_log->cpuidle[cpu][i].en = en;
694 }
695}
696
697void dbg_snapshot_suspend(void *fn, void *dev, int en)
698{
699 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
700
701 if (unlikely(!dss_base.enabled || !item->entry.enabled))
702 return;
703 {
704 int cpu = raw_smp_processor_id();
705 unsigned long i = atomic_inc_return(&dss_idx.suspend_log_idx) &
706 (ARRAY_SIZE(dss_log->suspend) - 1);
707
708 dss_log->suspend[i].time = cpu_clock(cpu);
709 dss_log->suspend[i].sp = (unsigned long) current_stack_pointer;
710 dss_log->suspend[i].fn = fn;
711 dss_log->suspend[i].dev = (struct device *)dev;
712 dss_log->suspend[i].core = cpu;
713 dss_log->suspend[i].en = en;
714 }
715}
716
717static void dbg_snapshot_print_calltrace(void)
718{
719 int i;
720
721 pr_info("\n<Call trace>\n");
722 for (i = 0; i < DSS_NR_CPUS; i++) {
723 pr_info("CPU ID: %d -----------------------------------------------\n", i);
724 pr_info("%s", dss_lastinfo.log[i]);
725 }
726}
727
728void dbg_snapshot_save_log(int cpu, unsigned long where)
729{
730 if (dss_lastinfo.last_p[cpu] == NULL)
731 dss_lastinfo.last_p[cpu] = &dss_lastinfo.log[cpu][0];
732
733 if (dss_lastinfo.last_p[cpu] > &dss_lastinfo.log[cpu][SZ_1K - SZ_128])
734 return;
735
736 *(unsigned long *)&(dss_lastinfo.last_p[cpu]) += sprintf(dss_lastinfo.last_p[cpu],
737 "[<%p>] %pS\n", (void *)where, (void *)where);
738
739}
740
/*
 * Split a nanosecond timestamp into whole seconds and a sub-second part.
 * NOTE: despite the parameter name, *msec receives MICROseconds
 * (remainder ns / USEC_PER_MSEC == /1000), which matches the "%06lu"
 * format used by the callers below.
 */
static void dbg_snapshot_get_sec(unsigned long long ts, unsigned long *sec, unsigned long *msec)
{
	*sec = ts / NSEC_PER_SEC;
	*msec = (ts % NSEC_PER_SEC) / USEC_PER_MSEC;
}
746
/*
 * Print the most recent irq-log entry for @cpu; flags an entry still in
 * the IN state (en == 1) as a mismatch (irq never exited).
 */
static void dbg_snapshot_print_last_irq(int cpu)
{
	unsigned long idx, sec, msec;
	char fn_name[KSYM_NAME_LEN];

	idx = atomic_read(&dss_idx.irq_log_idx[cpu]) & (ARRAY_SIZE(dss_log->irq[0]) - 1);
	dbg_snapshot_get_sec(dss_log->irq[cpu][idx].time, &sec, &msec);
	lookup_symbol_name((unsigned long)dss_log->irq[cpu][idx].fn, fn_name);

	pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %8d, %10s: %2d, %s\n",
			">>> last irq", idx, sec, msec,
			"handler", fn_name,
			"irq", dss_log->irq[cpu][idx].irq,
			"en", dss_log->irq[cpu][idx].en,
			(dss_log->irq[cpu][idx].en == 1) ? "[Missmatch]" : "");
}
763
/* Print the most recent task-switch entry recorded for @cpu. */
static void dbg_snapshot_print_last_task(int cpu)
{
	unsigned long idx, sec, msec;
	struct task_struct *task;

	idx = atomic_read(&dss_idx.task_log_idx[cpu]) & (ARRAY_SIZE(dss_log->task[0]) - 1);
	dbg_snapshot_get_sec(dss_log->task[cpu][idx].time, &sec, &msec);
	task = dss_log->task[cpu][idx].task;

	/* task may be NULL if nothing was ever logged on this cpu */
	pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: 0x%-16p, %10s: %16llu\n",
			">>> last task", idx, sec, msec,
			"task_comm", (task) ? task->comm : "NULL",
			"task", task,
			"exec_start", (task) ? task->se.exec_start : 0);
}
779
/*
 * Print the most recent workqueue entry for @cpu; an entry still in the
 * IN state (en == 1) means the work never completed.
 */
static void dbg_snapshot_print_last_work(int cpu)
{
	unsigned long idx, sec, msec;
	char fn_name[KSYM_NAME_LEN];

	idx = atomic_read(&dss_idx.work_log_idx[cpu]) & (ARRAY_SIZE(dss_log->work[0]) - 1);
	dbg_snapshot_get_sec(dss_log->work[cpu][idx].time, &sec, &msec);
	lookup_symbol_name((unsigned long)dss_log->work[cpu][idx].fn, fn_name);

	pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %20s, %3s: %3d %s\n",
			">>> last work", idx, sec, msec,
			"task_name", dss_log->work[cpu][idx].task_comm,
			"work_fn", fn_name,
			"en", dss_log->work[cpu][idx].en,
			(dss_log->work[cpu][idx].en == 1) ? "[Missmatch]" : "");
}
796
/* Print the most recent cpuidle entry for @cpu (IN state == mismatch). */
static void dbg_snapshot_print_last_cpuidle(int cpu)
{
	unsigned long idx, sec, msec;

	idx = atomic_read(&dss_idx.cpuidle_log_idx[cpu]) & (ARRAY_SIZE(dss_log->cpuidle[0]) - 1);
	dbg_snapshot_get_sec(dss_log->cpuidle[cpu][idx].time, &sec, &msec);

	pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24d, %8s: %4s, %6s: %3d, %12s: %2d, %3s: %3d %s\n",
			">>> last cpuidle", idx, sec, msec,
			"stay time", dss_log->cpuidle[cpu][idx].delta,
			"modes", dss_log->cpuidle[cpu][idx].modes,
			"state", dss_log->cpuidle[cpu][idx].state,
			"online_cpus", dss_log->cpuidle[cpu][idx].num_online_cpus,
			"en", dss_log->cpuidle[cpu][idx].en,
			(dss_log->cpuidle[cpu][idx].en == 1) ? "[Missmatch]" : "");
}
813
814static void dbg_snapshot_print_lastinfo(void)
815{
816 int cpu;
817
818 pr_info("<last info>\n");
819 for (cpu = 0; cpu < DSS_NR_CPUS; cpu++) {
820 pr_info("CPU ID: %d -----------------------------------------------\n", cpu);
821 dbg_snapshot_print_last_task(cpu);
822 dbg_snapshot_print_last_work(cpu);
823 dbg_snapshot_print_last_irq(cpu);
824 dbg_snapshot_print_last_cpuidle(cpu);
825 }
826}
827
#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
/*
 * Record one regulator event: regulator name (truncated to fit SZ_16),
 * register address, requested and raw voltage, and direction @en.
 */
void dbg_snapshot_regulator(unsigned long long timestamp, char* f_name, unsigned int addr, unsigned int volt, unsigned int rvolt, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;
	{
		int cpu = raw_smp_processor_id();
		unsigned long i = atomic_inc_return(&dss_idx.regulator_log_idx) &
				(ARRAY_SIZE(dss_log->regulator) - 1);
		int size = strlen(f_name);
		if (size >= SZ_16)
			size = SZ_16 - 1;
		dss_log->regulator[i].time = cpu_clock(cpu);
		dss_log->regulator[i].cpu = cpu;
		dss_log->regulator[i].acpm_time = timestamp;
		strncpy(dss_log->regulator[i].name, f_name, size);
		/* strncpy() copied exactly 'size' bytes with no NUL;
		 * terminate so later printing cannot run off the field */
		dss_log->regulator[i].name[size] = '\0';
		dss_log->regulator[i].reg = addr;
		dss_log->regulator[i].en = en;
		dss_log->regulator[i].voltage = volt;
		dss_log->regulator[i].raw_volt = rvolt;
	}
}
#endif
853
#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
/*
 * Record one thermal event: TMU platform data, temperature, cooling
 * device name and the maximum cooling state reached.
 */
void dbg_snapshot_thermal(void *data, unsigned int temp, char *name, unsigned int max_cooling)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;
	{
		int core = raw_smp_processor_id();
		unsigned long slot = atomic_inc_return(&dss_idx.thermal_log_idx) &
				(ARRAY_SIZE(dss_log->thermal) - 1);

		dss_log->thermal[slot].time = cpu_clock(core);
		dss_log->thermal[slot].cpu = core;
		dss_log->thermal[slot].data = (struct exynos_tmu_platform_data *)data;
		dss_log->thermal[slot].temp = temp;
		dss_log->thermal[slot].cooling_device = name;
		dss_log->thermal[slot].cooling_state = max_cooling;
	}
}
#endif
875
/*
 * Record an irq entry/exit (@en) for the current cpu unless @irq is in
 * the dss_irqlog_exlist exclusion list.  Runs with interrupts masked
 * via the raw pure_arch_local_irq_save() to keep the slot consistent.
 */
void dbg_snapshot_irq(int irq, void *fn, void *val, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	unsigned long flags;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	flags = pure_arch_local_irq_save();
	{
		int cpu = raw_smp_processor_id();
		unsigned long i;

		/* excluded irqs are dropped entirely */
		for (i = 0; i < ARRAY_SIZE(dss_irqlog_exlist); i++) {
			if (irq == dss_irqlog_exlist[i]) {
				pure_arch_local_irq_restore(flags);
				return;
			}
		}
		i = atomic_inc_return(&dss_idx.irq_log_idx[cpu]) &
				(ARRAY_SIZE(dss_log->irq[0]) - 1);

		dss_log->irq[cpu][i].time = cpu_clock(cpu);
		dss_log->irq[cpu][i].sp = (unsigned long) current_stack_pointer;
		dss_log->irq[cpu][i].irq = irq;
		dss_log->irq[cpu][i].fn = (void *)fn;
		dss_log->irq[cpu][i].action = (struct irqaction *)val;
		dss_log->irq[cpu][i].en = en;
	}
	pure_arch_local_irq_restore(flags);
}
907
#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
/*
 * Record the exit latency of @irq (now - @start_time).  Only entries
 * slower than dss_irqexit_threshold (us, hence the * 1000 to ns) are
 * kept; faster ones give their slot back by decrementing the cursor.
 */
void dbg_snapshot_irq_exit(unsigned int irq, unsigned long long start_time)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	unsigned long i;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	for (i = 0; i < ARRAY_SIZE(dss_irqexit_exlist); i++)
		if (irq == dss_irqexit_exlist[i])
			return;
	{
		int cpu = raw_smp_processor_id();
		unsigned long long time, latency;

		i = atomic_inc_return(&dss_idx.irq_exit_log_idx[cpu]) &
			(ARRAY_SIZE(dss_log->irq_exit[0]) - 1);

		time = cpu_clock(cpu);
		latency = time - start_time;

		if (unlikely(latency >
			(dss_irqexit_threshold * 1000))) {
			dss_log->irq_exit[cpu][i].latency = latency;
			dss_log->irq_exit[cpu][i].sp = (unsigned long) current_stack_pointer;
			dss_log->irq_exit[cpu][i].end_time = time;
			dss_log->irq_exit[cpu][i].time = start_time;
			dss_log->irq_exit[cpu][i].irq = irq;
		} else
			atomic_dec(&dss_idx.irq_exit_log_idx[cpu]);
	}
}
#endif
942
#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
/*
 * Record a spinlock acquire/release (@en) on the current cpu, including
 * ticket next/owner (when CONFIG_DEBUG_SPINLOCK provides them) and a
 * short caller backtrace.
 */
void dbg_snapshot_spinlock(void *v_lock, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;
	{
		int cpu = raw_smp_processor_id();
		unsigned index = atomic_inc_return(&dss_idx.spinlock_log_idx[cpu]);
		unsigned long j, i = index & (ARRAY_SIZE(dss_log->spinlock[0]) - 1);
		raw_spinlock_t *lock = (raw_spinlock_t *)v_lock;
#ifdef CONFIG_ARM_ARCH_TIMER
		dss_log->spinlock[cpu][i].time = cpu_clock(cpu);
#else
		/* no stable clock: use the monotonically growing index */
		dss_log->spinlock[cpu][i].time = index;
#endif
		dss_log->spinlock[cpu][i].sp = (unsigned long) current_stack_pointer;
		dss_log->spinlock[cpu][i].jiffies = jiffies_64;
#ifdef CONFIG_DEBUG_SPINLOCK
		dss_log->spinlock[cpu][i].lock = lock;
		dss_log->spinlock[cpu][i].next = lock->raw_lock.next;
		dss_log->spinlock[cpu][i].owner = lock->raw_lock.owner;
#endif
		dss_log->spinlock[cpu][i].en = en;

		/* skip frame 0 (this function) in the recorded backtrace */
		for (j = 0; j < dss_desc.callstack; j++) {
			dss_log->spinlock[cpu][i].caller[j] =
				(void *)((size_t)return_address(j + 1));
		}
	}
}
#endif
976
#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
/*
 * Track irqs-disabled sections on the current cpu.  A non-zero @flags
 * means interrupts are (re-)enabled: the per-cpu log is reset.  A zero
 * @flags appends one entry with task and caller backtrace.
 */
void dbg_snapshot_irqs_disabled(unsigned long flags)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu = raw_smp_processor_id();

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	if (unlikely(flags)) {
		unsigned j, local_flags = pure_arch_local_irq_save();

		/* If flags has one, it shows interrupt enable status */
		atomic_set(&dss_idx.irqs_disabled_log_idx[cpu], -1);
		dss_log->irqs_disabled[cpu][0].time = 0;
		dss_log->irqs_disabled[cpu][0].index = 0;
		dss_log->irqs_disabled[cpu][0].task = NULL;
		dss_log->irqs_disabled[cpu][0].task_comm = NULL;

		for (j = 0; j < dss_desc.callstack; j++) {
			dss_log->irqs_disabled[cpu][0].caller[j] = NULL;
		}

		pure_arch_local_irq_restore(local_flags);
	} else {
		unsigned index = atomic_inc_return(&dss_idx.irqs_disabled_log_idx[cpu]);
		unsigned long j, i = index % ARRAY_SIZE(dss_log->irqs_disabled[0]);

		/* Fix: the timestamp was written to slot [0] instead of
		 * slot [i], leaving every other entry's time stale. */
		dss_log->irqs_disabled[cpu][i].time = jiffies_64;
		dss_log->irqs_disabled[cpu][i].index = index;
		dss_log->irqs_disabled[cpu][i].task = get_current();
		dss_log->irqs_disabled[cpu][i].task_comm = get_current()->comm;

		for (j = 0; j < dss_desc.callstack; j++) {
			dss_log->irqs_disabled[cpu][i].caller[j] =
				(void *)((size_t)return_address(j + 1));
		}
	}
}
#endif
1017
#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
/*
 * Record one clock-framework event: clk_hw, calling function name,
 * argument (e.g. rate) and IN/OUT mode.
 */
void dbg_snapshot_clk(void *clock, const char *func_name, unsigned long arg, int mode)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;
	{
		int core = raw_smp_processor_id();
		unsigned long slot = atomic_inc_return(&dss_idx.clk_log_idx) &
				(ARRAY_SIZE(dss_log->clk) - 1);

		dss_log->clk[slot].time = cpu_clock(core);
		dss_log->clk[slot].clk = (struct clk_hw *)clock;
		dss_log->clk[slot].f_name = func_name;
		dss_log->clk[slot].arg = arg;
		dss_log->clk[slot].mode = mode;
	}
}
#endif
1038
#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
/* Record one PMU operation (id, caller name, mode) into the pmu ring. */
void dbg_snapshot_pmu(int id, const char *func_name, int mode)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long idx;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	idx = atomic_inc_return(&dss_idx.pmu_log_idx) &
			(ARRAY_SIZE(dss_log->pmu) - 1);

	dss_log->pmu[idx].time = cpu_clock(cpu);
	dss_log->pmu[idx].mode = mode;
	dss_log->pmu[idx].id = id;
	dss_log->pmu[idx].f_name = func_name;
}
#endif
1058
0b1ac11f
HK
/* Notifier-chain heads whose callback invocations should be reported by
 * dbg_snapshot_print_notifier_call() below. */
static struct notifier_block **dss_should_check_nl[] = {
	(struct notifier_block **)(&panic_notifier_list.head),
	(struct notifier_block **)(&reboot_notifier_list.head),
	(struct notifier_block **)(&restart_handler_list.head),
#ifdef CONFIG_EXYNOS_ITMON
	(struct notifier_block **)(&itmon_notifier_list.head),
#endif
};
1067
1068void dbg_snapshot_print_notifier_call(void **nl, unsigned long func, int en)
1069{
1070 struct notifier_block **nl_org = (struct notifier_block **)nl;
1071 char notifier_name[KSYM_NAME_LEN];
1072 char notifier_func_name[KSYM_NAME_LEN];
1073 int i;
1074
1075 for (i = 0; i < ARRAY_SIZE(dss_should_check_nl); i++) {
1076 if (nl_org == dss_should_check_nl[i]) {
1077 lookup_symbol_name((unsigned long)nl_org, notifier_name);
1078 lookup_symbol_name((unsigned long)func, notifier_func_name);
1079
1080 pr_info("debug-snapshot: %s -> %s call %s\n",
1081 notifier_name,
1082 notifier_func_name,
1083 en == DSS_FLAG_IN ? "+" : "-");
1084 break;
1085 }
1086 }
1087}
1088
dd101ca5
DC
1089#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
1090static void dbg_snapshot_print_freqinfo(void)
1091{
1092 unsigned long idx, sec, msec;
1093 char *freq_name;
1094 unsigned int i;
1095 unsigned long old_freq, target_freq;
1096
1097 pr_info("\n<freq info>\n");
1098
1099 for (i = 0; i < DSS_FLAG_END; i++) {
1100 idx = atomic_read(&dss_lastinfo.freq_last_idx[i]) & (ARRAY_SIZE(dss_log->freq) - 1);
1101 freq_name = dss_log->freq[idx].freq_name;
1102 if ((!freq_name) || strncmp(freq_name, dss_freq_name[i], strlen(dss_freq_name[i]))) {
1103 pr_info("%10s: no infomation\n", dss_freq_name[i]);
1104 continue;
1105 }
1106
1107 dbg_snapshot_get_sec(dss_log->freq[idx].time, &sec, &msec);
1108 old_freq = dss_log->freq[idx].old_freq;
1109 target_freq = dss_log->freq[idx].target_freq;
1110 pr_info("%10s: [%4lu] %10lu.%06lu sec, %12s: %6luMhz, %12s: %6luMhz, %3s: %3d %s\n",
1111 freq_name, idx, sec, msec,
1112 "old_freq", old_freq/1000,
1113 "target_freq", target_freq/1000,
1114 "en", dss_log->freq[idx].en,
1115 (dss_log->freq[idx].en == 1) ? "[Missmatch]" : "");
1116 }
1117}
1118
1119void dbg_snapshot_freq(int type, unsigned long old_freq, unsigned long target_freq, int en)
1120{
1121 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1122
1123 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1124 return;
1125 {
1126 int cpu = raw_smp_processor_id();
1127 unsigned long i = atomic_inc_return(&dss_idx.freq_log_idx) &
1128 (ARRAY_SIZE(dss_log->freq) - 1);
1129
1130 if (atomic_read(&dss_idx.freq_log_idx) > atomic_read(&dss_lastinfo.freq_last_idx[type]))
1131 atomic_set(&dss_lastinfo.freq_last_idx[type], atomic_read(&dss_idx.freq_log_idx));
1132
1133 dss_log->freq[i].time = cpu_clock(cpu);
1134 dss_log->freq[i].cpu = cpu;
1135 dss_log->freq[i].freq_name = dss_freq_name[type];
bf2bb2c5 1136 dss_log->freq[i].type = type;
dd101ca5
DC
1137 dss_log->freq[i].old_freq = old_freq;
1138 dss_log->freq[i].target_freq = target_freq;
1139 dss_log->freq[i].en = en;
1140 }
1141}
1142#endif
1143
1144#ifndef arch_irq_stat
1145#define arch_irq_stat() 0
1146#endif
1147
1148static void dbg_snapshot_print_irq(void)
1149{
1150 int i, j;
1151 u64 sum = 0;
1152
1153 for_each_possible_cpu(i) {
1154 sum += kstat_cpu_irqs_sum(i);
1155 sum += arch_irq_stat_cpu(i);
1156 }
1157 sum += arch_irq_stat();
1158
1159 pr_info("\n<irq info>\n");
1160 pr_info("------------------------------------------------------------------\n");
1161 pr_info("\n");
1162 pr_info("sum irq : %llu", (unsigned long long)sum);
1163 pr_info("------------------------------------------------------------------\n");
1164
1165 for_each_irq_nr(j) {
1166 unsigned int irq_stat = kstat_irqs(j);
1167
1168 if (irq_stat) {
1169 struct irq_desc *desc = irq_to_desc(j);
1170 const char *name;
1171
1172 name = desc->action ? (desc->action->name ? desc->action->name : "???") : "???";
1173 pr_info("irq-%-4d : %8u %s\n", j, irq_stat, name);
1174 }
1175 }
1176}
1177
/* Emit the consolidated panic report: last-run info, frequency domains
 * (if enabled), calltrace, and irq statistics. */
void dbg_snapshot_print_panic_report(void)
{
	static const char *banner =
		"============================================================";

	pr_info("%s\n", banner);
	pr_info("Panic Report\n");
	pr_info("%s\n", banner);
	dbg_snapshot_print_lastinfo();
#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
	dbg_snapshot_print_freqinfo();
#endif
	dbg_snapshot_print_calltrace();
	dbg_snapshot_print_irq();
	pr_info("%s\n", banner);
}
1191
#ifdef CONFIG_DEBUG_SNAPSHOT_DM
/* Record one dm event (min/max freq constraint plus wait/do timings)
 * into the dm ring buffer. */
void dbg_snapshot_dm(int type, unsigned long min, unsigned long max, s32 wait_t, s32 t)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long idx;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	idx = atomic_inc_return(&dss_idx.dm_log_idx) &
			(ARRAY_SIZE(dss_log->dm) - 1);

	dss_log->dm[idx].time = cpu_clock(cpu);
	dss_log->dm[idx].cpu = cpu;
	dss_log->dm[idx].dm_num = type;
	dss_log->dm[idx].min_freq = min;
	dss_log->dm[idx].max_freq = max;
	dss_log->dm[idx].wait_dmt = wait_t;
	dss_log->dm[idx].do_dmt = t;
}
#endif
1214
#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
/* Record one hrtimer event into this cpu's hrtimer ring buffer. */
void dbg_snapshot_hrtimer(void *timer, s64 *now, void *fn, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long idx;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	idx = atomic_inc_return(&dss_idx.hrtimer_log_idx[cpu]) &
			(ARRAY_SIZE(dss_log->hrtimers[0]) - 1);

	dss_log->hrtimers[cpu][idx].time = cpu_clock(cpu);
	dss_log->hrtimers[cpu][idx].now = *now;
	dss_log->hrtimers[cpu][idx].timer = (struct hrtimer *)timer;
	dss_log->hrtimers[cpu][idx].fn = fn;
	dss_log->hrtimers[cpu][idx].en = en;
}
#endif
1235
#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
/* Record one i2c transfer (adapter, messages, count) into the i2c ring. */
void dbg_snapshot_i2c(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long idx;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	idx = atomic_inc_return(&dss_idx.i2c_log_idx) &
			(ARRAY_SIZE(dss_log->i2c) - 1);

	dss_log->i2c[idx].time = cpu_clock(cpu);
	dss_log->i2c[idx].cpu = cpu;
	dss_log->i2c[idx].adap = adap;
	dss_log->i2c[idx].msgs = msgs;
	dss_log->i2c[idx].num = num;
	dss_log->i2c[idx].en = en;
}
#endif
1257
#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
/* Record one spi transfer (controller, current message) into the spi ring. */
void dbg_snapshot_spi(struct spi_controller *ctlr, struct spi_message *cur_msg, int en)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long idx;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	idx = atomic_inc_return(&dss_idx.spi_log_idx) &
			(ARRAY_SIZE(dss_log->spi) - 1);

	dss_log->spi[idx].time = cpu_clock(cpu);
	dss_log->spi[idx].cpu = cpu;
	dss_log->spi[idx].ctlr = ctlr;
	dss_log->spi[idx].cur_msg = cur_msg;
	dss_log->spi[idx].en = en;
}
#endif
1278
ae09f559
YK
#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
/* Record one binder transaction into the binder ring buffer.
 * @base is mandatory; @transaction and @error are optional payloads —
 * when absent, the corresponding slot fields are cleared so that stale
 * data from a previous ring lap cannot leak into this entry. */
void dbg_snapshot_binder(struct trace_binder_transaction_base *base,
			 struct trace_binder_transaction *transaction,
			 struct trace_binder_transaction_error *error)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long i;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;
	if (!base)
		return;

	cpu = raw_smp_processor_id();
	i = atomic_inc_return(&dss_idx.binder_log_idx) &
		(ARRAY_SIZE(dss_log->binder) - 1);

	dss_log->binder[i].time = cpu_clock(cpu);
	dss_log->binder[i].cpu = cpu;
	dss_log->binder[i].base = *base;

	if (transaction) {
		dss_log->binder[i].transaction = *transaction;
	} else {
		dss_log->binder[i].transaction.to_node_id = 0;
		dss_log->binder[i].transaction.reply = 0;
		dss_log->binder[i].transaction.flags = 0;
		dss_log->binder[i].transaction.code = 0;
	}

	if (error) {
		dss_log->binder[i].error = *error;
	} else {
		dss_log->binder[i].error.return_error = 0;
		dss_log->binder[i].error.return_error_param = 0;
		dss_log->binder[i].error.return_error_line = 0;
	}
}
#endif
1318
dd101ca5
DC
#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
/* Record one ACPM firmware log entry; at most the first 8 characters of
 * @log are kept, always NUL-terminated. */
void dbg_snapshot_acpm(unsigned long long timestamp, const char *log, unsigned int data)
{
	struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
	int cpu;
	unsigned long idx;
	int len;

	if (unlikely(!dss_base.enabled || !item->entry.enabled))
		return;

	cpu = raw_smp_processor_id();
	idx = atomic_inc_return(&dss_idx.acpm_log_idx) &
			(ARRAY_SIZE(dss_log->acpm) - 1);

	/* Clamp the copied length to 8 characters. */
	len = strlen(log);
	if (len > 8)
		len = 8;

	dss_log->acpm[idx].time = cpu_clock(cpu);
	dss_log->acpm[idx].acpm_time = timestamp;
	/* len <= strlen(log), so a plain memcpy + explicit terminator is
	 * equivalent to the usual strncpy-then-NUL idiom. */
	memcpy(dss_log->acpm[idx].log, log, len);
	dss_log->acpm[idx].log[len] = '\0';
	dss_log->acpm[idx].data = data;
}
#endif
1343
#ifdef CONFIG_DEBUG_SNAPSHOT_REG
/*
 * Translate a kernel virtual address to a physical address, including
 * addresses outside the linear map (for which virt_to_phys() is invalid)
 * by walking the kernel page tables directly.
 * If no mapping is found, paddr stays 0 and only the in-page offset of
 * @vaddr is returned.
 */
static phys_addr_t virt_to_phys_high(size_t vaddr)
{
	phys_addr_t paddr = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	/* Fast path: linear-mapped addresses translate arithmetically. */
	if (virt_addr_valid((void *) vaddr)) {
		paddr = virt_to_phys((void *) vaddr);
		goto out;
	}

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	/* NOTE(review): bit 1 of the descriptor appears to identify a
	 * section (block) mapping here — confirm against the target
	 * architecture's descriptor format. */
	if (pgd_val(*pgd) & 2) {
		paddr = pgd_val(*pgd) & SECTION_MASK;
		goto out;
	}

	pmd = pmd_offset((pud_t *)pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		goto out;

	pte = pte_offset_kernel(pmd, vaddr);
	if (pte_none(*pte))
		goto out;

	paddr = pte_val(*pte) & PAGE_MASK;

out:
	/* Re-attach the offset within the 4K page. */
	return paddr | (vaddr & UL(SZ_4K - 1));
}
1379
1380void dbg_snapshot_reg(unsigned int read, size_t val, size_t reg, int en)
1381{
1382 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1383 int cpu = raw_smp_processor_id();
1384 unsigned long i, j;
1385 size_t phys_reg, start_addr, end_addr;
1386
1387 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1388 return;
1389
1390 if (dss_reg_exlist[0].addr == 0)
1391 return;
1392
1393 phys_reg = virt_to_phys_high(reg);
1394 if (unlikely(!phys_reg))
1395 return;
1396
1397 for (j = 0; j < ARRAY_SIZE(dss_reg_exlist); j++) {
1398 if (dss_reg_exlist[j].addr == 0)
1399 break;
1400 start_addr = dss_reg_exlist[j].addr;
1401 end_addr = start_addr + dss_reg_exlist[j].size;
1402 if (start_addr <= phys_reg && phys_reg <= end_addr)
1403 return;
1404 }
1405
1406 i = atomic_inc_return(&dss_idx.reg_log_idx[cpu]) &
1407 (ARRAY_SIZE(dss_log->reg[0]) - 1);
1408
1409 dss_log->reg[cpu][i].time = cpu_clock(cpu);
1410 dss_log->reg[cpu][i].read = read;
1411 dss_log->reg[cpu][i].val = val;
1412 dss_log->reg[cpu][i].reg = phys_reg;
1413 dss_log->reg[cpu][i].en = en;
1414
1415 for (j = 0; j < dss_desc.callstack; j++) {
1416 dss_log->reg[cpu][i].caller[j] =
1417 (void *)((size_t)return_address(j + 1));
1418 }
1419}
1420#endif
1421
1422#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
1423void dbg_snapshot_clockevent(unsigned long long clc, int64_t delta, void *next_event)
1424{
1425 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1426
1427 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1428 return;
1429 {
1430 int cpu = raw_smp_processor_id();
1431 unsigned long j, i = atomic_inc_return(&dss_idx.clockevent_log_idx[cpu]) &
1432 (ARRAY_SIZE(dss_log->clockevent[0]) - 1);
1433
1434 dss_log->clockevent[cpu][i].time = cpu_clock(cpu);
1435 dss_log->clockevent[cpu][i].mct_cycle = clc;
1436 dss_log->clockevent[cpu][i].delta_ns = delta;
1437 dss_log->clockevent[cpu][i].next_event = *((ktime_t *)next_event);
1438
1439 for (j = 0; j < dss_desc.callstack; j++) {
1440 dss_log->clockevent[cpu][i].caller[j] =
1441 (void *)((size_t)return_address(j + 1));
1442 }
1443 }
1444}
1445
1446void dbg_snapshot_printk(const char *fmt, ...)
1447{
1448 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1449
1450 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1451 return;
1452 {
1453 int cpu = raw_smp_processor_id();
1454 va_list args;
1455 int ret;
1456 unsigned long j, i = atomic_inc_return(&dss_idx.printk_log_idx) &
1457 (ARRAY_SIZE(dss_log->printk) - 1);
1458
1459 va_start(args, fmt);
1460 ret = vsnprintf(dss_log->printk[i].log,
1461 sizeof(dss_log->printk[i].log), fmt, args);
1462 va_end(args);
1463
1464 dss_log->printk[i].time = cpu_clock(cpu);
1465 dss_log->printk[i].cpu = cpu;
1466
1467 for (j = 0; j < dss_desc.callstack; j++) {
1468 dss_log->printk[i].caller[j] =
1469 (void *)((size_t)return_address(j));
1470 }
1471 }
1472}
1473
1474void dbg_snapshot_printkl(size_t msg, size_t val)
1475{
1476 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1477
1478 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1479 return;
1480 {
1481 int cpu = raw_smp_processor_id();
1482 unsigned long j, i = atomic_inc_return(&dss_idx.printkl_log_idx) &
1483 (ARRAY_SIZE(dss_log->printkl) - 1);
1484
1485 dss_log->printkl[i].time = cpu_clock(cpu);
1486 dss_log->printkl[i].cpu = cpu;
1487 dss_log->printkl[i].msg = msg;
1488 dss_log->printkl[i].val = val;
1489
1490 for (j = 0; j < dss_desc.callstack; j++) {
1491 dss_log->printkl[i].caller[j] =
1492 (void *)((size_t)return_address(j));
1493 }
1494 }
1495}
1496#endif