[ERD][NEUS7920-76] [COMMON] lib: dss: refactor dbg_snapshot_suspend logging
[GitHub/MotorolaMobilityLLC/kernel-slsi.git] / lib / debug-snapshot-log.c
1/*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Debug-SnapShot: Debug Framework for Ramdump based debugging method
6 * The original code is Exynos-Snapshot for Exynos SoC
7 *
8 * Author: Hosung Kim <hosung0.kim@samsung.com>
9 * Author: Changki Kim <changki.kim@samsung.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/module.h>
19#include <linux/ktime.h>
20#include <linux/kallsyms.h>
21#include <linux/platform_device.h>
22#include <linux/clk-provider.h>
23#include <linux/pstore_ram.h>
24#include <linux/sched/clock.h>
25#include <linux/ftrace.h>
26
27#include "debug-snapshot-local.h"
28#include <asm/irq.h>
29#include <asm/traps.h>
30#include <asm/hardirq.h>
31#include <asm/stacktrace.h>
32#include <asm/arch_timer.h>
33#include <linux/debug-snapshot.h>
34#include <linux/kernel_stat.h>
35#include <linux/irqnr.h>
36#include <linux/irq.h>
37#include <linux/irqdesc.h>
38
39struct dbg_snapshot_lastinfo {
40#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
41 atomic_t freq_last_idx[DSS_FLAG_END];
42#endif
43 char log[DSS_NR_CPUS][SZ_1K];
44 char *last_p[DSS_NR_CPUS];
45};
46
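/*
 * Iterator state for dbg_snapshot_dumper_one(): 'items' selects the event
 * class (DSS_FLAG_*), 'init_idx'/'cur_idx' track the position in the ring
 * buffer, and 'active' is set once the first entry has been located.
 */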
47struct dss_dumper {
48 bool active;
49 u32 items;
50 int init_idx;
51 int cur_idx;
52 u32 cur_cpu;
53 u32 step;
54};
55
56enum dss_kevent_flag {
57 DSS_FLAG_TASK = 1,
58 DSS_FLAG_WORK,
59 DSS_FLAG_CPUIDLE,
60 DSS_FLAG_SUSPEND,
61 DSS_FLAG_IRQ,
62 DSS_FLAG_IRQ_EXIT,
63 DSS_FLAG_SPINLOCK,
64 DSS_FLAG_IRQ_DISABLE,
65 DSS_FLAG_CLK,
66 DSS_FLAG_FREQ,
67 DSS_FLAG_REG,
68 DSS_FLAG_HRTIMER,
69 DSS_FLAG_REGULATOR,
70 DSS_FLAG_THERMAL,
71 DSS_FLAG_MAILBOX,
72 DSS_FLAG_CLOCKEVENT,
73 DSS_FLAG_PRINTK,
74 DSS_FLAG_PRINTKL,
75 DSS_FLAG_KEVENT,
76};
77
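/*
 * Write cursors for the per-event ring buffers in struct dbg_snapshot_log.
 * Each index starts at -1 and is atomically incremented by the loggers
 * below, then wrapped to the size of the corresponding ring buffer.
 */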
78struct dbg_snapshot_log_idx {
79 atomic_t task_log_idx[DSS_NR_CPUS];
80 atomic_t work_log_idx[DSS_NR_CPUS];
81 atomic_t cpuidle_log_idx[DSS_NR_CPUS];
82 atomic_t suspend_log_idx;
83 atomic_t irq_log_idx[DSS_NR_CPUS];
84#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
85 atomic_t spinlock_log_idx[DSS_NR_CPUS];
86#endif
87#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
88 atomic_t irqs_disabled_log_idx[DSS_NR_CPUS];
89#endif
90#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
91 atomic_t irq_exit_log_idx[DSS_NR_CPUS];
92#endif
93#ifdef CONFIG_DEBUG_SNAPSHOT_REG
94 atomic_t reg_log_idx[DSS_NR_CPUS];
95#endif
96#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
97 atomic_t hrtimer_log_idx[DSS_NR_CPUS];
98#endif
99#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
100 atomic_t clk_log_idx;
101#endif
102#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
103 atomic_t pmu_log_idx;
104#endif
105#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
106 atomic_t freq_log_idx;
107#endif
108#ifdef CONFIG_DEBUG_SNAPSHOT_DM
109 atomic_t dm_log_idx;
110#endif
111#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
112 atomic_t regulator_log_idx;
113#endif
114#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
115 atomic_t thermal_log_idx;
116#endif
117#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
118 atomic_t i2c_log_idx;
119#endif
120#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
121 atomic_t spi_log_idx;
122#endif
123#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
124 atomic_t binder_log_idx;
125#endif
126#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
127 atomic_t clockevent_log_idx[DSS_NR_CPUS];
128 atomic_t printkl_log_idx;
129 atomic_t printk_log_idx;
130#endif
131#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
132 atomic_t acpm_log_idx;
133#endif
134};
135
136int dbg_snapshot_log_size = sizeof(struct dbg_snapshot_log);
137/*
138 * Interrupt exclusion list:
139 * interrupts that should be excluded from logging must be listed in this array.
140 */
141int dss_irqlog_exlist[DSS_EX_MAX_NUM] = {
142/* interrupt number ex) 152, 153, 154, */
143 -1,
144};
145
146#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
147int dss_irqexit_exlist[DSS_EX_MAX_NUM] = {
148/* interrupt number ex) 152, 153, 154, */
149 -1,
150};
151
152unsigned int dss_irqexit_threshold =
153 CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT_THRESHOLD;
154#endif
155
156#ifdef CONFIG_DEBUG_SNAPSHOT_REG
157struct dss_reg_list {
158 size_t addr;
159 size_t size;
160};
161
162static struct dss_reg_list dss_reg_exlist[] = {
163/*
164 * To reduce the impact of the register-logging feature on the system,
165 * registers that are accessed very often - such as the MCT and serial
166 * blocks - must be added to this exclusion list.
167 * physical address, size ex) {0x10C00000, 0x1000},
168 */
169 {DSS_REG_MCT_ADDR, DSS_REG_MCT_SIZE},
170 {DSS_REG_UART_ADDR, DSS_REG_UART_SIZE},
171 {0, 0},
172 {0, 0},
173 {0, 0},
174 {0, 0},
175 {0, 0},
176 {0, 0},
177 {0, 0},
178};
179#endif
180
181#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
182static char *dss_freq_name[] = {
183 "LITTLE", "BIG", "INT", "MIF", "ISP", "DISP", "INTCAM", "AUD", "IVA", "SCORE", "FSYS0",
184};
185#endif
186
187/* Internal interface variable */
188static struct dbg_snapshot_log_idx dss_idx;
189static struct dbg_snapshot_lastinfo dss_lastinfo;
190
191void __init dbg_snapshot_init_log_idx(void)
192{
193 int i;
194
195#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
196 atomic_set(&(dss_idx.printk_log_idx), -1);
197 atomic_set(&(dss_idx.printkl_log_idx), -1);
198#endif
199#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
200 atomic_set(&(dss_idx.regulator_log_idx), -1);
201#endif
202#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
203 atomic_set(&(dss_idx.thermal_log_idx), -1);
204#endif
205#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
206 atomic_set(&(dss_idx.freq_log_idx), -1);
207#endif
208#ifdef CONFIG_DEBUG_SNAPSHOT_DM
209 atomic_set(&(dss_idx.dm_log_idx), -1);
210#endif
211#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
212 atomic_set(&(dss_idx.clk_log_idx), -1);
213#endif
214#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
215 atomic_set(&(dss_idx.pmu_log_idx), -1);
216#endif
217#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
218 atomic_set(&(dss_idx.acpm_log_idx), -1);
219#endif
220#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
221 atomic_set(&(dss_idx.i2c_log_idx), -1);
222#endif
223#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
224 atomic_set(&(dss_idx.spi_log_idx), -1);
225#endif
226#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
227 atomic_set(&(dss_idx.binder_log_idx), -1);
228#endif
229 atomic_set(&(dss_idx.suspend_log_idx), -1);
230
231 for (i = 0; i < DSS_NR_CPUS; i++) {
232 atomic_set(&(dss_idx.task_log_idx[i]), -1);
233 atomic_set(&(dss_idx.work_log_idx[i]), -1);
234#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
235 atomic_set(&(dss_idx.clockevent_log_idx[i]), -1);
236#endif
237 atomic_set(&(dss_idx.cpuidle_log_idx[i]), -1);
238 atomic_set(&(dss_idx.irq_log_idx[i]), -1);
239#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
240 atomic_set(&(dss_idx.spinlock_log_idx[i]), -1);
241#endif
242#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
243 atomic_set(&(dss_idx.irqs_disabled_log_idx[i]), -1);
244#endif
245#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
246 atomic_set(&(dss_idx.irq_exit_log_idx[i]), -1);
247#endif
248#ifdef CONFIG_DEBUG_SNAPSHOT_REG
249 atomic_set(&(dss_idx.reg_log_idx[i]), -1);
250#endif
251#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
252 atomic_set(&(dss_idx.hrtimer_log_idx[i]), -1);
253#endif
254 }
255}
256
257bool dbg_snapshot_dumper_one(void *v_dumper, char *line, size_t size, size_t *len)
258{
259 bool ret = false;
260 int idx, array_size;
261 unsigned int cpu, items;
262 unsigned long rem_nsec;
263 u64 ts;
264 struct dss_dumper *dumper = (struct dss_dumper *)v_dumper;
265
266 if (!line || size < SZ_128 ||
267 dumper->cur_cpu >= NR_CPUS)
268 goto out;
269
270 if (dumper->active) {
271 if (dumper->init_idx == dumper->cur_idx)
272 goto out;
273 }
274
275 cpu = dumper->cur_cpu;
276 idx = dumper->cur_idx;
277 items = dumper->items;
278
279 switch(items) {
280 case DSS_FLAG_TASK:
281 {
282 struct task_struct *task;
283 array_size = ARRAY_SIZE(dss_log->task[0]) - 1;
284 if (!dumper->active) {
285 idx = (atomic_read(&dss_idx.task_log_idx[0]) + 1) & array_size;
286 dumper->init_idx = idx;
287 dumper->active = true;
288 }
289 ts = dss_log->task[cpu][idx].time;
290 rem_nsec = do_div(ts, NSEC_PER_SEC);
291 task = dss_log->task[cpu][idx].task;
292
293 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, "
294 "task:0x%16p, stack:0x%16p, exec_start:%16llu\n",
295 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
296 task->comm, task, task->stack,
297 task->se.exec_start);
298 break;
299 }
300 case DSS_FLAG_WORK:
301 {
302 char work_fn[KSYM_NAME_LEN] = {0,};
303 char *task_comm;
304 int en;
305
306 array_size = ARRAY_SIZE(dss_log->work[0]) - 1;
307 if (!dumper->active) {
308 idx = (atomic_read(&dss_idx.work_log_idx[0]) + 1) & array_size;
309 dumper->init_idx = idx;
310 dumper->active = true;
311 }
312 ts = dss_log->work[cpu][idx].time;
313 rem_nsec = do_div(ts, NSEC_PER_SEC);
314 lookup_symbol_name((unsigned long)dss_log->work[cpu][idx].fn, work_fn);
315 task_comm = dss_log->work[cpu][idx].task_comm;
316 en = dss_log->work[cpu][idx].en;
317
318 dumper->step = 6;
319 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] task_name:%16s, work_fn:%32s, %3s\n",
320 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
321 task_comm, work_fn,
322 en == DSS_FLAG_IN ? "IN" : "OUT");
323 break;
324 }
325 case DSS_FLAG_CPUIDLE:
326 {
327 unsigned int delta;
328 int state, num_cpus, en;
329 char *index;
330
331 array_size = ARRAY_SIZE(dss_log->cpuidle[0]) - 1;
332 if (!dumper->active) {
333 idx = (atomic_read(&dss_idx.cpuidle_log_idx[0]) + 1) & array_size;
334 dumper->init_idx = idx;
335 dumper->active = true;
336 }
337 ts = dss_log->cpuidle[cpu][idx].time;
338 rem_nsec = do_div(ts, NSEC_PER_SEC);
339
340 index = dss_log->cpuidle[cpu][idx].modes;
341 en = dss_log->cpuidle[cpu][idx].en;
342 state = dss_log->cpuidle[cpu][idx].state;
343 num_cpus = dss_log->cpuidle[cpu][idx].num_online_cpus;
344 delta = dss_log->cpuidle[cpu][idx].delta;
345
346 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] cpuidle: %s, "
347 "state:%d, num_online_cpus:%d, stay_time:%8u, %3s\n",
348 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
349 index, state, num_cpus, delta,
350 en == DSS_FLAG_IN ? "IN" : "OUT");
351 break;
352 }
353 case DSS_FLAG_SUSPEND:
354 {
355 char suspend_fn[KSYM_NAME_LEN];
356 int en;
357
358 array_size = ARRAY_SIZE(dss_log->suspend) - 1;
359 if (!dumper->active) {
360 idx = (atomic_read(&dss_idx.suspend_log_idx) + 1) & array_size;
361 dumper->init_idx = idx;
362 dumper->active = true;
363 }
364 ts = dss_log->suspend[idx].time;
365 rem_nsec = do_div(ts, NSEC_PER_SEC);
366
367 lookup_symbol_name((unsigned long)dss_log->suspend[idx].fn, suspend_fn);
368 en = dss_log->suspend[idx].en;
369
370 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] suspend_fn:%s, %3s\n",
371 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
372 suspend_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
373 break;
374 }
375 case DSS_FLAG_IRQ:
376 {
377 char irq_fn[KSYM_NAME_LEN];
378 int en, irq;
379
380 array_size = ARRAY_SIZE(dss_log->irq[0]) - 1;
381 if (!dumper->active) {
382 idx = (atomic_read(&dss_idx.irq_log_idx[0]) + 1) & array_size;
383 dumper->init_idx = idx;
384 dumper->active = true;
385 }
386 ts = dss_log->irq[cpu][idx].time;
387 rem_nsec = do_div(ts, NSEC_PER_SEC);
388
389 lookup_symbol_name((unsigned long)dss_log->irq[cpu][idx].fn, irq_fn);
390 irq = dss_log->irq[cpu][idx].irq;
391 en = dss_log->irq[cpu][idx].en;
392
393 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] irq:%6d, irq_fn:%32s, %3s\n",
394 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
395 irq, irq_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
396 break;
397 }
398#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
399 case DSS_FLAG_IRQ_EXIT:
400 {
401 unsigned long end_time, latency;
402 int irq;
403
404 array_size = ARRAY_SIZE(dss_log->irq_exit[0]) - 1;
405 if (!dumper->active) {
406 idx = (atomic_read(&dss_idx.irq_exit_log_idx[0]) + 1) & array_size;
407 dumper->init_idx = idx;
408 dumper->active = true;
409 }
410 ts = dss_log->irq_exit[cpu][idx].time;
411 rem_nsec = do_div(ts, NSEC_PER_SEC);
412
413 end_time = dss_log->irq_exit[cpu][idx].end_time;
414 latency = dss_log->irq_exit[cpu][idx].latency;
415 irq = dss_log->irq_exit[cpu][idx].irq;
416
417 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] irq:%6d, "
418 "latency:%16zu, end_time:%16zu\n",
419 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
420 irq, latency, end_time);
421 break;
422 }
423#endif
424#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
425 case DSS_FLAG_SPINLOCK:
426 {
427 unsigned int jiffies_local;
428 char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
429 int en, i;
430 u16 next, owner;
431
432 array_size = ARRAY_SIZE(dss_log->spinlock[0]) - 1;
433 if (!dumper->active) {
434 idx = (atomic_read(&dss_idx.spinlock_log_idx[0]) + 1) & array_size;
435 dumper->init_idx = idx;
436 dumper->active = true;
437 }
438 ts = dss_log->spinlock[cpu][idx].time;
439 rem_nsec = do_div(ts, NSEC_PER_SEC);
440
441 jiffies_local = dss_log->spinlock[cpu][idx].jiffies;
442 en = dss_log->spinlock[cpu][idx].en;
443 for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
444 lookup_symbol_name((unsigned long)dss_log->spinlock[cpu][idx].caller[i],
445 callstack[i]);
446
447 next = dss_log->spinlock[cpu][idx].next;
448 owner = dss_log->spinlock[cpu][idx].owner;
449
450 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] next:%8x, owner:%8x jiffies:%12u, %3s\n"
451 "callstack: %s\n"
452 " %s\n"
453 " %s\n"
454 " %s\n",
455 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
456 next, owner, jiffies_local,
457 en == DSS_FLAG_IN ? "IN" : "OUT",
458 callstack[0], callstack[1], callstack[2], callstack[3]);
459 break;
460 }
461#endif
462#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
463 case DSS_FLAG_CLK:
464 {
465 const char *clk_name;
466 char clk_fn[KSYM_NAME_LEN];
467 struct clk_hw *clk;
468 int en;
469
470 array_size = ARRAY_SIZE(dss_log->clk) - 1;
471 if (!dumper->active) {
472 idx = (atomic_read(&dss_idx.clk_log_idx) + 1) & array_size;
473 dumper->init_idx = idx;
474 dumper->active = true;
475 }
476 ts = dss_log->clk[idx].time;
477 rem_nsec = do_div(ts, NSEC_PER_SEC);
478
479 clk = (struct clk_hw *)dss_log->clk[idx].clk;
480 clk_name = clk_hw_get_name(clk);
481 lookup_symbol_name((unsigned long)dss_log->clk[idx].f_name, clk_fn);
482 en = dss_log->clk[idx].mode;
483
484 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU] clk_name:%30s, "
485 "clk_fn:%30s, %s\n",
486 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx,
487 clk_name, clk_fn, en == DSS_FLAG_IN ? "IN" : "OUT");
488 break;
489 }
490#endif
491#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
492 case DSS_FLAG_FREQ:
493 {
494 char *freq_name;
495 unsigned int on_cpu;
496 unsigned long old_freq, target_freq;
497 int en;
498
499 array_size = ARRAY_SIZE(dss_log->freq) - 1;
500 if (!dumper->active) {
501 idx = (atomic_read(&dss_idx.freq_log_idx) + 1) & array_size;
502 dumper->init_idx = idx;
503 dumper->active = true;
504 }
505 ts = dss_log->freq[idx].time;
506 rem_nsec = do_div(ts, NSEC_PER_SEC);
507
508 freq_name = dss_log->freq[idx].freq_name;
509 old_freq = dss_log->freq[idx].old_freq;
510 target_freq = dss_log->freq[idx].target_freq;
511 on_cpu = dss_log->freq[idx].cpu;
512 en = dss_log->freq[idx].en;
513
514 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] freq_name:%16s, "
515 "old_freq:%16lu, target_freq:%16lu, %3s\n",
516 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, on_cpu,
517 freq_name, old_freq, target_freq,
518 en == DSS_FLAG_IN ? "IN" : "OUT");
519 break;
520 }
521#endif
522#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
523 case DSS_FLAG_PRINTK:
524 {
525 char *log;
526 char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
527 unsigned int cpu;
528 int i;
529
530 array_size = ARRAY_SIZE(dss_log->printk) - 1;
531 if (!dumper->active) {
532 idx = (atomic_read(&dss_idx.printk_log_idx) + 1) & array_size;
533 dumper->init_idx = idx;
534 dumper->active = true;
535 }
536 ts = dss_log->printk[idx].time;
537 cpu = dss_log->printk[idx].cpu;
538 rem_nsec = do_div(ts, NSEC_PER_SEC);
539 log = dss_log->printk[idx].log;
540 for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
541 lookup_symbol_name((unsigned long)dss_log->printk[idx].caller[i],
542 callstack[i]);
543
544 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] log:%s, callstack:%s, %s, %s, %s\n",
545 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
546 log, callstack[0], callstack[1], callstack[2], callstack[3]);
547 break;
548 }
549 case DSS_FLAG_PRINTKL:
550 {
551 char callstack[CONFIG_DEBUG_SNAPSHOT_CALLSTACK][KSYM_NAME_LEN];
552 size_t msg, val;
553 unsigned int cpu;
554 int i;
555
556 array_size = ARRAY_SIZE(dss_log->printkl) - 1;
557 if (!dumper->active) {
558 idx = (atomic_read(&dss_idx.printkl_log_idx) + 1) & array_size;
559 dumper->init_idx = idx;
560 dumper->active = true;
561 }
562 ts = dss_log->printkl[idx].time;
563 cpu = dss_log->printkl[idx].cpu;
564 rem_nsec = do_div(ts, NSEC_PER_SEC);
565 msg = dss_log->printkl[idx].msg;
566 val = dss_log->printkl[idx].val;
567 for (i = 0; i < CONFIG_DEBUG_SNAPSHOT_CALLSTACK; i++)
568 lookup_symbol_name((unsigned long)dss_log->printkl[idx].caller[i],
569 callstack[i]);
570
571 *len = snprintf(line, size, "[%8lu.%09lu][%04d:CPU%u] msg:%zx, val:%zx, callstack: %s, %s, %s, %s\n",
572 (unsigned long)ts, rem_nsec / NSEC_PER_USEC, idx, cpu,
573 msg, val, callstack[0], callstack[1], callstack[2], callstack[3]);
574 break;
575 }
576#endif
577 default:
578 snprintf(line, size, "unsupported information to dump\n");
579 goto out;
580 }
581 if (array_size == idx)
582 dumper->cur_idx = 0;
583 else
584 dumper->cur_idx = idx + 1;
585
586 ret = true;
587out:
588 return ret;
589}
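/*
 * Usage sketch (illustrative only; the consumer of this interface is not
 * part of this file): drain one event class through dbg_snapshot_dumper_one().
 * The caller owns the dss_dumper state and a line buffer of at least SZ_128
 * bytes; the function returns false once the ring buffer has wrapped back to
 * the first entry it reported.
 *
 *	struct dss_dumper dumper = { .items = DSS_FLAG_TASK, };
 *	char line[SZ_256];
 *	size_t len;
 *
 *	while (dbg_snapshot_dumper_one(&dumper, line, sizeof(line), &len))
 *		pr_info("%s", line);
 */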
590
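/*
 * Raw IRQ mask/unmask helpers: unlike local_irq_save()/restore(), these skip
 * the irq-flags tracing and lockdep hooks, presumably so the snapshot loggers
 * below can be called from those very hooks without recursing.
 */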
591#ifdef CONFIG_ARM64
592static inline unsigned long pure_arch_local_irq_save(void)
593{
594 unsigned long flags;
595
596 asm volatile(
597 "mrs %0, daif // arch_local_irq_save\n"
598 "msr daifset, #2"
599 : "=r" (flags)
600 :
601 : "memory");
602
603 return flags;
604}
605
606static inline void pure_arch_local_irq_restore(unsigned long flags)
607{
608 asm volatile(
609 "msr daif, %0 // arch_local_irq_restore"
610 :
611 : "r" (flags)
612 : "memory");
613}
614#else
615static inline unsigned long pure_arch_local_irq_save(void)
616{
617 unsigned long flags;
618
619 asm volatile(
620 " mrs %0, cpsr @ arch_local_irq_save\n"
621 " cpsid i"
622 : "=r" (flags) : : "memory", "cc");
623 return flags;
624}
625
626static inline void pure_arch_local_irq_restore(unsigned long flags)
627{
628 asm volatile(
629 " msr cpsr_c, %0 @ local_irq_restore"
630 :
631 : "r" (flags)
632 : "memory", "cc");
633}
634#endif
635
636void dbg_snapshot_task(int cpu, void *v_task)
637{
638 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
639
640 if (unlikely(!dss_base.enabled || !item->entry.enabled))
641 return;
642 {
643 unsigned long i = atomic_inc_return(&dss_idx.task_log_idx[cpu]) &
644 (ARRAY_SIZE(dss_log->task[0]) - 1);
645
646 dss_log->task[cpu][i].time = cpu_clock(cpu);
647 dss_log->task[cpu][i].sp = (unsigned long)current_stack_pointer;
648 dss_log->task[cpu][i].task = (struct task_struct *)v_task;
649 dss_log->task[cpu][i].pid = (int)((struct task_struct *)v_task)->pid;
650 strncpy(dss_log->task[cpu][i].task_comm,
651 dss_log->task[cpu][i].task->comm,
652 TASK_COMM_LEN - 1);
653 }
654}
655
656void dbg_snapshot_work(void *worker, void *v_task, void *fn, int en)
657{
658 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
659
660 if (unlikely(!dss_base.enabled || !item->entry.enabled))
661 return;
662
663 {
664 int cpu = raw_smp_processor_id();
665 unsigned long i = atomic_inc_return(&dss_idx.work_log_idx[cpu]) &
666 (ARRAY_SIZE(dss_log->work[0]) - 1);
667 struct task_struct *task = (struct task_struct *)v_task;
668 dss_log->work[cpu][i].time = cpu_clock(cpu);
669 dss_log->work[cpu][i].sp = (unsigned long) current_stack_pointer;
670 dss_log->work[cpu][i].worker = (struct worker *)worker;
671 strncpy(dss_log->work[cpu][i].task_comm, task->comm, TASK_COMM_LEN - 1);
672 dss_log->work[cpu][i].fn = (work_func_t)fn;
673 dss_log->work[cpu][i].en = en;
674 }
675}
676
677void dbg_snapshot_cpuidle(char *modes, unsigned state, int diff, int en)
678{
679 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
680
681 if (unlikely(!dss_base.enabled || !item->entry.enabled))
682 return;
683 {
684 int cpu = raw_smp_processor_id();
685 unsigned long i = atomic_inc_return(&dss_idx.cpuidle_log_idx[cpu]) &
686 (ARRAY_SIZE(dss_log->cpuidle[0]) - 1);
687
688 dss_log->cpuidle[cpu][i].time = cpu_clock(cpu);
689 dss_log->cpuidle[cpu][i].modes = modes;
690 dss_log->cpuidle[cpu][i].state = state;
691 dss_log->cpuidle[cpu][i].sp = (unsigned long) current_stack_pointer;
692 dss_log->cpuidle[cpu][i].num_online_cpus = num_online_cpus();
693 dss_log->cpuidle[cpu][i].delta = diff;
694 dss_log->cpuidle[cpu][i].en = en;
695 }
696}
697
698void dbg_snapshot_suspend(char *log, void *fn, void *dev, int state, int en)
699{
700 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
701
702 if (unlikely(!dss_base.enabled || !item->entry.enabled))
703 return;
704 {
705 int len;
706 int cpu = raw_smp_processor_id();
707 unsigned long i = atomic_inc_return(&dss_idx.suspend_log_idx) &
708 (ARRAY_SIZE(dss_log->suspend) - 1);
709
710 dss_log->suspend[i].time = cpu_clock(cpu);
711 dss_log->suspend[i].sp = (unsigned long) current_stack_pointer;
712
713 if (log) {
714 len = strlen(log);
715 memcpy(dss_log->suspend[i].log, log,
716 len < DSS_LOG_GEN_LEN ?
717 len : DSS_LOG_GEN_LEN - 1);
718 } else {
719 memset(dss_log->suspend[i].log, 0, DSS_LOG_GEN_LEN - 1);
720 }
721
722 dss_log->suspend[i].fn = fn;
723 dss_log->suspend[i].dev = (struct device *)dev;
724 dss_log->suspend[i].core = cpu;
725 dss_log->suspend[i].en = en;
726 }
727}
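/*
 * Usage sketch (illustrative only; the real call sites live in the PM core,
 * not in this file): a suspend/resume hook could bracket a device callback so
 * that the last executed callback is visible in the ramdump if the system
 * hangs. 'cb', 'dev' and 'state' below are hypothetical locals of the caller.
 *
 *	dbg_snapshot_suspend("dpm_suspend", cb, dev, state, DSS_FLAG_IN);
 *	error = cb(dev);
 *	dbg_snapshot_suspend("dpm_suspend", cb, dev, state, DSS_FLAG_OUT);
 */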
728
729static void dbg_snapshot_print_calltrace(void)
730{
731 int i;
732
733 pr_info("\n<Call trace>\n");
734 for (i = 0; i < DSS_NR_CPUS; i++) {
735 pr_info("CPU ID: %d -----------------------------------------------\n", i);
736 pr_info("%s", dss_lastinfo.log[i]);
737 }
738}
739
740void dbg_snapshot_save_log(int cpu, unsigned long where)
741{
742 if (dss_lastinfo.last_p[cpu] == NULL)
743 dss_lastinfo.last_p[cpu] = &dss_lastinfo.log[cpu][0];
744
745 if (dss_lastinfo.last_p[cpu] > &dss_lastinfo.log[cpu][SZ_1K - SZ_128])
746 return;
747
748 *(unsigned long *)&(dss_lastinfo.last_p[cpu]) += sprintf(dss_lastinfo.last_p[cpu],
749 "[<%p>] %pS\n", (void *)where, (void *)where);
750
751}
752
753static void dbg_snapshot_get_sec(unsigned long long ts, unsigned long *sec, unsigned long *msec)
754{
755 *sec = ts / NSEC_PER_SEC;
756 *msec = (ts % NSEC_PER_SEC) / USEC_PER_MSEC;
757}
758
759static void dbg_snapshot_print_last_irq(int cpu)
760{
761 unsigned long idx, sec, msec;
762 char fn_name[KSYM_NAME_LEN];
763
764 idx = atomic_read(&dss_idx.irq_log_idx[cpu]) & (ARRAY_SIZE(dss_log->irq[0]) - 1);
765 dbg_snapshot_get_sec(dss_log->irq[cpu][idx].time, &sec, &msec);
766 lookup_symbol_name((unsigned long)dss_log->irq[cpu][idx].fn, fn_name);
767
768 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %8d, %10s: %2d, %s\n",
769 ">>> last irq", idx, sec, msec,
770 "handler", fn_name,
771 "irq", dss_log->irq[cpu][idx].irq,
772 "en", dss_log->irq[cpu][idx].en,
773 (dss_log->irq[cpu][idx].en == 1) ? "[Mismatch]" : "");
774}
775
776static void dbg_snapshot_print_last_task(int cpu)
777{
778 unsigned long idx, sec, msec;
779 struct task_struct *task;
780
781 idx = atomic_read(&dss_idx.task_log_idx[cpu]) & (ARRAY_SIZE(dss_log->task[0]) - 1);
782 dbg_snapshot_get_sec(dss_log->task[cpu][idx].time, &sec, &msec);
783 task = dss_log->task[cpu][idx].task;
784
785 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: 0x%-16p, %10s: %16llu\n",
786 ">>> last task", idx, sec, msec,
787 "task_comm", (task) ? task->comm : "NULL",
788 "task", task,
789 "exec_start", (task) ? task->se.exec_start : 0);
790}
791
792static void dbg_snapshot_print_last_work(int cpu)
793{
794 unsigned long idx, sec, msec;
795 char fn_name[KSYM_NAME_LEN];
796
797 idx = atomic_read(&dss_idx.work_log_idx[cpu]) & (ARRAY_SIZE(dss_log->work[0]) - 1);
798 dbg_snapshot_get_sec(dss_log->work[cpu][idx].time, &sec, &msec);
799 lookup_symbol_name((unsigned long)dss_log->work[cpu][idx].fn, fn_name);
800
801 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24s, %8s: %20s, %3s: %3d %s\n",
802 ">>> last work", idx, sec, msec,
803 "task_name", dss_log->work[cpu][idx].task_comm,
804 "work_fn", fn_name,
805 "en", dss_log->work[cpu][idx].en,
806 (dss_log->work[cpu][idx].en == 1) ? "[Mismatch]" : "");
807}
808
809static void dbg_snapshot_print_last_cpuidle(int cpu)
810{
811 unsigned long idx, sec, msec;
812
813 idx = atomic_read(&dss_idx.cpuidle_log_idx[cpu]) & (ARRAY_SIZE(dss_log->cpuidle[0]) - 1);
814 dbg_snapshot_get_sec(dss_log->cpuidle[cpu][idx].time, &sec, &msec);
815
816 pr_info("%-16s: [%4lu] %10lu.%06lu sec, %10s: %24d, %8s: %4s, %6s: %3d, %12s: %2d, %3s: %3d %s\n",
817 ">>> last cpuidle", idx, sec, msec,
818 "stay time", dss_log->cpuidle[cpu][idx].delta,
819 "modes", dss_log->cpuidle[cpu][idx].modes,
820 "state", dss_log->cpuidle[cpu][idx].state,
821 "online_cpus", dss_log->cpuidle[cpu][idx].num_online_cpus,
822 "en", dss_log->cpuidle[cpu][idx].en,
823 (dss_log->cpuidle[cpu][idx].en == 1) ? "[Mismatch]" : "");
824}
825
826static void dbg_snapshot_print_lastinfo(void)
827{
828 int cpu;
829
830 pr_info("<last info>\n");
831 for (cpu = 0; cpu < DSS_NR_CPUS; cpu++) {
832 pr_info("CPU ID: %d -----------------------------------------------\n", cpu);
833 dbg_snapshot_print_last_task(cpu);
834 dbg_snapshot_print_last_work(cpu);
835 dbg_snapshot_print_last_irq(cpu);
836 dbg_snapshot_print_last_cpuidle(cpu);
837 }
838}
839
840#ifdef CONFIG_DEBUG_SNAPSHOT_REGULATOR
841void dbg_snapshot_regulator(unsigned long long timestamp, char* f_name, unsigned int addr, unsigned int volt, unsigned int rvolt, int en)
842{
843 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
844
845 if (unlikely(!dss_base.enabled || !item->entry.enabled))
846 return;
847 {
848 int cpu = raw_smp_processor_id();
849 unsigned long i = atomic_inc_return(&dss_idx.regulator_log_idx) &
850 (ARRAY_SIZE(dss_log->regulator) - 1);
851 int size = strlen(f_name);
852 if (size >= SZ_16)
853 size = SZ_16 - 1;
854 dss_log->regulator[i].time = cpu_clock(cpu);
855 dss_log->regulator[i].cpu = cpu;
856 dss_log->regulator[i].acpm_time = timestamp;
857 strncpy(dss_log->regulator[i].name, f_name, size);
858 dss_log->regulator[i].reg = addr;
859 dss_log->regulator[i].en = en;
860 dss_log->regulator[i].voltage = volt;
861 dss_log->regulator[i].raw_volt = rvolt;
862 }
863}
864#endif
865
866#ifdef CONFIG_DEBUG_SNAPSHOT_THERMAL
867void dbg_snapshot_thermal(void *data, unsigned int temp, char *name, unsigned int max_cooling)
868{
869 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
870
871 if (unlikely(!dss_base.enabled || !item->entry.enabled))
872 return;
873 {
874 int cpu = raw_smp_processor_id();
875 unsigned long i = atomic_inc_return(&dss_idx.thermal_log_idx) &
876 (ARRAY_SIZE(dss_log->thermal) - 1);
877
878 dss_log->thermal[i].time = cpu_clock(cpu);
879 dss_log->thermal[i].cpu = cpu;
880 dss_log->thermal[i].data = (struct exynos_tmu_platform_data *)data;
881 dss_log->thermal[i].temp = temp;
882 dss_log->thermal[i].cooling_device = name;
883 dss_log->thermal[i].cooling_state = max_cooling;
884 }
885}
886#endif
887
888void dbg_snapshot_irq(int irq, void *fn, void *val, int en)
889{
890 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
891 unsigned long flags;
892
893 if (unlikely(!dss_base.enabled || !item->entry.enabled))
894 return;
895
896 flags = pure_arch_local_irq_save();
897 {
898 int cpu = raw_smp_processor_id();
899 unsigned long i;
900
901 for (i = 0; i < ARRAY_SIZE(dss_irqlog_exlist); i++) {
902 if (irq == dss_irqlog_exlist[i]) {
903 pure_arch_local_irq_restore(flags);
904 return;
905 }
906 }
907 i = atomic_inc_return(&dss_idx.irq_log_idx[cpu]) &
908 (ARRAY_SIZE(dss_log->irq[0]) - 1);
909
910 dss_log->irq[cpu][i].time = cpu_clock(cpu);
911 dss_log->irq[cpu][i].sp = (unsigned long) current_stack_pointer;
912 dss_log->irq[cpu][i].irq = irq;
913 dss_log->irq[cpu][i].fn = (void *)fn;
914 dss_log->irq[cpu][i].action = (struct irqaction *)val;
915 dss_log->irq[cpu][i].en = en;
916 }
917 pure_arch_local_irq_restore(flags);
918}
919
920#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_EXIT
921void dbg_snapshot_irq_exit(unsigned int irq, unsigned long long start_time)
922{
923 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
924 unsigned long i;
925
926 if (unlikely(!dss_base.enabled || !item->entry.enabled))
927 return;
928
929 for (i = 0; i < ARRAY_SIZE(dss_irqexit_exlist); i++)
930 if (irq == dss_irqexit_exlist[i])
931 return;
932 {
933 int cpu = raw_smp_processor_id();
934 unsigned long long time, latency;
935
936 i = atomic_inc_return(&dss_idx.irq_exit_log_idx[cpu]) &
937 (ARRAY_SIZE(dss_log->irq_exit[0]) - 1);
938
939 time = cpu_clock(cpu);
940 latency = time - start_time;
941
942 if (unlikely(latency >
943 (dss_irqexit_threshold * 1000))) {
944 dss_log->irq_exit[cpu][i].latency = latency;
945 dss_log->irq_exit[cpu][i].sp = (unsigned long) current_stack_pointer;
946 dss_log->irq_exit[cpu][i].end_time = time;
947 dss_log->irq_exit[cpu][i].time = start_time;
948 dss_log->irq_exit[cpu][i].irq = irq;
949 } else
950 atomic_dec(&dss_idx.irq_exit_log_idx[cpu]);
951 }
952}
953#endif
954
955#ifdef CONFIG_DEBUG_SNAPSHOT_SPINLOCK
956void dbg_snapshot_spinlock(void *v_lock, int en)
957{
958 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
959
960 if (unlikely(!dss_base.enabled || !item->entry.enabled))
961 return;
962 {
963 int cpu = raw_smp_processor_id();
964 unsigned index = atomic_inc_return(&dss_idx.spinlock_log_idx[cpu]);
965 unsigned long j, i = index & (ARRAY_SIZE(dss_log->spinlock[0]) - 1);
966 raw_spinlock_t *lock = (raw_spinlock_t *)v_lock;
967#ifdef CONFIG_ARM_ARCH_TIMER
968 dss_log->spinlock[cpu][i].time = cpu_clock(cpu);
969#else
970 dss_log->spinlock[cpu][i].time = index;
971#endif
972 dss_log->spinlock[cpu][i].sp = (unsigned long) current_stack_pointer;
973 dss_log->spinlock[cpu][i].jiffies = jiffies_64;
974#ifdef CONFIG_DEBUG_SPINLOCK
975 dss_log->spinlock[cpu][i].lock = lock;
976 dss_log->spinlock[cpu][i].next = lock->raw_lock.next;
977 dss_log->spinlock[cpu][i].owner = lock->raw_lock.owner;
978#endif
979 dss_log->spinlock[cpu][i].en = en;
980
981 for (j = 0; j < dss_desc.callstack; j++) {
982 dss_log->spinlock[cpu][i].caller[j] =
983 (void *)((size_t)return_address(j + 1));
984 }
985 }
986}
987#endif
988
989#ifdef CONFIG_DEBUG_SNAPSHOT_IRQ_DISABLED
990void dbg_snapshot_irqs_disabled(unsigned long flags)
991{
992 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
993 int cpu = raw_smp_processor_id();
994
995 if (unlikely(!dss_base.enabled || !item->entry.enabled))
996 return;
997
998 if (unlikely(flags)) {
999 unsigned j, local_flags = pure_arch_local_irq_save();
1000
1001 /* A non-zero flags value indicates the interrupt-enable state, so reset this CPU's log */
1002 atomic_set(&dss_idx.irqs_disabled_log_idx[cpu], -1);
1003 dss_log->irqs_disabled[cpu][0].time = 0;
1004 dss_log->irqs_disabled[cpu][0].index = 0;
1005 dss_log->irqs_disabled[cpu][0].task = NULL;
1006 dss_log->irqs_disabled[cpu][0].task_comm = NULL;
1007
1008 for (j = 0; j < dss_desc.callstack; j++) {
1009 dss_log->irqs_disabled[cpu][0].caller[j] = NULL;
1010 }
1011
1012 pure_arch_local_irq_restore(local_flags);
1013 } else {
1014 unsigned index = atomic_inc_return(&dss_idx.irqs_disabled_log_idx[cpu]);
1015 unsigned long j, i = index % ARRAY_SIZE(dss_log->irqs_disabled[0]);
1016
1017 dss_log->irqs_disabled[cpu][i].time = jiffies_64;
1018 dss_log->irqs_disabled[cpu][i].index = index;
1019 dss_log->irqs_disabled[cpu][i].task = get_current();
1020 dss_log->irqs_disabled[cpu][i].task_comm = get_current()->comm;
1021
1022 for (j = 0; j < dss_desc.callstack; j++) {
1023 dss_log->irqs_disabled[cpu][i].caller[j] =
1024 (void *)((size_t)return_address(j + 1));
1025 }
1026 }
1027}
1028#endif
1029
1030#ifdef CONFIG_DEBUG_SNAPSHOT_CLK
1031void dbg_snapshot_clk(void *clock, const char *func_name, unsigned long arg, int mode)
1032{
1033 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1034
1035 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1036 return;
1037 {
1038 int cpu = raw_smp_processor_id();
1039 unsigned long i = atomic_inc_return(&dss_idx.clk_log_idx) &
1040 (ARRAY_SIZE(dss_log->clk) - 1);
1041
1042 dss_log->clk[i].time = cpu_clock(cpu);
1043 dss_log->clk[i].mode = mode;
1044 dss_log->clk[i].arg = arg;
1045 dss_log->clk[i].clk = (struct clk_hw *)clock;
1046 dss_log->clk[i].f_name = func_name;
1047 }
1048}
1049#endif
1050
1051#ifdef CONFIG_DEBUG_SNAPSHOT_PMU
1052void dbg_snapshot_pmu(int id, const char *func_name, int mode)
1053{
1054 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1055
1056 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1057 return;
1058 {
1059 int cpu = raw_smp_processor_id();
1060 unsigned long i = atomic_inc_return(&dss_idx.pmu_log_idx) &
1061 (ARRAY_SIZE(dss_log->pmu) - 1);
1062
1063 dss_log->pmu[i].time = cpu_clock(cpu);
1064 dss_log->pmu[i].mode = mode;
1065 dss_log->pmu[i].id = id;
1066 dss_log->pmu[i].f_name = func_name;
1067 }
1068}
1069#endif
1070
1071static struct notifier_block **dss_should_check_nl[] = {
1072 (struct notifier_block **)(&panic_notifier_list.head),
1073 (struct notifier_block **)(&reboot_notifier_list.head),
1074 (struct notifier_block **)(&restart_handler_list.head),
1075#ifdef CONFIG_PM_SLEEP
1076 (struct notifier_block **)(&pm_chain_head.head),
1077#endif
1078#ifdef CONFIG_EXYNOS_ITMON
1079 (struct notifier_block **)(&itmon_notifier_list.head),
1080#endif
1081};
1082
1083void dbg_snapshot_print_notifier_call(void **nl, unsigned long func, int en)
1084{
1085 struct notifier_block **nl_org = (struct notifier_block **)nl;
1086 char notifier_name[KSYM_NAME_LEN];
1087 char notifier_func_name[KSYM_NAME_LEN];
1088 int i;
1089
1090 for (i = 0; i < ARRAY_SIZE(dss_should_check_nl); i++) {
1091 if (nl_org == dss_should_check_nl[i]) {
1092 lookup_symbol_name((unsigned long)nl_org, notifier_name);
1093 lookup_symbol_name((unsigned long)func, notifier_func_name);
1094
1095 pr_info("debug-snapshot: %s -> %s call %s\n",
1096 notifier_name,
1097 notifier_func_name,
1098 en == DSS_FLAG_IN ? "+" : "-");
1099 break;
1100 }
1101 }
1102}
1103
1104#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
1105static void dbg_snapshot_print_freqinfo(void)
1106{
1107 unsigned long idx, sec, msec;
1108 char *freq_name;
1109 unsigned int i;
1110 unsigned long old_freq, target_freq;
1111
1112 pr_info("\n<freq info>\n");
1113
1114 for (i = 0; i < DSS_FLAG_END; i++) {
1115 idx = atomic_read(&dss_lastinfo.freq_last_idx[i]) & (ARRAY_SIZE(dss_log->freq) - 1);
1116 freq_name = dss_log->freq[idx].freq_name;
1117 if ((!freq_name) || strncmp(freq_name, dss_freq_name[i], strlen(dss_freq_name[i]))) {
1118 pr_info("%10s: no information\n", dss_freq_name[i]);
1119 continue;
1120 }
1121
1122 dbg_snapshot_get_sec(dss_log->freq[idx].time, &sec, &msec);
1123 old_freq = dss_log->freq[idx].old_freq;
1124 target_freq = dss_log->freq[idx].target_freq;
1125 pr_info("%10s: [%4lu] %10lu.%06lu sec, %12s: %6luMHz, %12s: %6luMHz, %3s: %3d %s\n",
1126 freq_name, idx, sec, msec,
1127 "old_freq", old_freq/1000,
1128 "target_freq", target_freq/1000,
1129 "en", dss_log->freq[idx].en,
1130 (dss_log->freq[idx].en == 1) ? "[Mismatch]" : "");
1131 }
1132}
1133
1134void dbg_snapshot_freq(int type, unsigned long old_freq, unsigned long target_freq, int en)
1135{
1136 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1137
1138 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1139 return;
1140 {
1141 int cpu = raw_smp_processor_id();
1142 unsigned long i = atomic_inc_return(&dss_idx.freq_log_idx) &
1143 (ARRAY_SIZE(dss_log->freq) - 1);
1144
1145 if (atomic_read(&dss_idx.freq_log_idx) > atomic_read(&dss_lastinfo.freq_last_idx[type]))
1146 atomic_set(&dss_lastinfo.freq_last_idx[type], atomic_read(&dss_idx.freq_log_idx));
1147
1148 dss_log->freq[i].time = cpu_clock(cpu);
1149 dss_log->freq[i].cpu = cpu;
1150 dss_log->freq[i].freq_name = dss_freq_name[type];
1151 dss_log->freq[i].type = type;
1152 dss_log->freq[i].old_freq = old_freq;
1153 dss_log->freq[i].target_freq = target_freq;
1154 dss_log->freq[i].en = en;
1155 }
1156}
1157#endif
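/*
 * Usage sketch (illustrative only; not a call site in this file): a DVFS
 * driver could log a frequency transition around the actual change.
 * 'type' indexes dss_freq_name[] above (e.g. 3 for "MIF"); frequencies are
 * expected in kHz, matching the /1000 -> MHz conversion done by
 * dbg_snapshot_print_freqinfo().
 *
 *	dbg_snapshot_freq(type, old_khz, new_khz, DSS_FLAG_IN);
 *	...change the clock...
 *	dbg_snapshot_freq(type, old_khz, new_khz, DSS_FLAG_OUT);
 */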
1158
1159#ifndef arch_irq_stat
1160#define arch_irq_stat() 0
1161#endif
1162
1163static void dbg_snapshot_print_irq(void)
1164{
1165 int i, j;
1166 u64 sum = 0;
1167
1168 for_each_possible_cpu(i) {
1169 sum += kstat_cpu_irqs_sum(i);
1170 sum += arch_irq_stat_cpu(i);
1171 }
1172 sum += arch_irq_stat();
1173
1174 pr_info("\n<irq info>\n");
1175 pr_info("------------------------------------------------------------------\n");
1176 pr_info("\n");
1177 pr_info("sum irq : %llu\n", (unsigned long long)sum);
1178 pr_info("------------------------------------------------------------------\n");
1179
1180 for_each_irq_nr(j) {
1181 unsigned int irq_stat = kstat_irqs(j);
1182
1183 if (irq_stat) {
1184 struct irq_desc *desc = irq_to_desc(j);
1185 const char *name;
1186
1187 name = desc->action ? (desc->action->name ? desc->action->name : "???") : "???";
1188 pr_info("irq-%-4d : %8u %s\n", j, irq_stat, name);
1189 }
1190 }
1191}
1192
1193void dbg_snapshot_print_panic_report(void)
1194{
1195 pr_info("============================================================\n");
1196 pr_info("Panic Report\n");
1197 pr_info("============================================================\n");
1198 dbg_snapshot_print_lastinfo();
1199#ifdef CONFIG_DEBUG_SNAPSHOT_FREQ
1200 dbg_snapshot_print_freqinfo();
1201#endif
1202 dbg_snapshot_print_calltrace();
1203 dbg_snapshot_print_irq();
1204 pr_info("============================================================\n");
1205}
1206
1207#ifdef CONFIG_DEBUG_SNAPSHOT_DM
1208void dbg_snapshot_dm(int type, unsigned long min, unsigned long max, s32 wait_t, s32 t)
1209{
1210 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1211
1212 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1213 return;
1214 {
1215 int cpu = raw_smp_processor_id();
1216 unsigned long i = atomic_inc_return(&dss_idx.dm_log_idx) &
1217 (ARRAY_SIZE(dss_log->dm) - 1);
1218
1219 dss_log->dm[i].time = cpu_clock(cpu);
1220 dss_log->dm[i].cpu = cpu;
1221 dss_log->dm[i].dm_num = type;
1222 dss_log->dm[i].min_freq = min;
1223 dss_log->dm[i].max_freq = max;
1224 dss_log->dm[i].wait_dmt = wait_t;
1225 dss_log->dm[i].do_dmt = t;
1226 }
1227}
1228#endif
1229
1230#ifdef CONFIG_DEBUG_SNAPSHOT_HRTIMER
1231void dbg_snapshot_hrtimer(void *timer, s64 *now, void *fn, int en)
1232{
1233 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1234
1235 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1236 return;
1237 {
1238 int cpu = raw_smp_processor_id();
1239 unsigned long i = atomic_inc_return(&dss_idx.hrtimer_log_idx[cpu]) &
1240 (ARRAY_SIZE(dss_log->hrtimers[0]) - 1);
1241
1242 dss_log->hrtimers[cpu][i].time = cpu_clock(cpu);
1243 dss_log->hrtimers[cpu][i].now = *now;
1244 dss_log->hrtimers[cpu][i].timer = (struct hrtimer *)timer;
1245 dss_log->hrtimers[cpu][i].fn = fn;
1246 dss_log->hrtimers[cpu][i].en = en;
1247 }
1248}
1249#endif
1250
1251#ifdef CONFIG_DEBUG_SNAPSHOT_I2C
1252void dbg_snapshot_i2c(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, int en)
1253{
1254 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1255
1256 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1257 return;
1258 {
1259 int cpu = raw_smp_processor_id();
1260 unsigned long i = atomic_inc_return(&dss_idx.i2c_log_idx) &
1261 (ARRAY_SIZE(dss_log->i2c) - 1);
1262
1263 dss_log->i2c[i].time = cpu_clock(cpu);
1264 dss_log->i2c[i].cpu = cpu;
1265 dss_log->i2c[i].adap = adap;
1266 dss_log->i2c[i].msgs = msgs;
1267 dss_log->i2c[i].num = num;
1268 dss_log->i2c[i].en = en;
1269 }
1270}
1271#endif
1272
1273#ifdef CONFIG_DEBUG_SNAPSHOT_SPI
1274void dbg_snapshot_spi(struct spi_controller *ctlr, struct spi_message *cur_msg, int en)
1275{
1276 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1277
1278 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1279 return;
1280 {
1281 int cpu = raw_smp_processor_id();
1282 unsigned long i = atomic_inc_return(&dss_idx.spi_log_idx) &
1283 (ARRAY_SIZE(dss_log->spi) - 1);
1284
1285 dss_log->spi[i].time = cpu_clock(cpu);
1286 dss_log->spi[i].cpu = cpu;
1287 dss_log->spi[i].ctlr = ctlr;
1288 dss_log->spi[i].cur_msg = cur_msg;
1289 dss_log->spi[i].en = en;
1290 }
1291}
1292#endif
1293
1294#ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
1295void dbg_snapshot_binder(struct trace_binder_transaction_base *base,
1296 struct trace_binder_transaction *transaction,
1297 struct trace_binder_transaction_error *error)
1298{
1299 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1300 int cpu;
1301 unsigned long i;
1302
1303 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1304 return;
1305 if (base == NULL)
1306 return;
1307
1308 cpu = raw_smp_processor_id();
1309 i = atomic_inc_return(&dss_idx.binder_log_idx) &
1310 (ARRAY_SIZE(dss_log->binder) - 1);
1311
1312 dss_log->binder[i].time = cpu_clock(cpu);
1313 dss_log->binder[i].cpu = cpu;
1314 dss_log->binder[i].base = *base;
1315
1316 if (transaction) {
1317 dss_log->binder[i].transaction = *transaction;
1318 } else {
1319 dss_log->binder[i].transaction.to_node_id = 0;
1320 dss_log->binder[i].transaction.reply = 0;
1321 dss_log->binder[i].transaction.flags = 0;
1322 dss_log->binder[i].transaction.code = 0;
1323 }
1324 if (error) {
1325 dss_log->binder[i].error = *error;
1326 } else {
1327 dss_log->binder[i].error.return_error = 0;
1328 dss_log->binder[i].error.return_error_param = 0;
1329 dss_log->binder[i].error.return_error_line = 0;
1330 }
1331}
1332#endif
1333
1334#ifdef CONFIG_DEBUG_SNAPSHOT_ACPM
1335void dbg_snapshot_acpm(unsigned long long timestamp, const char *log, unsigned int data)
1336{
1337 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1338
1339 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1340 return;
1341 {
1342 int cpu = raw_smp_processor_id();
1343 unsigned long i = atomic_inc_return(&dss_idx.acpm_log_idx) &
1344 (ARRAY_SIZE(dss_log->acpm) - 1);
1345 int len = strlen(log);
1346
1347 if (len >= 8)
1348 len = 8;
1349
1350 dss_log->acpm[i].time = cpu_clock(cpu);
1351 dss_log->acpm[i].acpm_time = timestamp;
1352 strncpy(dss_log->acpm[i].log, log, len);
1353 dss_log->acpm[i].log[len] = '\0';
1354 dss_log->acpm[i].data = data;
1355 }
1356}
1357#endif
1358
1359#ifdef CONFIG_DEBUG_SNAPSHOT_REG
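/*
 * Resolve a kernel virtual address to a physical address. Addresses that fall
 * outside the linear mapping (e.g. ioremap'd MMIO registers) are translated
 * by walking the kernel page tables, so that accesses can be matched against
 * the physical ranges in dss_reg_exlist[].
 */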
1360static phys_addr_t virt_to_phys_high(size_t vaddr)
1361{
1362 phys_addr_t paddr = 0;
1363 pgd_t *pgd;
1364 pmd_t *pmd;
1365 pte_t *pte;
1366
1367 if (virt_addr_valid((void *) vaddr)) {
1368 paddr = virt_to_phys((void *) vaddr);
1369 goto out;
1370 }
1371
1372 pgd = pgd_offset_k(vaddr);
1373 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1374 goto out;
1375
1376 if (pgd_val(*pgd) & 2) {
1377 paddr = pgd_val(*pgd) & SECTION_MASK;
1378 goto out;
1379 }
1380
1381 pmd = pmd_offset((pud_t *)pgd, vaddr);
1382 if (pmd_none_or_clear_bad(pmd))
1383 goto out;
1384
1385 pte = pte_offset_kernel(pmd, vaddr);
1386 if (pte_none(*pte))
1387 goto out;
1388
1389 paddr = pte_val(*pte) & PAGE_MASK;
1390
1391out:
1392 return paddr | (vaddr & UL(SZ_4K - 1));
1393}
1394
1395void dbg_snapshot_reg(unsigned int read, size_t val, size_t reg, int en)
1396{
1397 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1398 int cpu = raw_smp_processor_id();
1399 unsigned long i, j;
1400 size_t phys_reg, start_addr, end_addr;
1401
1402 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1403 return;
1404
1405 if (dss_reg_exlist[0].addr == 0)
1406 return;
1407
1408 phys_reg = virt_to_phys_high(reg);
1409 if (unlikely(!phys_reg))
1410 return;
1411
1412 for (j = 0; j < ARRAY_SIZE(dss_reg_exlist); j++) {
1413 if (dss_reg_exlist[j].addr == 0)
1414 break;
1415 start_addr = dss_reg_exlist[j].addr;
1416 end_addr = start_addr + dss_reg_exlist[j].size;
1417 if (start_addr <= phys_reg && phys_reg <= end_addr)
1418 return;
1419 }
1420
1421 i = atomic_inc_return(&dss_idx.reg_log_idx[cpu]) &
1422 (ARRAY_SIZE(dss_log->reg[0]) - 1);
1423
1424 dss_log->reg[cpu][i].time = cpu_clock(cpu);
1425 dss_log->reg[cpu][i].read = read;
1426 dss_log->reg[cpu][i].val = val;
1427 dss_log->reg[cpu][i].reg = phys_reg;
1428 dss_log->reg[cpu][i].en = en;
1429
1430 for (j = 0; j < dss_desc.callstack; j++) {
1431 dss_log->reg[cpu][i].caller[j] =
1432 (void *)((size_t)return_address(j + 1));
1433 }
1434}
1435#endif
1436
1437#ifndef CONFIG_DEBUG_SNAPSHOT_MINIMIZED_MODE
1438void dbg_snapshot_clockevent(unsigned long long clc, int64_t delta, void *next_event)
1439{
1440 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1441
1442 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1443 return;
1444 {
1445 int cpu = raw_smp_processor_id();
1446 unsigned long j, i = atomic_inc_return(&dss_idx.clockevent_log_idx[cpu]) &
1447 (ARRAY_SIZE(dss_log->clockevent[0]) - 1);
1448
1449 dss_log->clockevent[cpu][i].time = cpu_clock(cpu);
1450 dss_log->clockevent[cpu][i].mct_cycle = clc;
1451 dss_log->clockevent[cpu][i].delta_ns = delta;
1452 dss_log->clockevent[cpu][i].next_event = *((ktime_t *)next_event);
1453
1454 for (j = 0; j < dss_desc.callstack; j++) {
1455 dss_log->clockevent[cpu][i].caller[j] =
1456 (void *)((size_t)return_address(j + 1));
1457 }
1458 }
1459}
1460
1461void dbg_snapshot_printk(const char *fmt, ...)
1462{
1463 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1464
1465 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1466 return;
1467 {
1468 int cpu = raw_smp_processor_id();
1469 va_list args;
1470 int ret;
1471 unsigned long j, i = atomic_inc_return(&dss_idx.printk_log_idx) &
1472 (ARRAY_SIZE(dss_log->printk) - 1);
1473
1474 va_start(args, fmt);
1475 ret = vsnprintf(dss_log->printk[i].log,
1476 sizeof(dss_log->printk[i].log), fmt, args);
1477 va_end(args);
1478
1479 dss_log->printk[i].time = cpu_clock(cpu);
1480 dss_log->printk[i].cpu = cpu;
1481
1482 for (j = 0; j < dss_desc.callstack; j++) {
1483 dss_log->printk[i].caller[j] =
1484 (void *)((size_t)return_address(j));
1485 }
1486 }
1487}
1488
1489void dbg_snapshot_printkl(size_t msg, size_t val)
1490{
1491 struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
1492
1493 if (unlikely(!dss_base.enabled || !item->entry.enabled))
1494 return;
1495 {
1496 int cpu = raw_smp_processor_id();
1497 unsigned long j, i = atomic_inc_return(&dss_idx.printkl_log_idx) &
1498 (ARRAY_SIZE(dss_log->printkl) - 1);
1499
1500 dss_log->printkl[i].time = cpu_clock(cpu);
1501 dss_log->printkl[i].cpu = cpu;
1502 dss_log->printkl[i].msg = msg;
1503 dss_log->printkl[i].val = val;
1504
1505 for (j = 0; j < dss_desc.callstack; j++) {
1506 dss_log->printkl[i].caller[j] =
1507 (void *)((size_t)return_address(j));
1508 }
1509 }
1510}
1511#endif