pm_dev_dbg(dev, state, info);
trace_device_pm_callback_start(dev, info, state.event);
+ dbg_snapshot_suspend((char *)info, cb, dev, state.event, DSS_FLAG_IN);
error = cb(dev);
+ dbg_snapshot_suspend((char *)info, cb, dev, state.event, DSS_FLAG_OUT);
trace_device_pm_callback_end(dev, error);
suspend_report_result(cb, error);
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
+ dbg_snapshot_suspend("dpm_resume_noirq", dpm_noirq_resume_devices,
+ NULL, state.event, DSS_FLAG_IN);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
+ dbg_snapshot_suspend("dpm_resume_noirq", dpm_noirq_resume_devices,
+ NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
+ dbg_snapshot_suspend("dpm_resume_early", dpm_resume_early,
+ NULL, state.event, DSS_FLAG_IN);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
+ dbg_snapshot_suspend("dpm_resume_early", dpm_resume_early,
+ NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
+ dbg_snapshot_suspend("dpm_resume", dpm_resume,
+ NULL, state.event, DSS_FLAG_IN);
might_sleep();
mutex_lock(&dpm_list_mtx);
dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
+ dbg_snapshot_suspend("dpm_resume", dpm_resume,
+ NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
+ dbg_snapshot_suspend("dpm_complete", dpm_complete,
+			NULL, state.event, DSS_FLAG_IN);
might_sleep();
INIT_LIST_HEAD(&list);
/* Allow device probing and trigger re-probing of deferred devices */
device_unblock_probing();
+ dbg_snapshot_suspend("dpm_complete", dpm_complete,
+ NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
+ dbg_snapshot_suspend("dpm_suspend_noirq", dpm_noirq_suspend_devices,
+ NULL, state.event, DSS_FLAG_IN);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
}
dpm_show_time(starttime, state, error, "noirq");
+ dbg_snapshot_suspend("dpm_suspend_noirq", dpm_noirq_suspend_devices,
+ NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
+ dbg_snapshot_suspend("dpm_suspend_late", dpm_suspend_late,
+ NULL, state.event, DSS_FLAG_IN);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
dpm_resume_early(resume_event(state));
}
dpm_show_time(starttime, state, error, "late");
+ dbg_snapshot_suspend("dpm_suspend_late", dpm_suspend_late,
+ NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
int error = 0;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
+ dbg_snapshot_suspend("dpm_suspend", dpm_suspend, NULL, state.event, DSS_FLAG_IN);
might_sleep();
cpufreq_suspend();
dpm_save_failed_step(SUSPEND_SUSPEND);
}
dpm_show_time(starttime, state, error, NULL);
+ dbg_snapshot_suspend("dpm_suspend", dpm_suspend, NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
if (!callback && dev->driver && dev->driver->pm)
callback = dev->driver->pm->prepare;
+ dbg_snapshot_suspend("device_prepare", callback, dev, state.event, DSS_FLAG_IN);
if (callback)
ret = callback(dev);
+ dbg_snapshot_suspend("device_prepare", callback, dev, state.event, DSS_FLAG_OUT);
unlock:
device_unlock(dev);
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
+ dbg_snapshot_suspend("dpm_prepare", dpm_prepare,
+ NULL, state.event, DSS_FLAG_IN);
might_sleep();
/*
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ dbg_snapshot_suspend("dpm_prepare", dpm_prepare,
+ NULL, state.event, DSS_FLAG_OUT);
trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
return error;
}
if (ops->suspend) {
if (initcall_debug)
pr_info("PM: Calling %pF\n", ops->suspend);
+ dbg_snapshot_suspend("syscore_suspend", ops->suspend, NULL, 0, DSS_FLAG_IN);
ret = ops->suspend();
+ dbg_snapshot_suspend("syscore_suspend", ops->suspend, NULL, 0, DSS_FLAG_OUT);
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
if (ops->resume) {
if (initcall_debug)
pr_info("PM: Calling %pF\n", ops->resume);
+ dbg_snapshot_suspend("syscore_resume", ops->resume, NULL, 0, DSS_FLAG_IN);
ops->resume();
+ dbg_snapshot_suspend("syscore_resume", ops->resume, NULL, 0, DSS_FLAG_OUT);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled after %pF\n", ops->resume);
}
if (ops->shutdown) {
if (initcall_debug)
pr_info("PM: Calling %pF\n", ops->shutdown);
+ dbg_snapshot_suspend("syscore_shutdown", ops->shutdown, NULL, 0, DSS_FLAG_IN);
ops->shutdown();
+ dbg_snapshot_suspend("syscore_shutdown", ops->shutdown, NULL, 0, DSS_FLAG_OUT);
}
mutex_unlock(&syscore_ops_lock);
extern void dbg_snapshot_task(int cpu, void *v_task);
extern void dbg_snapshot_work(void *worker, void *v_task, void *fn, int en);
extern void dbg_snapshot_cpuidle(char *modes, unsigned state, int diff, int en);
-extern void dbg_snapshot_suspend(void *fn, void *dev, int en);
+extern void dbg_snapshot_suspend(char *log, void *fn, void *dev, int state, int en);
extern void dbg_snapshot_irq(int irq, void *fn, void *val, int en);
extern void dbg_snapshot_print_notifier_call(void **nl, unsigned long func, int en);
extern int dbg_snapshot_try_enable(const char *name, unsigned long long duration);
#define dbg_snapshot_work(a,b,c,d) do { } while(0)
#define dbg_snapshot_clockevent(a,b,c) do { } while(0)
#define dbg_snapshot_cpuidle(a,b,c,d) do { } while(0)
-#define dbg_snapshot_suspend(a,b,c) do { } while(0)
+#define dbg_snapshot_suspend(a,b,c,d,e) do { } while(0)
#define dbg_snapshot_regulator(a,b,c,d,e,f) do { } while(0)
#define dbg_snapshot_thermal(a,b,c,d) do { } while(0)
#define dbg_snapshot_irq(a,b,c,d) do { } while(0)
extern struct atomic_notifier_head restart_handler_list;
extern struct blocking_notifier_head reboot_notifier_list;
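+/* PM transition notifier chain, made non-static so debug-snapshot can dump it */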
+extern struct blocking_notifier_head pm_chain_head;
#ifdef CONFIG_EXYNOS_ITMON
extern struct atomic_notifier_head itmon_notifier_list;
if (cpu == primary)
continue;
trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
+ dbg_snapshot_suspend("CPU_OFF", _cpu_down, NULL, cpu, DSS_FLAG_IN);
error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
+ dbg_snapshot_suspend("CPU_OFF", _cpu_down, NULL, cpu, DSS_FLAG_OUT);
trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
if (!error)
cpumask_set_cpu(cpu, frozen_cpus);
for_each_cpu(cpu, frozen_cpus) {
trace_suspend_resume(TPS("CPU_ON"), cpu, true);
+ dbg_snapshot_suspend("CPU_ON", _cpu_up, NULL, cpu, DSS_FLAG_IN);
error = _cpu_up(cpu, 1, CPUHP_ONLINE);
+ dbg_snapshot_suspend("CPU_ON", _cpu_up, NULL, cpu, DSS_FLAG_OUT);
trace_suspend_resume(TPS("CPU_ON"), cpu, false);
if (!error) {
pr_info("CPU%d is up\n", cpu);
in_suspend = 1;
save_processor_state();
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
+ dbg_snapshot_suspend("machine_suspend", swsusp_arch_suspend,
+ NULL, PM_EVENT_HIBERNATE, DSS_FLAG_IN);
error = swsusp_arch_suspend();
/* Restore control flow magically appears here */
restore_processor_state();
+ dbg_snapshot_suspend("machine_suspend", swsusp_arch_suspend,
+ NULL, PM_EVENT_HIBERNATE, DSS_FLAG_OUT);
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
if (error)
pr_err("Error %d creating hibernation image\n", error);
/* Routines for PM-transition notifications */
-static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
+BLOCKING_NOTIFIER_HEAD(pm_chain_head);
int register_pm_notifier(struct notifier_block *nb)
{
struct task_struct *curr = current;
trace_suspend_resume(TPS("thaw_processes"), 0, true);
+ dbg_snapshot_suspend("thaw_processes", thaw_processes, NULL, 0, DSS_FLAG_IN);
if (pm_freezing)
atomic_dec(&system_freezing_cnt);
pm_freezing = false;
schedule();
pr_cont("done.\n");
+ dbg_snapshot_suspend("thaw_processes", thaw_processes, NULL, 0, DSS_FLAG_OUT);
trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
static void s2idle_enter(void)
{
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);
+ dbg_snapshot_suspend("machine_suspend", s2idle_enter, NULL,
+ PM_SUSPEND_TO_IDLE, DSS_FLAG_IN);
raw_spin_lock_irq(&s2idle_lock);
if (pm_wakeup_pending())
s2idle_state = S2IDLE_STATE_NONE;
raw_spin_unlock_irq(&s2idle_lock);
+ dbg_snapshot_suspend("machine_suspend", s2idle_enter, NULL,
+ PM_SUSPEND_TO_IDLE, DSS_FLAG_OUT);
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
}
}
trace_suspend_resume(TPS("freeze_processes"), 0, true);
+ dbg_snapshot_suspend("freeze_processes", suspend_freeze_processes,
+ NULL, 0, DSS_FLAG_IN);
error = suspend_freeze_processes();
+ dbg_snapshot_suspend("freeze_processes", suspend_freeze_processes,
+ NULL, 0, DSS_FLAG_OUT);
trace_suspend_resume(TPS("freeze_processes"), 0, false);
if (!error)
return 0;
if (!(suspend_test(TEST_CORE) || *wakeup)) {
trace_suspend_resume(TPS("machine_suspend"),
state, true);
+ dbg_snapshot_suspend("machine_suspend", suspend_ops->enter,
+ NULL, state, DSS_FLAG_IN);
error = suspend_ops->enter(state);
+ dbg_snapshot_suspend("machine_suspend", suspend_ops->enter,
+ NULL, state, DSS_FLAG_OUT);
trace_suspend_resume(TPS("machine_suspend"),
state, false);
} else if (*wakeup) {
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
trace_suspend_resume(TPS("resume_console"), state, true);
+ dbg_snapshot_suspend("resume_console", resume_console,
+ NULL, state, DSS_FLAG_IN);
resume_console();
+ dbg_snapshot_suspend("resume_console", resume_console,
+ NULL, state, DSS_FLAG_OUT);
trace_suspend_resume(TPS("resume_console"), state, false);
-
Close:
platform_resume_end(state);
pm_suspend_target_state = PM_SUSPEND_ON;
int error;
trace_suspend_resume(TPS("suspend_enter"), state, true);
+ dbg_snapshot_suspend("suspend_enter", enter_state, NULL, state, DSS_FLAG_IN);
if (state == PM_SUSPEND_TO_IDLE) {
#ifdef CONFIG_PM_DEBUG
if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
#ifndef CONFIG_SUSPEND_SKIP_SYNC
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
+ dbg_snapshot_suspend("sync_filesystems", sys_sync, NULL, state, DSS_FLAG_IN);
pr_info("Syncing filesystems ... ");
sys_sync();
pr_cont("done.\n");
+ dbg_snapshot_suspend("sync_filesystems", sys_sync, NULL, state, DSS_FLAG_OUT);
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif
if (suspend_test(TEST_FREEZER))
goto Finish;
+ dbg_snapshot_suspend("suspend_enter", enter_state, NULL, state, DSS_FLAG_OUT);
trace_suspend_resume(TPS("suspend_enter"), state, false);
pm_pr_dbg("Suspending system (%s)\n", mem_sleep_labels[state]);
pm_restrict_gfp_mask();
if (tick_freeze_depth == num_online_cpus()) {
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), true);
+ dbg_snapshot_suspend("timekeeping_freeze", timekeeping_suspend,
+ NULL, smp_processor_id(), DSS_FLAG_IN);
timekeeping_suspend();
} else {
tick_suspend_local();
if (tick_freeze_depth == num_online_cpus()) {
timekeeping_resume();
+ dbg_snapshot_suspend("timekeeping_freeze", timekeeping_suspend,
+ NULL, smp_processor_id(), DSS_FLAG_OUT);
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
} else {
#include <asm/traps.h>
#include <asm/hardirq.h>
#include <asm/stacktrace.h>
+#include <asm/arch_timer.h>
#include <linux/debug-snapshot.h>
#include <linux/kernel_stat.h>
#include <linux/irqnr.h>
}
}
-void dbg_snapshot_suspend(void *fn, void *dev, int en)
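+/*
+ * Log entry/exit of a suspend/resume step into the snapshot ring buffer.
+ * @log: short tag naming the step (truncated to DSS_LOG_GEN_LEN)
+ * @fn: callback or function being traced
+ * @dev: device involved, or NULL
+ * @state: PM event or CPU number, depending on the call site
+ * @en: DSS_FLAG_IN on entry, DSS_FLAG_OUT on exit
+ */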
+void dbg_snapshot_suspend(char *log, void *fn, void *dev, int state, int en)
{
struct dbg_snapshot_item *item = &dss_items[dss_desc.kevents_num];
if (unlikely(!dss_base.enabled || !item->entry.enabled))
return;
{
+ int len;
int cpu = raw_smp_processor_id();
unsigned long i = atomic_inc_return(&dss_idx.suspend_log_idx) &
(ARRAY_SIZE(dss_log->suspend) - 1);
dss_log->suspend[i].time = cpu_clock(cpu);
dss_log->suspend[i].sp = (unsigned long) current_stack_pointer;
+
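+		/* copy the tag into the fixed-size slot, truncated and NUL-terminated */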
+		if (log) {
+			len = strlen(log);
+			if (len > DSS_LOG_GEN_LEN - 1)
+				len = DSS_LOG_GEN_LEN - 1;
+			memcpy(dss_log->suspend[i].log, log, len);
+			dss_log->suspend[i].log[len] = '\0';
+		} else {
+			memset(dss_log->suspend[i].log, 0, DSS_LOG_GEN_LEN);
+		}
+
dss_log->suspend[i].fn = fn;
dss_log->suspend[i].dev = (struct device *)dev;
dss_log->suspend[i].core = cpu;
(struct notifier_block **)(&panic_notifier_list.head),
(struct notifier_block **)(&reboot_notifier_list.head),
(struct notifier_block **)(&restart_handler_list.head),
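+	/* PM transition notifiers, exported from kernel/power/main.c */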
+#ifdef CONFIG_PM_SLEEP
+ (struct notifier_block **)(&pm_chain_head.head),
+#endif
#ifdef CONFIG_EXYNOS_ITMON
(struct notifier_block **)(&itmon_notifier_list.head),
#endif
#define DSS_SPARE_SZ (DSS_HEADER_SIZE - DSS_HEADER_TOTAL_SZ)
/* Length domain */
-#define DSS_LOG_STRING_LENGTH SZ_128
+#define DSS_LOG_STRING_LEN SZ_128
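+/* length of the short tag recorded with each suspend/resume entry */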
+#define DSS_LOG_GEN_LEN SZ_16
#define DSS_MMU_REG_OFFSET SZ_512
#define DSS_CORE_REG_OFFSET SZ_512
#define DSS_LOG_MAX_NUM SZ_1K
struct __suspend_log {
unsigned long long time;
unsigned long sp;
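+	/* short tag identifying the suspend/resume step */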
+ char log[DSS_LOG_GEN_LEN];
void *fn;
#ifdef CONFIG_DEBUG_SNAPSHOT_LINUX_BUILD
	struct device *dev;
#else
	void *dev;
#endif
int en;
+ int state;
int core;
} suspend[DSS_LOG_MAX_NUM * 4];
struct __printk_log {
unsigned long long time;
int cpu;
- char log[DSS_LOG_STRING_LENGTH];
+ char log[DSS_LOG_STRING_LEN];
void *caller[DSS_CALLSTACK_MAX_NUM];
} printk[DSS_API_MAX_NUM];
#endif