2 * kernel/power/main.c - PM subsystem core functionality.
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
7 * This file is released under the GPLv2
11 #include <linux/export.h>
12 #include <linux/kobject.h>
13 #include <linux/string.h>
14 #include <linux/resume-trace.h>
15 #include <linux/workqueue.h>
16 #include <linux/debugfs.h>
17 #include <linux/seq_file.h>
/*
 * MTK hibernation debug logging helpers.
 * hib_log() logs only when HIB_PM_DEBUG is set; hib_warn() always logs.
 * NOTE(review): the #if/#else structure was lost in the corrupted source and
 * is reconstructed here — confirm against the original MTK tree.
 * The trailing ';' has been dropped from the expansions so that
 * "if (x) hib_log(...);" remains a single statement.
 */
#define HIB_PM_DEBUG 1
#define _TAG_HIB_M "HIB/PM"

#if (HIB_PM_DEBUG)
#define hib_log(fmt, ...)  pr_warn("[%s][%s]" fmt, _TAG_HIB_M, __func__, ##__VA_ARGS__)
#else
#define hib_log(fmt, ...)
#endif

#define hib_warn(fmt, ...) pr_warn("[%s][%s]" fmt, _TAG_HIB_M, __func__, ##__VA_ARGS__)
/* Serializes all system-wide PM transitions (suspend/hibernate). */
DEFINE_MUTEX(pm_mutex);
EXPORT_SYMBOL_GPL(pm_mutex);
35 #ifdef CONFIG_PM_SLEEP
37 /* Routines for PM-transition notifications */
/* Notifier chain delivering PM-transition events (PM_SUSPEND_PREPARE etc.). */
BLOCKING_NOTIFIER_HEAD(pm_chain_head);
EXPORT_SYMBOL_GPL(pm_chain_head);

//<20130327> <marc.huang> add pm_notifier_count
/* Number of callbacks currently registered on pm_chain_head (debug aid). */
static unsigned int pm_notifier_count = 0;
44 int register_pm_notifier(struct notifier_block
*nb
)
46 //<20130327> <marc.huang> add pm_notifier_count
48 return blocking_notifier_chain_register(&pm_chain_head
, nb
);
50 EXPORT_SYMBOL_GPL(register_pm_notifier
);
52 int unregister_pm_notifier(struct notifier_block
*nb
)
54 //<20130327> <marc.huang> add pm_notifier_count
56 return blocking_notifier_chain_unregister(&pm_chain_head
, nb
);
58 EXPORT_SYMBOL_GPL(unregister_pm_notifier
);
60 int pm_notifier_call_chain(unsigned long val
)
62 //<20130327> <marc.huang> add pm_notifier_count
64 pr_debug("[%s] pm_notifier_count: %u, event = %lu\n", __func__
, pm_notifier_count
, val
);
66 ret
= blocking_notifier_call_chain(&pm_chain_head
, val
, NULL
);
68 return notifier_to_errno(ret
);
70 EXPORT_SYMBOL_GPL(pm_notifier_call_chain
);
/* If set, devices may be suspended and resumed asynchronously. */
//<20130327> <marc.huang> disable async suspend/resume, set pm_async_enabled = 0
int pm_async_enabled = 0;
76 static ssize_t
pm_async_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
79 return sprintf(buf
, "%d\n", pm_async_enabled
);
82 static ssize_t
pm_async_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
83 const char *buf
, size_t n
)
87 if (kstrtoul(buf
, 10, &val
))
93 pm_async_enabled
= val
;
99 #ifdef CONFIG_PM_DEBUG
100 int pm_test_level
= TEST_NONE
;
102 static const char * const pm_tests
[__TEST_AFTER_LAST
] = {
103 [TEST_NONE
] = "none",
104 [TEST_CORE
] = "core",
105 [TEST_CPUS
] = "processors",
106 [TEST_PLATFORM
] = "platform",
107 [TEST_DEVICES
] = "devices",
108 [TEST_FREEZER
] = "freezer",
111 static ssize_t
pm_test_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
117 for (level
= TEST_FIRST
; level
<= TEST_MAX
; level
++)
118 if (pm_tests
[level
]) {
119 if (level
== pm_test_level
)
120 s
+= sprintf(s
, "[%s] ", pm_tests
[level
]);
122 s
+= sprintf(s
, "%s ", pm_tests
[level
]);
126 /* convert the last space to a newline */
132 static ssize_t
pm_test_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
133 const char *buf
, size_t n
)
135 const char * const *s
;
141 p
= memchr(buf
, '\n', n
);
142 len
= p
? p
- buf
: n
;
147 for (s
= &pm_tests
[level
]; level
<= TEST_MAX
; s
++, level
++)
148 if (*s
&& len
== strlen(*s
) && !strncmp(buf
, *s
, len
)) {
149 pm_test_level
= level
;
154 unlock_system_sleep();
156 return error
? error
: n
;
160 #endif /* CONFIG_PM_DEBUG */
162 #ifdef CONFIG_DEBUG_FS
163 static char *suspend_step_name(enum suspend_stat_step step
)
168 case SUSPEND_PREPARE
:
170 case SUSPEND_SUSPEND
:
172 case SUSPEND_SUSPEND_NOIRQ
:
173 return "suspend_noirq";
174 case SUSPEND_RESUME_NOIRQ
:
175 return "resume_noirq";
183 static int suspend_stats_show(struct seq_file
*s
, void *unused
)
185 int i
, index
, last_dev
, last_errno
, last_step
;
187 last_dev
= suspend_stats
.last_failed_dev
+ REC_FAILED_NUM
- 1;
188 last_dev
%= REC_FAILED_NUM
;
189 last_errno
= suspend_stats
.last_failed_errno
+ REC_FAILED_NUM
- 1;
190 last_errno
%= REC_FAILED_NUM
;
191 last_step
= suspend_stats
.last_failed_step
+ REC_FAILED_NUM
- 1;
192 last_step
%= REC_FAILED_NUM
;
193 seq_printf(s
, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
194 "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
195 "success", suspend_stats
.success
,
196 "fail", suspend_stats
.fail
,
197 "failed_freeze", suspend_stats
.failed_freeze
,
198 "failed_prepare", suspend_stats
.failed_prepare
,
199 "failed_suspend", suspend_stats
.failed_suspend
,
200 "failed_suspend_late",
201 suspend_stats
.failed_suspend_late
,
202 "failed_suspend_noirq",
203 suspend_stats
.failed_suspend_noirq
,
204 "failed_resume", suspend_stats
.failed_resume
,
205 "failed_resume_early",
206 suspend_stats
.failed_resume_early
,
207 "failed_resume_noirq",
208 suspend_stats
.failed_resume_noirq
);
209 seq_printf(s
, "failures:\n last_failed_dev:\t%-s\n",
210 suspend_stats
.failed_devs
[last_dev
]);
211 for (i
= 1; i
< REC_FAILED_NUM
; i
++) {
212 index
= last_dev
+ REC_FAILED_NUM
- i
;
213 index
%= REC_FAILED_NUM
;
214 seq_printf(s
, "\t\t\t%-s\n",
215 suspend_stats
.failed_devs
[index
]);
217 seq_printf(s
, " last_failed_errno:\t%-d\n",
218 suspend_stats
.errno
[last_errno
]);
219 for (i
= 1; i
< REC_FAILED_NUM
; i
++) {
220 index
= last_errno
+ REC_FAILED_NUM
- i
;
221 index
%= REC_FAILED_NUM
;
222 seq_printf(s
, "\t\t\t%-d\n",
223 suspend_stats
.errno
[index
]);
225 seq_printf(s
, " last_failed_step:\t%-s\n",
227 suspend_stats
.failed_steps
[last_step
]));
228 for (i
= 1; i
< REC_FAILED_NUM
; i
++) {
229 index
= last_step
+ REC_FAILED_NUM
- i
;
230 index
%= REC_FAILED_NUM
;
231 seq_printf(s
, "\t\t\t%-s\n",
233 suspend_stats
.failed_steps
[index
]));
239 static int suspend_stats_open(struct inode
*inode
, struct file
*file
)
241 return single_open(file
, suspend_stats_show
, NULL
);
244 static const struct file_operations suspend_stats_operations
= {
245 .open
= suspend_stats_open
,
248 .release
= single_release
,
251 static int __init
pm_debugfs_init(void)
253 debugfs_create_file("suspend_stats", S_IFREG
| S_IRUGO
,
254 NULL
, NULL
, &suspend_stats_operations
);
258 late_initcall(pm_debugfs_init
);
259 #endif /* CONFIG_DEBUG_FS */
261 #endif /* CONFIG_PM_SLEEP */
263 #ifdef CONFIG_PM_SLEEP_DEBUG
265 * pm_print_times: print time taken by devices to suspend and resume.
267 * show() returns whether printing of suspend and resume times is enabled.
268 * store() accepts 0 or 1. 0 disables printing and 1 enables it.
270 bool pm_print_times_enabled
;
272 static ssize_t
pm_print_times_show(struct kobject
*kobj
,
273 struct kobj_attribute
*attr
, char *buf
)
275 return sprintf(buf
, "%d\n", pm_print_times_enabled
);
278 static ssize_t
pm_print_times_store(struct kobject
*kobj
,
279 struct kobj_attribute
*attr
,
280 const char *buf
, size_t n
)
284 if (kstrtoul(buf
, 10, &val
))
290 pm_print_times_enabled
= !!val
;
294 power_attr(pm_print_times
);
296 static inline void pm_print_times_init(void)
298 pm_print_times_enabled
= !!initcall_debug
;
300 #else /* !CONFIG_PP_SLEEP_DEBUG */
301 static inline void pm_print_times_init(void) {}
302 #endif /* CONFIG_PM_SLEEP_DEBUG */
/* The /sys/power kobject, created in pm_init(). */
struct kobject *power_kobj;
EXPORT_SYMBOL_GPL(power_kobj);
308 * state - control system power state.
310 * show() returns what states are supported, which is hard-coded to
311 * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
312 * 'disk' (Suspend-to-Disk).
314 * store() accepts one of those strings, translates it into the
315 * proper enumerated value, and initiates a suspend transition.
317 static ssize_t
state_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
321 #ifdef CONFIG_SUSPEND
324 for (i
= PM_SUSPEND_MIN
; i
< PM_SUSPEND_MAX
; i
++)
325 if (pm_states
[i
].state
)
326 s
+= sprintf(s
,"%s ", pm_states
[i
].label
);
329 #ifdef CONFIG_HIBERNATION
330 s
+= sprintf(s
, "%s\n", "disk");
333 /* convert the last space to a newline */
339 static suspend_state_t
decode_state(const char *buf
, size_t n
)
341 #ifdef CONFIG_SUSPEND
342 suspend_state_t state
= PM_SUSPEND_MIN
;
343 struct pm_sleep_state
*s
;
348 p
= memchr(buf
, '\n', n
);
349 len
= p
? p
- buf
: n
;
351 /* Check hibernation first. */
352 if (len
== 4 && !strncmp(buf
, "disk", len
))
353 return PM_SUSPEND_MAX
;
355 #ifdef CONFIG_SUSPEND
356 for (s
= &pm_states
[state
]; state
< PM_SUSPEND_MAX
; s
++, state
++)
357 if (s
->state
&& len
== strlen(s
->label
)
358 && !strncmp(buf
, s
->label
, len
))
362 return PM_SUSPEND_ON
;
365 //<20130327> <marc.huang> merge android kernel 3.0 state_store function
366 #ifdef CONFIG_MTK_LDVT
367 static ssize_t
state_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
368 const char *buf
, size_t n
)
370 suspend_state_t state
;
373 error
= pm_autosleep_lock();
377 if (pm_autosleep_state() > PM_SUSPEND_ON
) {
382 state
= decode_state(buf
, n
);
383 if (state
< PM_SUSPEND_MAX
)
384 error
= pm_suspend(state
);
385 else if (state
== PM_SUSPEND_MAX
)
391 pm_autosleep_unlock();
392 return error
? error
: n
;
394 #else //#ifdef CONFIG_MTK_LDVT
395 static ssize_t
state_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
396 const char *buf
, size_t n
)
398 #ifdef CONFIG_SUSPEND
399 #ifdef CONFIG_EARLYSUSPEND
400 suspend_state_t state
= PM_SUSPEND_ON
;
402 suspend_state_t state
= PM_SUSPEND_STANDBY
;
404 const char * const *s
;
410 p
= memchr(buf
, '\n', n
);
411 len
= p
? p
- buf
: n
;
413 #ifdef CONFIG_MTK_HIBERNATION
414 state
= decode_state(buf
, n
);
415 hib_log("entry (%d)\n", state
);
418 #ifdef CONFIG_MTK_HIBERNATION
419 if (len
== 8 && !strncmp(buf
, "hibabort", len
)) {
420 hib_log("abort hibernation...\n");
421 error
= mtk_hibernate_abort();
426 /* First, check if we are requested to hibernate */
427 if (len
== 4 && !strncmp(buf
, "disk", len
)) {
428 #ifdef CONFIG_MTK_HIBERNATION
429 hib_log("trigger hibernation...\n");
430 #ifdef CONFIG_EARLYSUSPEND
431 if (PM_SUSPEND_ON
== get_suspend_state()) {
432 hib_warn("\"on\" to \"disk\" (i.e., 0->4) is not supported !!!\n");
437 if (!pre_hibernate()) {
439 error
= mtk_hibernate();
441 #else // !CONFIG_MTK_HIBERNATION
447 #ifdef CONFIG_SUSPEND
448 for (s
= &pm_states
[state
]; state
< PM_SUSPEND_MAX
; s
++, state
++) {
449 if (*s
&& len
== strlen(*s
) && !strncmp(buf
, *s
, len
))
452 if (state
< PM_SUSPEND_MAX
&& *s
) {
453 #ifdef CONFIG_EARLYSUSPEND
454 if (state
== PM_SUSPEND_ON
|| valid_state(state
)) {
456 request_suspend_state(state
);
460 error
= enter_state(state
);
466 return error
? error
: n
;
471 #ifdef CONFIG_PM_SLEEP
473 * The 'wakeup_count' attribute, along with the functions defined in
474 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
475 * handled in a non-racy way.
477 * If a wakeup event occurs when the system is in a sleep state, it simply is
478 * woken up. In turn, if an event that would wake the system up from a sleep
479 * state occurs when it is undergoing a transition to that sleep state, the
480 * transition should be aborted. Moreover, if such an event occurs when the
481 * system is in the working state, an attempt to start a transition to the
482 * given sleep state should fail during certain period after the detection of
483 * the event. Using the 'state' attribute alone is not sufficient to satisfy
484 * these requirements, because a wakeup event may occur exactly when 'state'
485 * is being written to and may be delivered to user space right before it is
486 * frozen, so the event will remain only partially processed until the system is
487 * woken up by another event. In particular, it won't cause the transition to
488 * a sleep state to be aborted.
490 * This difficulty may be overcome if user space uses 'wakeup_count' before
491 * writing to 'state'. It first should read from 'wakeup_count' and store
492 * the read value. Then, after carrying out its own preparations for the system
493 * transition to a sleep state, it should write the stored value to
494 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
495 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
496 * is allowed to write to 'state', but the transition will be aborted if there
497 * are any wakeup events detected after 'wakeup_count' was written to.
500 static ssize_t
wakeup_count_show(struct kobject
*kobj
,
501 struct kobj_attribute
*attr
,
506 return pm_get_wakeup_count(&val
, true) ?
507 sprintf(buf
, "%u\n", val
) : -EINTR
;
510 static ssize_t
wakeup_count_store(struct kobject
*kobj
,
511 struct kobj_attribute
*attr
,
512 const char *buf
, size_t n
)
517 error
= pm_autosleep_lock();
521 if (pm_autosleep_state() > PM_SUSPEND_ON
) {
527 if (sscanf(buf
, "%u", &val
) == 1) {
528 if (pm_save_wakeup_count(val
))
533 pm_autosleep_unlock();
537 power_attr(wakeup_count
);
539 #ifdef CONFIG_PM_AUTOSLEEP
540 static ssize_t
autosleep_show(struct kobject
*kobj
,
541 struct kobj_attribute
*attr
,
544 suspend_state_t state
= pm_autosleep_state();
546 if (state
== PM_SUSPEND_ON
)
547 return sprintf(buf
, "off\n");
549 #ifdef CONFIG_SUSPEND
550 if (state
< PM_SUSPEND_MAX
)
551 return sprintf(buf
, "%s\n", pm_states
[state
].state
?
552 pm_states
[state
].label
: "error");
554 #ifdef CONFIG_HIBERNATION
555 return sprintf(buf
, "disk\n");
557 return sprintf(buf
, "error");
561 static ssize_t
autosleep_store(struct kobject
*kobj
,
562 struct kobj_attribute
*attr
,
563 const char *buf
, size_t n
)
565 suspend_state_t state
= decode_state(buf
, n
);
568 hib_log("store autosleep_state(%d)\n", state
);
569 if (state
== PM_SUSPEND_ON
570 && strcmp(buf
, "off") && strcmp(buf
, "off\n"))
573 error
= pm_autosleep_set_state(state
);
574 return error
? error
: n
;
577 power_attr(autosleep
);
578 #endif /* CONFIG_PM_AUTOSLEEP */
580 #ifdef CONFIG_PM_WAKELOCKS
581 static ssize_t
wake_lock_show(struct kobject
*kobj
,
582 struct kobj_attribute
*attr
,
585 return pm_show_wakelocks(buf
, true);
588 static ssize_t
wake_lock_store(struct kobject
*kobj
,
589 struct kobj_attribute
*attr
,
590 const char *buf
, size_t n
)
592 int error
= pm_wake_lock(buf
);
593 return error
? error
: n
;
596 power_attr(wake_lock
);
598 static ssize_t
wake_unlock_show(struct kobject
*kobj
,
599 struct kobj_attribute
*attr
,
602 return pm_show_wakelocks(buf
, false);
605 static ssize_t
wake_unlock_store(struct kobject
*kobj
,
606 struct kobj_attribute
*attr
,
607 const char *buf
, size_t n
)
609 int error
= pm_wake_unlock(buf
);
610 return error
? error
: n
;
613 power_attr(wake_unlock
);
615 #endif /* CONFIG_PM_WAKELOCKS */
616 #endif /* CONFIG_PM_SLEEP */
618 #ifdef CONFIG_PM_TRACE
/* Non-zero when PM tracing of the resume path is enabled via sysfs. */
int pm_trace_enabled;
621 static ssize_t
pm_trace_show(struct kobject
*kobj
, struct kobj_attribute
*attr
,
624 return sprintf(buf
, "%d\n", pm_trace_enabled
);
628 pm_trace_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
629 const char *buf
, size_t n
)
633 if (sscanf(buf
, "%d", &val
) == 1) {
634 pm_trace_enabled
= !!val
;
640 power_attr(pm_trace
);
642 static ssize_t
pm_trace_dev_match_show(struct kobject
*kobj
,
643 struct kobj_attribute
*attr
,
646 return show_trace_dev_match(buf
, PAGE_SIZE
);
650 pm_trace_dev_match_store(struct kobject
*kobj
, struct kobj_attribute
*attr
,
651 const char *buf
, size_t n
)
656 power_attr(pm_trace_dev_match
);
658 #endif /* CONFIG_PM_TRACE */
660 #ifdef CONFIG_FREEZER
661 static ssize_t
pm_freeze_timeout_show(struct kobject
*kobj
,
662 struct kobj_attribute
*attr
, char *buf
)
664 return sprintf(buf
, "%u\n", freeze_timeout_msecs
);
667 static ssize_t
pm_freeze_timeout_store(struct kobject
*kobj
,
668 struct kobj_attribute
*attr
,
669 const char *buf
, size_t n
)
673 if (kstrtoul(buf
, 10, &val
))
676 freeze_timeout_msecs
= val
;
680 power_attr(pm_freeze_timeout
);
682 #endif /* CONFIG_FREEZER*/
684 static struct attribute
* g
[] = {
686 #ifdef CONFIG_PM_TRACE
688 &pm_trace_dev_match_attr
.attr
,
690 #ifdef CONFIG_PM_SLEEP
692 &wakeup_count_attr
.attr
,
693 #ifdef CONFIG_PM_AUTOSLEEP
694 &autosleep_attr
.attr
,
696 #ifdef CONFIG_PM_WAKELOCKS
697 &wake_lock_attr
.attr
,
698 &wake_unlock_attr
.attr
,
700 #ifdef CONFIG_PM_DEBUG
703 #ifdef CONFIG_PM_SLEEP_DEBUG
704 &pm_print_times_attr
.attr
,
707 #ifdef CONFIG_FREEZER
708 &pm_freeze_timeout_attr
.attr
,
713 static struct attribute_group attr_group
= {
#ifdef CONFIG_PM_RUNTIME
/* Freezable workqueue used by runtime-PM work items. */
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

/* Allocate pm_wq; returns -ENOMEM on failure. */
static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

	return pm_wq ? 0 : -ENOMEM;
}
#else
/* No runtime PM: nothing to start. */
static inline int pm_start_workqueue(void) { return 0; }
#endif
731 static int __init
pm_init(void)
733 int error
= pm_start_workqueue();
736 hibernate_image_size_init();
737 hibernate_reserved_size_init();
738 power_kobj
= kobject_create_and_add("power", NULL
);
741 error
= sysfs_create_group(power_kobj
, &attr_group
);
744 pm_print_times_init();
745 return pm_autosleep_init();
748 core_initcall(pm_init
);