/*
 * kernel/power/main.c - PM subsystem core functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 */

#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "power.h"

#define HIB_PM_DEBUG 1
#define _TAG_HIB_M "HIB/PM"
#if (HIB_PM_DEBUG)
#undef hib_log
#define hib_log(fmt, ...) pr_warn("[%s][%s]" fmt, _TAG_HIB_M, __func__, ##__VA_ARGS__)
#else
#define hib_log(fmt, ...)
#endif
#undef hib_warn
#define hib_warn(fmt, ...) pr_warn("[%s][%s]" fmt, _TAG_HIB_M, __func__, ##__VA_ARGS__)

DEFINE_MUTEX(pm_mutex);
EXPORT_SYMBOL_GPL(pm_mutex);

#ifdef CONFIG_PM_SLEEP

/* Routines for PM-transition notifications */

BLOCKING_NOTIFIER_HEAD(pm_chain_head);
EXPORT_SYMBOL_GPL(pm_chain_head);
//<20130327> <marc.huang> add pm_notifier_count
static unsigned int pm_notifier_count = 0;

int register_pm_notifier(struct notifier_block *nb)
{
	//<20130327> <marc.huang> add pm_notifier_count
	++pm_notifier_count;
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

int unregister_pm_notifier(struct notifier_block *nb)
{
	//<20130327> <marc.huang> add pm_notifier_count
	--pm_notifier_count;
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

int pm_notifier_call_chain(unsigned long val)
{
	//<20130327> <marc.huang> add pm_notifier_count
	int ret;
	pr_debug("[%s] pm_notifier_count: %u, event = %lu\n", __func__, pm_notifier_count, val);

	ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

	return notifier_to_errno(ret);
}
EXPORT_SYMBOL_GPL(pm_notifier_call_chain);
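
/*
 * Illustrative driver-side use of the notifier API above (a sketch only;
 * the callback and notifier_block names are hypothetical, not part of this
 * file):
 *
 *	static int my_pm_notify(struct notifier_block *nb,
 *				unsigned long event, void *unused)
 *	{
 *		switch (event) {
 *		case PM_SUSPEND_PREPARE:
 *		case PM_POST_SUSPEND:
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block my_pm_nb = {
 *		.notifier_call = my_pm_notify,
 *	};
 *
 * The driver would call register_pm_notifier(&my_pm_nb) at init time and
 * unregister_pm_notifier(&my_pm_nb) on removal; the PM core then invokes the
 * callback via pm_notifier_call_chain() around every sleep transition.
 */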

/* If set, devices may be suspended and resumed asynchronously. */
//<20130327> <marc.huang> disable async suspend/resume, set pm_async_enabled = 0
int pm_async_enabled = 0;

static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}

power_attr(pm_async);

#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;

static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};

static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	char *s = buf;
	int level;

	for (level = TEST_FIRST; level <= TEST_MAX; level++)
		if (pm_tests[level]) {
			if (level == pm_test_level)
				s += sprintf(s, "[%s] ", pm_tests[level]);
			else
				s += sprintf(s, "%s ", pm_tests[level]);
		}

	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';

	return (s - buf);
}

static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t n)
{
	const char * const *s;
	int level;
	char *p;
	int len;
	int error = -EINVAL;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
			pm_test_level = level;
			error = 0;
			break;
		}

	unlock_system_sleep();

	return error ? error : n;
}

power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
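
/*
 * Illustrative pm_test usage from user space (assumes CONFIG_PM_DEBUG; the
 * output shown is an example, not captured from a real system):
 *
 *	# cat /sys/power/pm_test
 *	[none] core processors platform devices freezer
 *	# echo devices > /sys/power/pm_test
 *	# echo mem > /sys/power/state
 *
 * With a test level selected, the next suspend stops at that stage, waits,
 * and then unwinds back to the working state instead of actually sleeping.
 */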

#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}

static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;
	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
			"success", suspend_stats.success,
			"fail", suspend_stats.fail,
			"failed_freeze", suspend_stats.failed_freeze,
			"failed_prepare", suspend_stats.failed_prepare,
			"failed_suspend", suspend_stats.failed_suspend,
			"failed_suspend_late",
				suspend_stats.failed_suspend_late,
			"failed_suspend_noirq",
				suspend_stats.failed_suspend_noirq,
			"failed_resume", suspend_stats.failed_resume,
			"failed_resume_early",
				suspend_stats.failed_resume_early,
			"failed_resume_noirq",
				suspend_stats.failed_resume_noirq);
	seq_printf(s, "failures:\n  last_failed_dev:\t%-s\n",
			suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_stats.failed_devs[index]);
	}
	seq_printf(s, "  last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
			suspend_stats.errno[index]);
	}
	seq_printf(s, "  last_failed_step:\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[index]));
	}

	return 0;
}

static int suspend_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, suspend_stats_show, NULL);
}

static const struct file_operations suspend_stats_operations = {
	.open		= suspend_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_operations);
	return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
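
/*
 * Illustrative way to read the statistics exposed above (assumes debugfs is
 * mounted at /sys/kernel/debug; the values and device name shown are made up):
 *
 *	# cat /sys/kernel/debug/suspend_stats
 *	success: 42
 *	fail: 1
 *	failed_freeze: 0
 *	...
 *	failures:
 *	  last_failed_dev:	some_device
 *	  last_failed_errno:	-16
 *	  last_failed_step:	suspend
 */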

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
/*
 * pm_print_times: print time taken by devices to suspend and resume.
 *
 * show() returns whether printing of suspend and resume times is enabled.
 * store() accepts 0 or 1.  0 disables printing and 1 enables it.
 */
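/*
 * Illustrative usage from user space:
 *
 *	# echo 1 > /sys/power/pm_print_times
 *	# echo mem > /sys/power/state
 *
 * With printing enabled, the duration of each device's suspend and resume
 * callbacks is logged to the kernel log during the transition.
 */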
bool pm_print_times_enabled;

static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_print_times_enabled);
}

static ssize_t pm_print_times_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_print_times_enabled = !!val;
	return n;
}

power_attr(pm_print_times);

static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */

struct kobject *power_kobj;
EXPORT_SYMBOL_GPL(power_kobj);

/**
 * state - control system power state.
 *
 * show() returns the sleep states supported by the kernel on this system,
 * which may include 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM),
 * and 'disk' (Suspend-to-Disk).
 *
 * store() accepts one of those strings, translates it into the
 * proper enumerated value, and initiates a suspend transition.
 */
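/*
 * Illustrative usage from user space (the list printed depends on the
 * platform configuration):
 *
 *	# cat /sys/power/state
 *	standby mem disk
 *	# echo mem > /sys/power/state		(suspend to RAM)
 *	# echo disk > /sys/power/state		(hibernate)
 */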
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (pm_states[i].state)
			s += sprintf(s, "%s ", pm_states[i].label);

#endif
#ifdef CONFIG_HIBERNATION
	s += sprintf(s, "%s\n", "disk");
#else
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
#endif
	return (s - buf);
}

static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state = PM_SUSPEND_MIN;
	struct pm_sleep_state *s;
#endif
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && !strncmp(buf, "disk", len))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
		if (s->state && len == strlen(s->label)
		    && !strncmp(buf, s->label, len))
			return s->state;
#endif

	return PM_SUSPEND_ON;
}

//<20130327> <marc.huang> merge android kernel 3.0 state_store function
#ifdef CONFIG_MTK_LDVT
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX)
		error = pm_suspend(state);
	else if (state == PM_SUSPEND_MAX)
		error = hibernate();
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}
#else //#ifdef CONFIG_MTK_LDVT
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
#ifdef CONFIG_EARLYSUSPEND
	suspend_state_t state = PM_SUSPEND_ON;
#else
	suspend_state_t state = PM_SUSPEND_STANDBY;
#endif
	struct pm_sleep_state *s;
#endif
	char *p;
	int len;
	int error = -EINVAL;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

#ifdef CONFIG_MTK_HIBERNATION
	state = decode_state(buf, n);
	hib_log("entry (%d)\n", state);
#endif

#ifdef CONFIG_MTK_HIBERNATION
	if (len == 8 && !strncmp(buf, "hibabort", len)) {
		hib_log("abort hibernation...\n");
		error = mtk_hibernate_abort();
		goto Exit;
	}
#endif

	/* First, check if we are requested to hibernate */
	if (len == 4 && !strncmp(buf, "disk", len)) {
#ifdef CONFIG_MTK_HIBERNATION
		hib_log("trigger hibernation...\n");
#ifdef CONFIG_EARLYSUSPEND
		if (PM_SUSPEND_ON == get_suspend_state()) {
			hib_warn("\"on\" to \"disk\" (i.e., 0->4) is not supported !!!\n");
			error = -EINVAL;
			goto Exit;
		}
#endif
		if (!pre_hibernate()) {
			error = 0;
			error = mtk_hibernate();
		}
#else // !CONFIG_MTK_HIBERNATION
		error = hibernate();
#endif
		goto Exit;
	}

#ifdef CONFIG_SUSPEND
	/* Match the written string against the labels in pm_states[] */
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
		if (s->state && len == strlen(s->label)
		    && !strncmp(buf, s->label, len))
			break;
	}
	if (state < PM_SUSPEND_MAX && s->state) {
#ifdef CONFIG_EARLYSUSPEND
		if (state == PM_SUSPEND_ON || valid_state(state)) {
			error = 0;
			request_suspend_state(state);
		} else
			error = -EINVAL;
#else
		error = enter_state(state);
#endif
	}
#endif

Exit:
	return error ? error : n;
}
#endif
power_attr(state);

#ifdef CONFIG_PM_SLEEP
/*
 * The 'wakeup_count' attribute, along with the functions defined in
 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
 * handled in a non-racy way.
 *
 * If a wakeup event occurs when the system is in a sleep state, it simply is
 * woken up.  In turn, if an event that would wake the system up from a sleep
 * state occurs when it is undergoing a transition to that sleep state, the
 * transition should be aborted.  Moreover, if such an event occurs when the
 * system is in the working state, an attempt to start a transition to the
 * given sleep state should fail during a certain period after the detection
 * of the event.  Using the 'state' attribute alone is not sufficient to
 * satisfy these requirements, because a wakeup event may occur exactly when
 * 'state' is being written to and may be delivered to user space right before
 * it is frozen, so the event will remain only partially processed until the
 * system is woken up by another event.  In particular, it won't cause the
 * transition to a sleep state to be aborted.
 *
 * This difficulty may be overcome if user space uses 'wakeup_count' before
 * writing to 'state'.  It first should read from 'wakeup_count' and store
 * the read value.  Then, after carrying out its own preparations for the
 * system transition to a sleep state, it should write the stored value to
 * 'wakeup_count'.  If that fails, at least one wakeup event has occurred
 * since 'wakeup_count' was read and 'state' should not be written to.
 * Otherwise, it is allowed to write to 'state', but the transition will be
 * aborted if there are any wakeup events detected after 'wakeup_count' was
 * written to.
 */
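/*
 * Illustrative user-space sequence for the protocol described above (a
 * sketch, not taken from any particular tool):
 *
 *	count=$(cat /sys/power/wakeup_count)		(read and remember)
 *	...carry out pre-suspend preparations...
 *	if echo "$count" > /sys/power/wakeup_count; then
 *		echo mem > /sys/power/state		(safe to suspend)
 *	fi						(otherwise a wakeup
 *							 event arrived; retry)
 */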

static ssize_t wakeup_count_show(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 char *buf)
{
	unsigned int val;

	return pm_get_wakeup_count(&val, true) ?
		sprintf(buf, "%u\n", val) : -EINTR;
}

static ssize_t wakeup_count_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	if (state == PM_SUSPEND_ON)
		return sprintf(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sprintf(buf, "%s\n", pm_states[state].state ?
					pm_states[state].label : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sprintf(buf, "disk\n");
#else
	return sprintf(buf, "error");
#endif
}

static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int error;

	hib_log("store autosleep_state(%d)\n", state);
	if (state == PM_SUSPEND_ON
	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
		return -EINVAL;

	error = pm_autosleep_set_state(state);
	return error ? error : n;
}

power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */
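
/*
 * Illustrative autosleep usage (Android-style opportunistic suspend):
 *
 *	# echo mem > /sys/power/autosleep	(suspend whenever no wakeup
 *						 sources are active)
 *	# echo off > /sys/power/autosleep	(disable autosleep)
 */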

#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	return pm_show_wakelocks(buf, true);
}

static ssize_t wake_lock_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	int error = pm_wake_lock(buf);
	return error ? error : n;
}

power_attr(wake_lock);

static ssize_t wake_unlock_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return pm_show_wakelocks(buf, false);
}

static ssize_t wake_unlock_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t n)
{
	int error = pm_wake_unlock(buf);
	return error ? error : n;
}

power_attr(wake_unlock);

#endif /* CONFIG_PM_WAKELOCKS */
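
/*
 * Illustrative use of the user-space wakelock interface above (the lock name
 * is an example only; the optional timeout is given in nanoseconds):
 *
 *	# echo mylock > /sys/power/wake_lock		(acquire "mylock")
 *	# echo "mylock 1000000000" > /sys/power/wake_lock
 *							(acquire with 1 s timeout)
 *	# echo mylock > /sys/power/wake_unlock		(release it)
 *	# cat /sys/power/wake_lock			(list active wakelocks)
 */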
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;

static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_trace_enabled);
}

static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t n)
{
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		pm_trace_enabled = !!val;
		return n;
	}
	return -EINVAL;
}

power_attr(pm_trace);

static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t n)
{
	return -EINVAL;
}

power_attr(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", freeze_timeout_msecs);
}

static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	freeze_timeout_msecs = val;
	return n;
}

power_attr(pm_freeze_timeout);

#endif /* CONFIG_FREEZER */

static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
	&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_print_times_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
	&pm_freeze_timeout_attr.attr,
#endif
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = g,
};

#ifdef CONFIG_PM_RUNTIME
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

	return pm_wq ? 0 : -ENOMEM;
}
#else
static inline int pm_start_workqueue(void) { return 0; }
#endif
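
/*
 * pm_wq is a freezable workqueue shared by the runtime PM core; because it is
 * created with WQ_FREEZABLE, its work items stop running while tasks are
 * frozen for a system sleep transition.  A driver could queue work on it
 * roughly as sketched below (the work item name is hypothetical):
 *
 *	queue_work(pm_wq, &my_runtime_pm_work);
 */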

static int __init pm_init(void)
{
	int error = pm_start_workqueue();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_group(power_kobj, &attr_group);
	if (error)
		return error;
	pm_print_times_init();
	return pm_autosleep_init();
}

core_initcall(pm_init);