drivers: power: report battery voltage in AOSP compatible format
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / base / power / main.c
CommitLineData
1da177e4
LT
1/*
2 * drivers/base/power/main.c - Where the driver meets power management.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 *
7 * This file is released under the GPLv2
8 *
9 *
10 * The driver model core calls device_pm_add() when a device is registered.
b595076a 11 * This will initialize the embedded device_pm_info object in the device
1da177e4
LT
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
14 *
1eede070
RW
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
1da177e4
LT
18 */
19
1da177e4 20#include <linux/device.h>
cd59abfc 21#include <linux/kallsyms.h>
1b6bc32f 22#include <linux/export.h>
11048dcf 23#include <linux/mutex.h>
cd59abfc 24#include <linux/pm.h>
5e928f77 25#include <linux/pm_runtime.h>
cd59abfc 26#include <linux/resume-trace.h>
2ed8d2b3 27#include <linux/interrupt.h>
f2511774 28#include <linux/sched.h>
5af84b82 29#include <linux/async.h>
1e75227e 30#include <linux/suspend.h>
8651f97b 31#include <linux/cpuidle.h>
6fa3eb70
S
32#include <linux/timer.h>
33#include <linux/aee.h>
34
cd59abfc 35#include "../base.h"
1da177e4
LT
36#include "power.h"
37
6fa3eb70
S
#define LOG

/*
 * Hibernation/DPM debug logging. hib_log() is compiled out unless
 * HIB_DPM_DEBUG is non-zero; hib_warn() is always active.
 *
 * Note: the macro bodies must NOT end in a semicolon — call sites supply
 * their own, and a trailing ';' inside the macro breaks unbraced if/else
 * statements and produces empty statements after every call.
 */
#define HIB_DPM_DEBUG 0
#define _TAG_HIB_M "HIB/DPM"
#if (HIB_DPM_DEBUG)
#undef hib_log
#define hib_log(fmt, ...) pr_warn("[%s][%s]" fmt, _TAG_HIB_M, __func__, ##__VA_ARGS__)
#else
#define hib_log(fmt, ...) do { } while (0)
#endif
#undef hib_warn
#define hib_warn(fmt, ...) pr_warn("[%s][%s]" fmt, _TAG_HIB_M, __func__, ##__VA_ARGS__)
50
9cf519d1
RW
51typedef int (*pm_callback_t)(struct device *);
52
775b64d2 53/*
1eede070 54 * The entries in the dpm_list list are in a depth first order, simply
775b64d2
RW
55 * because children are guaranteed to be discovered after parents, and
56 * are inserted at the back of the list on discovery.
57 *
8e9394ce
GKH
58 * Since device_pm_add() may be called with a device lock held,
59 * we must never try to acquire a device lock while holding
775b64d2
RW
60 * dpm_list_mutex.
61 */
62
1eede070 63LIST_HEAD(dpm_list);
7664e969
SK
64static LIST_HEAD(dpm_prepared_list);
65static LIST_HEAD(dpm_suspended_list);
66static LIST_HEAD(dpm_late_early_list);
67static LIST_HEAD(dpm_noirq_list);
1da177e4 68
2a77c46d 69struct suspend_stats suspend_stats;
cd59abfc 70static DEFINE_MUTEX(dpm_list_mtx);
5af84b82 71static pm_message_t pm_transition;
1da177e4 72
6fa3eb70
S
73struct dpm_watchdog {
74 struct device *dev;
75 struct task_struct *tsk;
76 struct timer_list timer;
77};
78
098dff73
RW
79static int async_error;
80
5e928f77 81/**
e91c11b1 82 * device_pm_sleep_init - Initialize system suspend-related device fields.
5e928f77
RW
83 * @dev: Device object being initialized.
84 */
e91c11b1 85void device_pm_sleep_init(struct device *dev)
5e928f77 86{
f76b168b 87 dev->power.is_prepared = false;
6d0e0e84 88 dev->power.is_suspended = false;
5af84b82 89 init_completion(&dev->power.completion);
152e1d59 90 complete_all(&dev->power.completion);
074037ec 91 dev->power.wakeup = NULL;
22110faf 92 INIT_LIST_HEAD(&dev->power.entry);
5e928f77
RW
93}
94
1eede070 95/**
20d652d7 96 * device_pm_lock - Lock the list of active devices used by the PM core.
1eede070
RW
97 */
98void device_pm_lock(void)
99{
100 mutex_lock(&dpm_list_mtx);
101}
102
103/**
20d652d7 104 * device_pm_unlock - Unlock the list of active devices used by the PM core.
1eede070
RW
105 */
106void device_pm_unlock(void)
107{
108 mutex_unlock(&dpm_list_mtx);
109}
075c1771 110
775b64d2 111/**
20d652d7
RW
112 * device_pm_add - Add a device to the PM core's list of active devices.
113 * @dev: Device to add to the list.
775b64d2 114 */
3b98aeaf 115void device_pm_add(struct device *dev)
1da177e4 116{
1da177e4 117 pr_debug("PM: Adding info for %s:%s\n",
5c1a07ab 118 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
11048dcf 119 mutex_lock(&dpm_list_mtx);
f76b168b 120 if (dev->parent && dev->parent->power.is_prepared)
b64959e6
RW
121 dev_warn(dev, "parent %s should not be sleeping\n",
122 dev_name(dev->parent));
3b98aeaf 123 list_add_tail(&dev->power.entry, &dpm_list);
1a9a9152 124 mutex_unlock(&dpm_list_mtx);
1da177e4
LT
125}
126
775b64d2 127/**
20d652d7
RW
128 * device_pm_remove - Remove a device from the PM core's list of active devices.
129 * @dev: Device to be removed from the list.
775b64d2 130 */
9cddad77 131void device_pm_remove(struct device *dev)
1da177e4
LT
132{
133 pr_debug("PM: Removing info for %s:%s\n",
5c1a07ab 134 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
5af84b82 135 complete_all(&dev->power.completion);
11048dcf 136 mutex_lock(&dpm_list_mtx);
1da177e4 137 list_del_init(&dev->power.entry);
11048dcf 138 mutex_unlock(&dpm_list_mtx);
074037ec 139 device_wakeup_disable(dev);
5e928f77 140 pm_runtime_remove(dev);
775b64d2
RW
141}
142
ffa6a705 143/**
20d652d7
RW
144 * device_pm_move_before - Move device in the PM core's list of active devices.
145 * @deva: Device to move in dpm_list.
146 * @devb: Device @deva should come before.
ffa6a705
CH
147 */
148void device_pm_move_before(struct device *deva, struct device *devb)
149{
150 pr_debug("PM: Moving %s:%s before %s:%s\n",
5c1a07ab
RW
151 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
152 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
ffa6a705
CH
153 /* Delete deva from dpm_list and reinsert before devb. */
154 list_move_tail(&deva->power.entry, &devb->power.entry);
155}
156
157/**
20d652d7
RW
158 * device_pm_move_after - Move device in the PM core's list of active devices.
159 * @deva: Device to move in dpm_list.
160 * @devb: Device @deva should come after.
ffa6a705
CH
161 */
162void device_pm_move_after(struct device *deva, struct device *devb)
163{
164 pr_debug("PM: Moving %s:%s after %s:%s\n",
5c1a07ab
RW
165 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
166 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
ffa6a705
CH
167 /* Delete deva from dpm_list and reinsert after devb. */
168 list_move(&deva->power.entry, &devb->power.entry);
169}
170
171/**
20d652d7
RW
172 * device_pm_move_last - Move device to end of the PM core's list of devices.
173 * @dev: Device to move in dpm_list.
ffa6a705
CH
174 */
175void device_pm_move_last(struct device *dev)
176{
177 pr_debug("PM: Moving %s:%s to end of list\n",
5c1a07ab 178 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
ffa6a705
CH
179 list_move_tail(&dev->power.entry, &dpm_list);
180}
181
875ab0b7
RW
182static ktime_t initcall_debug_start(struct device *dev)
183{
184 ktime_t calltime = ktime_set(0, 0);
185
b2df1d4f 186 if (pm_print_times_enabled) {
0c6aebe3
RW
187 pr_info("calling %s+ @ %i, parent: %s\n",
188 dev_name(dev), task_pid_nr(current),
189 dev->parent ? dev_name(dev->parent) : "none");
875ab0b7
RW
190 calltime = ktime_get();
191 }
192
193 return calltime;
194}
195
196static void initcall_debug_report(struct device *dev, ktime_t calltime,
197 int error)
198{
199 ktime_t delta, rettime;
200
b2df1d4f 201 if (pm_print_times_enabled) {
875ab0b7
RW
202 rettime = ktime_get();
203 delta = ktime_sub(rettime, calltime);
204 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
205 error, (unsigned long long)ktime_to_ns(delta) >> 10);
206 }
207}
208
5af84b82
RW
209/**
210 * dpm_wait - Wait for a PM operation to complete.
211 * @dev: Device to wait for.
212 * @async: If unset, wait only if the device's power.async_suspend flag is set.
213 */
214static void dpm_wait(struct device *dev, bool async)
215{
216 if (!dev)
217 return;
218
0e06b4a8 219 if (async || (pm_async_enabled && dev->power.async_suspend))
5af84b82
RW
220 wait_for_completion(&dev->power.completion);
221}
222
223static int dpm_wait_fn(struct device *dev, void *async_ptr)
224{
225 dpm_wait(dev, *((bool *)async_ptr));
226 return 0;
227}
228
229static void dpm_wait_for_children(struct device *dev, bool async)
230{
231 device_for_each_child(dev, &async, dpm_wait_fn);
232}
233
1eede070 234/**
9cf519d1 235 * pm_op - Return the PM operation appropriate for given PM event.
20d652d7
RW
236 * @ops: PM operations to choose from.
237 * @state: PM transition of the system being carried out.
1eede070 238 */
9cf519d1 239static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
1eede070 240{
1eede070
RW
241 switch (state.event) {
242#ifdef CONFIG_SUSPEND
243 case PM_EVENT_SUSPEND:
9cf519d1 244 return ops->suspend;
1eede070 245 case PM_EVENT_RESUME:
9cf519d1 246 return ops->resume;
1eede070 247#endif /* CONFIG_SUSPEND */
1f112cee 248#ifdef CONFIG_HIBERNATE_CALLBACKS
1eede070
RW
249 case PM_EVENT_FREEZE:
250 case PM_EVENT_QUIESCE:
9cf519d1 251 return ops->freeze;
1eede070 252 case PM_EVENT_HIBERNATE:
9cf519d1 253 return ops->poweroff;
1eede070
RW
254 case PM_EVENT_THAW:
255 case PM_EVENT_RECOVER:
9cf519d1 256 return ops->thaw;
1eede070
RW
257 break;
258 case PM_EVENT_RESTORE:
9cf519d1 259 return ops->restore;
1f112cee 260#endif /* CONFIG_HIBERNATE_CALLBACKS */
1eede070 261 }
f2511774 262
9cf519d1 263 return NULL;
1eede070
RW
264}
265
cf579dfb
RW
266/**
267 * pm_late_early_op - Return the PM operation appropriate for given PM event.
268 * @ops: PM operations to choose from.
269 * @state: PM transition of the system being carried out.
270 *
271 * Runtime PM is disabled for @dev while this function is being executed.
272 */
273static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
274 pm_message_t state)
275{
276 switch (state.event) {
277#ifdef CONFIG_SUSPEND
278 case PM_EVENT_SUSPEND:
279 return ops->suspend_late;
280 case PM_EVENT_RESUME:
281 return ops->resume_early;
282#endif /* CONFIG_SUSPEND */
283#ifdef CONFIG_HIBERNATE_CALLBACKS
284 case PM_EVENT_FREEZE:
285 case PM_EVENT_QUIESCE:
286 return ops->freeze_late;
287 case PM_EVENT_HIBERNATE:
288 return ops->poweroff_late;
289 case PM_EVENT_THAW:
290 case PM_EVENT_RECOVER:
291 return ops->thaw_early;
292 case PM_EVENT_RESTORE:
293 return ops->restore_early;
294#endif /* CONFIG_HIBERNATE_CALLBACKS */
295 }
296
297 return NULL;
298}
299
1eede070 300/**
9cf519d1 301 * pm_noirq_op - Return the PM operation appropriate for given PM event.
20d652d7
RW
302 * @ops: PM operations to choose from.
303 * @state: PM transition of the system being carried out.
1eede070 304 *
20d652d7
RW
305 * The driver of @dev will not receive interrupts while this function is being
306 * executed.
1eede070 307 */
9cf519d1 308static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
1eede070 309{
1eede070
RW
310 switch (state.event) {
311#ifdef CONFIG_SUSPEND
312 case PM_EVENT_SUSPEND:
9cf519d1 313 return ops->suspend_noirq;
1eede070 314 case PM_EVENT_RESUME:
9cf519d1 315 return ops->resume_noirq;
1eede070 316#endif /* CONFIG_SUSPEND */
1f112cee 317#ifdef CONFIG_HIBERNATE_CALLBACKS
1eede070
RW
318 case PM_EVENT_FREEZE:
319 case PM_EVENT_QUIESCE:
9cf519d1 320 return ops->freeze_noirq;
1eede070 321 case PM_EVENT_HIBERNATE:
9cf519d1 322 return ops->poweroff_noirq;
1eede070
RW
323 case PM_EVENT_THAW:
324 case PM_EVENT_RECOVER:
9cf519d1 325 return ops->thaw_noirq;
1eede070 326 case PM_EVENT_RESTORE:
9cf519d1 327 return ops->restore_noirq;
1f112cee 328#endif /* CONFIG_HIBERNATE_CALLBACKS */
1eede070 329 }
f2511774 330
9cf519d1 331 return NULL;
1eede070
RW
332}
333
334static char *pm_verb(int event)
335{
336 switch (event) {
337 case PM_EVENT_SUSPEND:
338 return "suspend";
339 case PM_EVENT_RESUME:
340 return "resume";
341 case PM_EVENT_FREEZE:
342 return "freeze";
343 case PM_EVENT_QUIESCE:
344 return "quiesce";
345 case PM_EVENT_HIBERNATE:
346 return "hibernate";
347 case PM_EVENT_THAW:
348 return "thaw";
349 case PM_EVENT_RESTORE:
350 return "restore";
351 case PM_EVENT_RECOVER:
352 return "recover";
353 default:
354 return "(unknown PM event)";
355 }
356}
357
358static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
359{
360 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
361 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
362 ", may wakeup" : "");
363}
364
365static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
366 int error)
367{
368 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
5c1a07ab 369 dev_name(dev), pm_verb(state.event), info, error);
1eede070
RW
370}
371
ecf762b2
RW
372static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
373{
374 ktime_t calltime;
0702d9ee 375 u64 usecs64;
ecf762b2
RW
376 int usecs;
377
378 calltime = ktime_get();
379 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
380 do_div(usecs64, NSEC_PER_USEC);
381 usecs = usecs64;
382 if (usecs == 0)
383 usecs = 1;
6fa3eb70 384 hib_log("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
ecf762b2
RW
385 info ?: "", info ? " " : "", pm_verb(state.event),
386 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
387}
388
9cf519d1
RW
389static int dpm_run_callback(pm_callback_t cb, struct device *dev,
390 pm_message_t state, char *info)
391{
392 ktime_t calltime;
393 int error;
394
395 if (!cb)
396 return 0;
397
398 calltime = initcall_debug_start(dev);
399
400 pm_dev_dbg(dev, state, info);
401 error = cb(dev);
402 suspend_report_result(cb, error);
403
404 initcall_debug_report(dev, calltime, error);
405
406 return error;
407}
408
6fa3eb70
S
409/**
410 * dpm_wd_handler - Driver suspend / resume watchdog handler.
411 *
412 * Called when a driver has timed out suspending or resuming.
413 * There's not much we can do here to recover so BUG() out for
414 * a crash-dump
415 */
416static void dpm_wd_handler(unsigned long data)
417{
418 struct dpm_watchdog *wd = (void *)data;
419 struct device *dev = wd->dev;
420 struct task_struct *tsk = wd->tsk;
421
422 dev_emerg(dev, "**** DPM device timeout ****\n");
423 show_stack(tsk, NULL);
424
425 BUG();
426}
427
428/**
429 * dpm_wd_set - Enable pm watchdog for given device.
430 * @wd: Watchdog. Must be allocated on the stack.
431 * @dev: Device to handle.
432 */
433static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
434{
435 struct timer_list *timer = &wd->timer;
436
437 wd->dev = dev;
438 wd->tsk = get_current();
439
440 init_timer_on_stack(timer);
441 timer->expires = jiffies + HZ * 12;
442 timer->function = dpm_wd_handler;
443 timer->data = (unsigned long)wd;
444 add_timer(timer);
445}
446
447/**
448 * dpm_wd_clear - Disable pm watchdog.
449 * @wd: Watchdog to disable.
450 */
451static void dpm_wd_clear(struct dpm_watchdog *wd)
452{
453 struct timer_list *timer = &wd->timer;
454
455 del_timer_sync(timer);
456 destroy_timer_on_stack(timer);
457}
458
cd59abfc
AS
459/*------------------------- Resume routines -------------------------*/
460
461/**
20d652d7
RW
462 * device_resume_noirq - Execute an "early resume" callback for given device.
463 * @dev: Device to handle.
464 * @state: PM transition of the system being carried out.
cd59abfc 465 *
20d652d7
RW
466 * The driver of @dev will not receive interrupts while this function is being
467 * executed.
cd59abfc 468 */
d1616302 469static int device_resume_noirq(struct device *dev, pm_message_t state)
cd59abfc 470{
9cf519d1
RW
471 pm_callback_t callback = NULL;
472 char *info = NULL;
cd59abfc
AS
473 int error = 0;
474
475 TRACE_DEVICE(dev);
476 TRACE_RESUME(0);
477
dbf37414
RW
478 if (dev->power.syscore)
479 goto Out;
480
564b905a 481 if (dev->pm_domain) {
cf579dfb 482 info = "noirq power domain ";
9cf519d1 483 callback = pm_noirq_op(&dev->pm_domain->ops, state);
4d27e9dc 484 } else if (dev->type && dev->type->pm) {
cf579dfb 485 info = "noirq type ";
9cf519d1 486 callback = pm_noirq_op(dev->type->pm, state);
9659cc06 487 } else if (dev->class && dev->class->pm) {
cf579dfb 488 info = "noirq class ";
9cf519d1 489 callback = pm_noirq_op(dev->class->pm, state);
9659cc06 490 } else if (dev->bus && dev->bus->pm) {
cf579dfb 491 info = "noirq bus ";
9cf519d1 492 callback = pm_noirq_op(dev->bus->pm, state);
e7176a37
DB
493 }
494
35cd133c 495 if (!callback && dev->driver && dev->driver->pm) {
cf579dfb 496 info = "noirq driver ";
35cd133c
RW
497 callback = pm_noirq_op(dev->driver->pm, state);
498 }
499
9cf519d1
RW
500 error = dpm_run_callback(callback, dev, state, info);
501
dbf37414 502 Out:
775b64d2
RW
503 TRACE_RESUME(error);
504 return error;
505}
506
507/**
cf579dfb 508 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
20d652d7 509 * @state: PM transition of the system being carried out.
775b64d2 510 *
cf579dfb 511 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
20d652d7 512 * enable device drivers to receive interrupts.
775b64d2 513 */
cf579dfb 514static void dpm_resume_noirq(pm_message_t state)
775b64d2 515{
ecf762b2 516 ktime_t starttime = ktime_get();
775b64d2 517
32bdfac5 518 mutex_lock(&dpm_list_mtx);
8a43a9ab
RW
519 while (!list_empty(&dpm_noirq_list)) {
520 struct device *dev = to_device(dpm_noirq_list.next);
5b219a51 521 int error;
d08a5ace
RW
522
523 get_device(dev);
cf579dfb 524 list_move_tail(&dev->power.entry, &dpm_late_early_list);
5b219a51 525 mutex_unlock(&dpm_list_mtx);
d08a5ace 526
5b219a51 527 error = device_resume_noirq(dev, state);
2a77c46d
SL
528 if (error) {
529 suspend_stats.failed_resume_noirq++;
530 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
531 dpm_save_failed_dev(dev_name(dev));
cf579dfb
RW
532 pm_dev_err(dev, state, " noirq", error);
533 }
534
535 mutex_lock(&dpm_list_mtx);
536 put_device(dev);
537 }
538 mutex_unlock(&dpm_list_mtx);
539 dpm_show_time(starttime, state, "noirq");
540 resume_device_irqs();
8651f97b 541 cpuidle_resume();
cf579dfb
RW
542}
543
544/**
545 * device_resume_early - Execute an "early resume" callback for given device.
546 * @dev: Device to handle.
547 * @state: PM transition of the system being carried out.
548 *
549 * Runtime PM is disabled for @dev while this function is being executed.
550 */
551static int device_resume_early(struct device *dev, pm_message_t state)
552{
553 pm_callback_t callback = NULL;
554 char *info = NULL;
555 int error = 0;
556
557 TRACE_DEVICE(dev);
558 TRACE_RESUME(0);
559
dbf37414
RW
560 if (dev->power.syscore)
561 goto Out;
562
cf579dfb
RW
563 if (dev->pm_domain) {
564 info = "early power domain ";
565 callback = pm_late_early_op(&dev->pm_domain->ops, state);
566 } else if (dev->type && dev->type->pm) {
567 info = "early type ";
568 callback = pm_late_early_op(dev->type->pm, state);
569 } else if (dev->class && dev->class->pm) {
570 info = "early class ";
571 callback = pm_late_early_op(dev->class->pm, state);
572 } else if (dev->bus && dev->bus->pm) {
573 info = "early bus ";
574 callback = pm_late_early_op(dev->bus->pm, state);
575 }
576
577 if (!callback && dev->driver && dev->driver->pm) {
578 info = "early driver ";
579 callback = pm_late_early_op(dev->driver->pm, state);
580 }
581
582 error = dpm_run_callback(callback, dev, state, info);
583
dbf37414 584 Out:
cf579dfb 585 TRACE_RESUME(error);
9f6d8f6a
RW
586
587 pm_runtime_enable(dev);
cf579dfb
RW
588 return error;
589}
590
591/**
592 * dpm_resume_early - Execute "early resume" callbacks for all devices.
593 * @state: PM transition of the system being carried out.
594 */
595static void dpm_resume_early(pm_message_t state)
596{
597 ktime_t starttime = ktime_get();
598
599 mutex_lock(&dpm_list_mtx);
600 while (!list_empty(&dpm_late_early_list)) {
601 struct device *dev = to_device(dpm_late_early_list.next);
602 int error;
603
604 get_device(dev);
605 list_move_tail(&dev->power.entry, &dpm_suspended_list);
606 mutex_unlock(&dpm_list_mtx);
607
608 error = device_resume_early(dev, state);
609 if (error) {
610 suspend_stats.failed_resume_early++;
611 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
612 dpm_save_failed_dev(dev_name(dev));
5b219a51 613 pm_dev_err(dev, state, " early", error);
2a77c46d 614 }
d08a5ace 615
5b219a51 616 mutex_lock(&dpm_list_mtx);
d08a5ace
RW
617 put_device(dev);
618 }
32bdfac5 619 mutex_unlock(&dpm_list_mtx);
ecf762b2 620 dpm_show_time(starttime, state, "early");
775b64d2 621}
cf579dfb
RW
622
623/**
624 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
625 * @state: PM transition of the system being carried out.
626 */
627void dpm_resume_start(pm_message_t state)
628{
629 dpm_resume_noirq(state);
630 dpm_resume_early(state);
631}
632EXPORT_SYMBOL_GPL(dpm_resume_start);
775b64d2 633
/*
 * Progress counters logged (via AEE SRAM) during suspend/resume so a hang
 * can be located post-mortem. Implicitly zero-initialized.
 */
static int device_suspend_index;
static int device_resume_index;
775b64d2 637/**
97df8c12 638 * device_resume - Execute "resume" callbacks for given device.
20d652d7
RW
639 * @dev: Device to handle.
640 * @state: PM transition of the system being carried out.
5af84b82 641 * @async: If true, the device is being resumed asynchronously.
775b64d2 642 */
97df8c12 643static int device_resume(struct device *dev, pm_message_t state, bool async)
775b64d2 644{
9cf519d1
RW
645 pm_callback_t callback = NULL;
646 char *info = NULL;
775b64d2 647 int error = 0;
6fa3eb70 648 struct dpm_watchdog wd;
775b64d2
RW
649
650 TRACE_DEVICE(dev);
651 TRACE_RESUME(0);
cd59abfc 652
dbf37414
RW
653 if (dev->power.syscore)
654 goto Complete;
655
5af84b82 656 dpm_wait(dev->parent, async);
8e9394ce 657 device_lock(dev);
7a8d37a3 658
f76b168b
AS
659 /*
660 * This is a fib. But we'll allow new children to be added below
661 * a resumed device, even if the device hasn't been completed yet.
662 */
663 dev->power.is_prepared = false;
6fa3eb70 664 dpm_wd_set(&wd, dev);
97df8c12 665
6d0e0e84
AS
666 if (!dev->power.is_suspended)
667 goto Unlock;
668
564b905a 669 if (dev->pm_domain) {
6fa3eb70
S
670#ifdef LOG
671 printk(KERN_DEBUG "[%d] power domain device_resume\n",device_resume_index);
672 if (dev->driver)
673 if(dev->driver->name)
674 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
675#endif
676 aee_sram_printk("%d\n", device_resume_index++);
9cf519d1
RW
677 info = "power domain ";
678 callback = pm_op(&dev->pm_domain->ops, state);
35cd133c 679 goto Driver;
7538e3db
RW
680 }
681
9659cc06 682 if (dev->type && dev->type->pm) {
6fa3eb70
S
683#ifdef LOG
684 printk(KERN_DEBUG "[%d] type device_resume\n",device_resume_index);
685 if (dev->driver)
686 if(dev->driver->name)
687 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
688#endif
689 aee_sram_printk("%d\n", device_resume_index++);
9cf519d1
RW
690 info = "type ";
691 callback = pm_op(dev->type->pm, state);
35cd133c 692 goto Driver;
cd59abfc
AS
693 }
694
1eede070
RW
695 if (dev->class) {
696 if (dev->class->pm) {
6fa3eb70
S
697#ifdef LOG
698 printk(KERN_DEBUG "[%d] class device_resume\n",device_resume_index);
699 if (dev->driver)
700 if(dev->driver->name)
701 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
702#endif
703 aee_sram_printk("%d\n", device_resume_index++);
9cf519d1
RW
704 info = "class ";
705 callback = pm_op(dev->class->pm, state);
35cd133c 706 goto Driver;
1eede070 707 } else if (dev->class->resume) {
6fa3eb70
S
708#ifdef LOG
709 printk(KERN_DEBUG "[%d] legacy class device_resume\n",device_resume_index);
710 if (dev->driver)
711 if(dev->driver->name)
712 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
713#endif
714 aee_sram_printk("%d\n", device_resume_index++);
9cf519d1
RW
715 info = "legacy class ";
716 callback = dev->class->resume;
9659cc06 717 goto End;
1eede070 718 }
cd59abfc 719 }
9659cc06
RW
720
721 if (dev->bus) {
722 if (dev->bus->pm) {
6fa3eb70
S
723#ifdef LOG
724 printk(KERN_DEBUG "[%d] bus device_resume\n",device_resume_index);
725 if (dev->driver)
726 if(dev->driver->name)
727 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
728#endif
729 aee_sram_printk("%d\n", device_resume_index++);
35cd133c 730 info = "bus ";
9cf519d1 731 callback = pm_op(dev->bus->pm, state);
9659cc06 732 } else if (dev->bus->resume) {
6fa3eb70
S
733#ifdef LOG
734 printk(KERN_DEBUG "[%d] legacy bus device_resume\n", device_resume_index);
735 if (dev->driver)
736 if(dev->driver->name)
737 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
738#endif
739 aee_sram_printk("%d\n", device_resume_index++);
35cd133c 740 info = "legacy bus ";
9cf519d1 741 callback = dev->bus->resume;
35cd133c 742 goto End;
9659cc06
RW
743 }
744 }
745
35cd133c
RW
746 Driver:
747 if (!callback && dev->driver && dev->driver->pm) {
6fa3eb70
S
748#ifdef LOG
749 printk(KERN_DEBUG "[%d] driver device_resume\n", device_resume_index);
750 if (dev->driver)
751 if(dev->driver->name)
752 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
753#endif
754 aee_sram_printk("%d\n", device_resume_index++);
35cd133c
RW
755 info = "driver ";
756 callback = pm_op(dev->driver->pm, state);
757 }
758
1eede070 759 End:
9cf519d1 760 error = dpm_run_callback(callback, dev, state, info);
6d0e0e84
AS
761 dev->power.is_suspended = false;
762
763 Unlock:
8e9394ce 764 device_unlock(dev);
6fa3eb70 765 dpm_wd_clear(&wd);
dbf37414
RW
766
767 Complete:
5af84b82 768 complete_all(&dev->power.completion);
7a8d37a3 769
cd59abfc 770 TRACE_RESUME(error);
1e2ef05b 771
cd59abfc
AS
772 return error;
773}
774
5af84b82
RW
775static void async_resume(void *data, async_cookie_t cookie)
776{
777 struct device *dev = (struct device *)data;
778 int error;
779
97df8c12 780 error = device_resume(dev, pm_transition, true);
5af84b82
RW
781 if (error)
782 pm_dev_err(dev, pm_transition, " async", error);
783 put_device(dev);
784}
785
97df8c12 786static bool is_async(struct device *dev)
5af84b82 787{
97df8c12
RW
788 return dev->power.async_suspend && pm_async_enabled
789 && !pm_trace_is_enabled();
5af84b82
RW
790}
791
775b64d2 792/**
20d652d7
RW
793 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
794 * @state: PM transition of the system being carried out.
775b64d2 795 *
20d652d7
RW
796 * Execute the appropriate "resume" callback for all devices whose status
797 * indicates that they are suspended.
1eede070 798 */
91e7c75b 799void dpm_resume(pm_message_t state)
1eede070 800{
97df8c12 801 struct device *dev;
ecf762b2 802 ktime_t starttime = ktime_get();
1eede070 803
91e7c75b
RW
804 might_sleep();
805
1eede070 806 mutex_lock(&dpm_list_mtx);
5af84b82 807 pm_transition = state;
098dff73 808 async_error = 0;
1eede070 809
8a43a9ab 810 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
97df8c12
RW
811 INIT_COMPLETION(dev->power.completion);
812 if (is_async(dev)) {
813 get_device(dev);
814 async_schedule(async_resume, dev);
815 }
816 }
817
8a43a9ab
RW
818 while (!list_empty(&dpm_suspended_list)) {
819 dev = to_device(dpm_suspended_list.next);
1eede070 820 get_device(dev);
5b219a51 821 if (!is_async(dev)) {
1eede070
RW
822 int error;
823
1eede070
RW
824 mutex_unlock(&dpm_list_mtx);
825
97df8c12 826 error = device_resume(dev, state, false);
2a77c46d
SL
827 if (error) {
828 suspend_stats.failed_resume++;
829 dpm_save_failed_step(SUSPEND_RESUME);
830 dpm_save_failed_dev(dev_name(dev));
1eede070 831 pm_dev_err(dev, state, "", error);
2a77c46d 832 }
5b219a51
RW
833
834 mutex_lock(&dpm_list_mtx);
1eede070
RW
835 }
836 if (!list_empty(&dev->power.entry))
8a43a9ab 837 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1eede070
RW
838 put_device(dev);
839 }
6fa3eb70
S
840 device_resume_index = 0;
841
1eede070 842 mutex_unlock(&dpm_list_mtx);
5af84b82 843 async_synchronize_full();
ecf762b2 844 dpm_show_time(starttime, state, NULL);
1eede070 845}
6fa3eb70 846EXPORT_SYMBOL_GPL(dpm_resume);
1eede070
RW
847
848/**
20d652d7
RW
849 * device_complete - Complete a PM transition for given device.
850 * @dev: Device to handle.
851 * @state: PM transition of the system being carried out.
1eede070 852 */
d1616302 853static void device_complete(struct device *dev, pm_message_t state)
1eede070 854{
35cd133c
RW
855 void (*callback)(struct device *) = NULL;
856 char *info = NULL;
857
dbf37414
RW
858 if (dev->power.syscore)
859 return;
860
8e9394ce 861 device_lock(dev);
1eede070 862
564b905a 863 if (dev->pm_domain) {
35cd133c
RW
864 info = "completing power domain ";
865 callback = dev->pm_domain->ops.complete;
4d27e9dc 866 } else if (dev->type && dev->type->pm) {
35cd133c
RW
867 info = "completing type ";
868 callback = dev->type->pm->complete;
9659cc06 869 } else if (dev->class && dev->class->pm) {
35cd133c
RW
870 info = "completing class ";
871 callback = dev->class->pm->complete;
9659cc06 872 } else if (dev->bus && dev->bus->pm) {
35cd133c
RW
873 info = "completing bus ";
874 callback = dev->bus->pm->complete;
875 }
876
877 if (!callback && dev->driver && dev->driver->pm) {
878 info = "completing driver ";
879 callback = dev->driver->pm->complete;
880 }
881
882 if (callback) {
883 pm_dev_dbg(dev, state, info);
884 callback(dev);
1eede070
RW
885 }
886
8e9394ce 887 device_unlock(dev);
88d26136 888
af939339 889 pm_runtime_put(dev);
1eede070
RW
890}
891
892/**
20d652d7
RW
893 * dpm_complete - Complete a PM transition for all non-sysdev devices.
894 * @state: PM transition of the system being carried out.
775b64d2 895 *
20d652d7
RW
896 * Execute the ->complete() callbacks for all devices whose PM status is not
897 * DPM_ON (this allows new devices to be registered).
cd59abfc 898 */
91e7c75b 899void dpm_complete(pm_message_t state)
cd59abfc 900{
1eede070
RW
901 struct list_head list;
902
91e7c75b
RW
903 might_sleep();
904
1eede070 905 INIT_LIST_HEAD(&list);
cd59abfc 906 mutex_lock(&dpm_list_mtx);
8a43a9ab
RW
907 while (!list_empty(&dpm_prepared_list)) {
908 struct device *dev = to_device(dpm_prepared_list.prev);
cd59abfc 909
1eede070 910 get_device(dev);
f76b168b 911 dev->power.is_prepared = false;
5b219a51
RW
912 list_move(&dev->power.entry, &list);
913 mutex_unlock(&dpm_list_mtx);
1eede070 914
5b219a51 915 device_complete(dev, state);
1eede070 916
5b219a51 917 mutex_lock(&dpm_list_mtx);
1eede070 918 put_device(dev);
cd59abfc 919 }
1eede070 920 list_splice(&list, &dpm_list);
cd59abfc
AS
921 mutex_unlock(&dpm_list_mtx);
922}
6fa3eb70 923EXPORT_SYMBOL_GPL(dpm_complete);
cd59abfc 924
cd59abfc 925/**
20d652d7
RW
926 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
927 * @state: PM transition of the system being carried out.
cd59abfc 928 *
20d652d7
RW
929 * Execute "resume" callbacks for all devices and complete the PM transition of
930 * the system.
cd59abfc 931 */
d1616302 932void dpm_resume_end(pm_message_t state)
cd59abfc 933{
1eede070
RW
934 dpm_resume(state);
935 dpm_complete(state);
cd59abfc 936}
d1616302 937EXPORT_SYMBOL_GPL(dpm_resume_end);
cd59abfc
AS
938
939
940/*------------------------- Suspend routines -------------------------*/
941
1eede070 942/**
20d652d7
RW
943 * resume_event - Return a "resume" message for given "suspend" sleep state.
944 * @sleep_state: PM message representing a sleep state.
945 *
946 * Return a PM message representing the resume event corresponding to given
947 * sleep state.
1eede070
RW
948 */
949static pm_message_t resume_event(pm_message_t sleep_state)
cd59abfc 950{
1eede070
RW
951 switch (sleep_state.event) {
952 case PM_EVENT_SUSPEND:
953 return PMSG_RESUME;
954 case PM_EVENT_FREEZE:
955 case PM_EVENT_QUIESCE:
956 return PMSG_RECOVER;
957 case PM_EVENT_HIBERNATE:
958 return PMSG_RESTORE;
cd59abfc 959 }
1eede070 960 return PMSG_ON;
cd59abfc
AS
961}
962
963/**
20d652d7
RW
964 * device_suspend_noirq - Execute a "late suspend" callback for given device.
965 * @dev: Device to handle.
966 * @state: PM transition of the system being carried out.
775b64d2 967 *
20d652d7
RW
968 * The driver of @dev will not receive interrupts while this function is being
969 * executed.
cd59abfc 970 */
d1616302 971static int device_suspend_noirq(struct device *dev, pm_message_t state)
775b64d2 972{
9cf519d1
RW
973 pm_callback_t callback = NULL;
974 char *info = NULL;
e7176a37 975
dbf37414
RW
976 if (dev->power.syscore)
977 return 0;
978
564b905a 979 if (dev->pm_domain) {
cf579dfb 980 info = "noirq power domain ";
9cf519d1 981 callback = pm_noirq_op(&dev->pm_domain->ops, state);
4d27e9dc 982 } else if (dev->type && dev->type->pm) {
cf579dfb 983 info = "noirq type ";
9cf519d1 984 callback = pm_noirq_op(dev->type->pm, state);
9659cc06 985 } else if (dev->class && dev->class->pm) {
cf579dfb 986 info = "noirq class ";
9cf519d1 987 callback = pm_noirq_op(dev->class->pm, state);
9659cc06 988 } else if (dev->bus && dev->bus->pm) {
cf579dfb 989 info = "noirq bus ";
9cf519d1 990 callback = pm_noirq_op(dev->bus->pm, state);
7538e3db
RW
991 }
992
35cd133c 993 if (!callback && dev->driver && dev->driver->pm) {
cf579dfb 994 info = "noirq driver ";
35cd133c
RW
995 callback = pm_noirq_op(dev->driver->pm, state);
996 }
997
9cf519d1 998 return dpm_run_callback(callback, dev, state, info);
775b64d2
RW
999}
1000
1001/**
cf579dfb 1002 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
20d652d7 1003 * @state: PM transition of the system being carried out.
775b64d2 1004 *
20d652d7
RW
1005 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1006 * handlers for all non-sysdev devices.
775b64d2 1007 */
cf579dfb 1008static int dpm_suspend_noirq(pm_message_t state)
775b64d2 1009{
ecf762b2 1010 ktime_t starttime = ktime_get();
775b64d2
RW
1011 int error = 0;
1012
8651f97b 1013 cpuidle_pause();
2ed8d2b3 1014 suspend_device_irqs();
32bdfac5 1015 mutex_lock(&dpm_list_mtx);
cf579dfb
RW
1016 while (!list_empty(&dpm_late_early_list)) {
1017 struct device *dev = to_device(dpm_late_early_list.prev);
d08a5ace
RW
1018
1019 get_device(dev);
1020 mutex_unlock(&dpm_list_mtx);
1021
d1616302 1022 error = device_suspend_noirq(dev, state);
d08a5ace
RW
1023
1024 mutex_lock(&dpm_list_mtx);
775b64d2 1025 if (error) {
cf579dfb 1026 pm_dev_err(dev, state, " noirq", error);
2a77c46d
SL
1027 suspend_stats.failed_suspend_noirq++;
1028 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1029 dpm_save_failed_dev(dev_name(dev));
d08a5ace 1030 put_device(dev);
775b64d2
RW
1031 break;
1032 }
d08a5ace 1033 if (!list_empty(&dev->power.entry))
8a43a9ab 1034 list_move(&dev->power.entry, &dpm_noirq_list);
d08a5ace 1035 put_device(dev);
52d136cc
RW
1036
1037 if (pm_wakeup_pending()) {
1038 error = -EBUSY;
1039 break;
1040 }
775b64d2 1041 }
32bdfac5 1042 mutex_unlock(&dpm_list_mtx);
775b64d2 1043 if (error)
d1616302 1044 dpm_resume_noirq(resume_event(state));
cf579dfb
RW
1045 else
1046 dpm_show_time(starttime, state, "noirq");
1047 return error;
1048}
1049
1050/**
1051 * device_suspend_late - Execute a "late suspend" callback for given device.
1052 * @dev: Device to handle.
1053 * @state: PM transition of the system being carried out.
1054 *
1055 * Runtime PM is disabled for @dev while this function is being executed.
1056 */
1057static int device_suspend_late(struct device *dev, pm_message_t state)
1058{
1059 pm_callback_t callback = NULL;
1060 char *info = NULL;
1061
9f6d8f6a
RW
1062 __pm_runtime_disable(dev, false);
1063
dbf37414
RW
1064 if (dev->power.syscore)
1065 return 0;
1066
cf579dfb
RW
1067 if (dev->pm_domain) {
1068 info = "late power domain ";
1069 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1070 } else if (dev->type && dev->type->pm) {
1071 info = "late type ";
1072 callback = pm_late_early_op(dev->type->pm, state);
1073 } else if (dev->class && dev->class->pm) {
1074 info = "late class ";
1075 callback = pm_late_early_op(dev->class->pm, state);
1076 } else if (dev->bus && dev->bus->pm) {
1077 info = "late bus ";
1078 callback = pm_late_early_op(dev->bus->pm, state);
1079 }
1080
1081 if (!callback && dev->driver && dev->driver->pm) {
1082 info = "late driver ";
1083 callback = pm_late_early_op(dev->driver->pm, state);
1084 }
1085
1086 return dpm_run_callback(callback, dev, state, info);
1087}
1088
1089/**
1090 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1091 * @state: PM transition of the system being carried out.
1092 */
1093static int dpm_suspend_late(pm_message_t state)
1094{
1095 ktime_t starttime = ktime_get();
1096 int error = 0;
1097
1098 mutex_lock(&dpm_list_mtx);
1099 while (!list_empty(&dpm_suspended_list)) {
1100 struct device *dev = to_device(dpm_suspended_list.prev);
1101
1102 get_device(dev);
1103 mutex_unlock(&dpm_list_mtx);
1104
1105 error = device_suspend_late(dev, state);
1106
1107 mutex_lock(&dpm_list_mtx);
1108 if (error) {
1109 pm_dev_err(dev, state, " late", error);
1110 suspend_stats.failed_suspend_late++;
1111 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1112 dpm_save_failed_dev(dev_name(dev));
1113 put_device(dev);
1114 break;
1115 }
1116 if (!list_empty(&dev->power.entry))
1117 list_move(&dev->power.entry, &dpm_late_early_list);
1118 put_device(dev);
52d136cc
RW
1119
1120 if (pm_wakeup_pending()) {
1121 error = -EBUSY;
1122 break;
1123 }
cf579dfb
RW
1124 }
1125 mutex_unlock(&dpm_list_mtx);
1126 if (error)
1127 dpm_resume_early(resume_event(state));
ecf762b2
RW
1128 else
1129 dpm_show_time(starttime, state, "late");
cf579dfb 1130
775b64d2
RW
1131 return error;
1132}
cf579dfb
RW
1133
1134/**
1135 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1136 * @state: PM transition of the system being carried out.
1137 */
1138int dpm_suspend_end(pm_message_t state)
1139{
1140 int error = dpm_suspend_late(state);
064b021f
CC
1141 if (error)
1142 return error;
1143
1144 error = dpm_suspend_noirq(state);
1145 if (error) {
997a0311 1146 dpm_resume_early(resume_event(state));
064b021f
CC
1147 return error;
1148 }
cf579dfb 1149
064b021f 1150 return 0;
cf579dfb
RW
1151}
1152EXPORT_SYMBOL_GPL(dpm_suspend_end);
775b64d2 1153
875ab0b7
RW
1154/**
1155 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
0a884223
RD
1156 * @dev: Device to suspend.
1157 * @state: PM transition of the system being carried out.
1158 * @cb: Suspend callback to execute.
875ab0b7
RW
1159 */
1160static int legacy_suspend(struct device *dev, pm_message_t state,
1161 int (*cb)(struct device *dev, pm_message_t state))
1162{
1163 int error;
1164 ktime_t calltime;
1165
1166 calltime = initcall_debug_start(dev);
1167
1168 error = cb(dev, state);
1169 suspend_report_result(cb, error);
1170
1171 initcall_debug_report(dev, calltime, error);
1172
1173 return error;
1174}
1175
775b64d2 1176/**
20d652d7
RW
1177 * device_suspend - Execute "suspend" callbacks for given device.
1178 * @dev: Device to handle.
1179 * @state: PM transition of the system being carried out.
5af84b82 1180 * @async: If true, the device is being suspended asynchronously.
775b64d2 1181 */
5af84b82 1182static int __device_suspend(struct device *dev, pm_message_t state, bool async)
cd59abfc 1183{
9cf519d1
RW
1184 pm_callback_t callback = NULL;
1185 char *info = NULL;
cd59abfc 1186 int error = 0;
6fa3eb70 1187 struct dpm_watchdog wd;
cd59abfc 1188
5af84b82 1189 dpm_wait_for_children(dev, async);
7a8d37a3 1190
5af84b82 1191 if (async_error)
1f758b23 1192 goto Complete;
1e2ef05b 1193
88d26136
AS
1194 /*
1195 * If a device configured to wake up the system from sleep states
1196 * has been suspended at run time and there's a resume request pending
1197 * for it, this is equivalent to the device signaling wakeup, so the
1198 * system suspend operation should be aborted.
1199 */
1e2ef05b
RW
1200 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1201 pm_wakeup_event(dev, 0);
5af84b82 1202
d83f905e
RW
1203 if (pm_wakeup_pending()) {
1204 async_error = -EBUSY;
6fa3eb70 1205 hib_log("async_error(%d) not zero due pm_wakeup_pending return non zero!!\n", async_error);
1f758b23 1206 goto Complete;
d83f905e
RW
1207 }
1208
dbf37414
RW
1209 if (dev->power.syscore)
1210 goto Complete;
6fa3eb70
S
1211
1212 dpm_wd_set(&wd, dev);
dbf37414 1213
1e2ef05b
RW
1214 device_lock(dev);
1215
564b905a 1216 if (dev->pm_domain) {
6fa3eb70
S
1217#ifdef LOG
1218 printk(KERN_DEBUG "[%d] power domain device_suspend\n", device_suspend_index);
1219 if (dev->driver)
1220 if(dev->driver->name)
1221 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
1222#endif
1223 aee_sram_printk("%d\n", device_suspend_index++);
9cf519d1
RW
1224 info = "power domain ";
1225 callback = pm_op(&dev->pm_domain->ops, state);
1226 goto Run;
4d27e9dc
RW
1227 }
1228
9659cc06 1229 if (dev->type && dev->type->pm) {
6fa3eb70
S
1230#ifdef LOG
1231 printk(KERN_DEBUG "[%d] type device_suspend\n", device_suspend_index);
1232 if (dev->driver)
1233 if(dev->driver->name)
1234 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
1235#endif
1236 aee_sram_printk("%d\n", device_suspend_index++);
9cf519d1
RW
1237 info = "type ";
1238 callback = pm_op(dev->type->pm, state);
1239 goto Run;
9659cc06
RW
1240 }
1241
1eede070
RW
1242 if (dev->class) {
1243 if (dev->class->pm) {
6fa3eb70
S
1244#ifdef LOG
1245 printk(KERN_DEBUG "[%d] class device_suspend\n", device_suspend_index);
1246 if (dev->driver)
1247 if(dev->driver->name)
1248 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
1249#endif
1250 aee_sram_printk("%d\n", device_suspend_index++);
9cf519d1
RW
1251 info = "class ";
1252 callback = pm_op(dev->class->pm, state);
1253 goto Run;
1eede070 1254 } else if (dev->class->suspend) {
6fa3eb70
S
1255#ifdef LOG
1256 printk(KERN_DEBUG "[%d] legacy class device_suspend\n", device_suspend_index);
1257 if (dev->driver)
1258 if(dev->driver->name)
1259 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
1260#endif
1261 aee_sram_printk("%d\n", device_suspend_index++);
1eede070 1262 pm_dev_dbg(dev, state, "legacy class ");
875ab0b7 1263 error = legacy_suspend(dev, state, dev->class->suspend);
4d27e9dc 1264 goto End;
1eede070 1265 }
cd59abfc
AS
1266 }
1267
1eede070
RW
1268 if (dev->bus) {
1269 if (dev->bus->pm) {
6fa3eb70
S
1270#ifdef LOG
1271 printk(KERN_DEBUG "[%d] bus device_suspend\n", device_suspend_index);
1272 if (dev->driver)
1273 if(dev->driver->name)
1274 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
1275#endif
1276 aee_sram_printk("%d\n", device_suspend_index++);
35cd133c 1277 info = "bus ";
9cf519d1 1278 callback = pm_op(dev->bus->pm, state);
1eede070 1279 } else if (dev->bus->suspend) {
6fa3eb70
S
1280#ifdef LOG
1281 printk(KERN_DEBUG "[%d] legacy bus device_suspend\n", device_suspend_index);
1282 if (dev->driver)
1283 if(dev->driver->name)
1284 printk(KERN_DEBUG "dev->driver->name=%s\n", dev->driver->name);
1285#endif
1286 aee_sram_printk("%d\n", device_suspend_index++);
35cd133c 1287 pm_dev_dbg(dev, state, "legacy bus ");
875ab0b7 1288 error = legacy_suspend(dev, state, dev->bus->suspend);
9cf519d1 1289 goto End;
1eede070 1290 }
7538e3db
RW
1291 }
1292
9cf519d1 1293 Run:
35cd133c 1294 if (!callback && dev->driver && dev->driver->pm) {
6fa3eb70
S
1295#ifdef LOG
1296 printk(KERN_DEBUG "[%d] driver device_suspend\n", device_suspend_index);
1297 if (dev->driver)
1298 if(dev->driver->name)
1299 printk("dev->driver->name=%s\n", dev->driver->name);
1300#endif
1301 aee_sram_printk("%d\n", device_suspend_index++);
35cd133c
RW
1302 info = "driver ";
1303 callback = pm_op(dev->driver->pm, state);
1304 }
1305
9cf519d1
RW
1306 error = dpm_run_callback(callback, dev, state, info);
1307
1eede070 1308 End:
4ca46ff3
RW
1309 if (!error) {
1310 dev->power.is_suspended = true;
8b258cc8
RW
1311 if (dev->power.wakeup_path
1312 && dev->parent && !dev->parent->power.ignore_children)
4ca46ff3
RW
1313 dev->parent->power.wakeup_path = true;
1314 }
6d0e0e84 1315
8e9394ce 1316 device_unlock(dev);
1f758b23 1317
6fa3eb70
S
1318 dpm_wd_clear(&wd);
1319
1f758b23 1320 Complete:
5af84b82 1321 complete_all(&dev->power.completion);
88d26136 1322 if (error)
098dff73
RW
1323 async_error = error;
1324
cd59abfc
AS
1325 return error;
1326}
1327
5af84b82
RW
1328static void async_suspend(void *data, async_cookie_t cookie)
1329{
1330 struct device *dev = (struct device *)data;
1331 int error;
1332
1333 error = __device_suspend(dev, pm_transition, true);
2a77c46d
SL
1334 if (error) {
1335 dpm_save_failed_dev(dev_name(dev));
5af84b82 1336 pm_dev_err(dev, pm_transition, " async", error);
2a77c46d 1337 }
5af84b82
RW
1338
1339 put_device(dev);
1340}
1341
1342static int device_suspend(struct device *dev)
1343{
1344 INIT_COMPLETION(dev->power.completion);
1345
0e06b4a8 1346 if (pm_async_enabled && dev->power.async_suspend) {
5af84b82 1347 get_device(dev);
6fa3eb70 1348 hib_log("using async mode (check value of \"/sys/power/pm_async\"\n");
5af84b82
RW
1349 async_schedule(async_suspend, dev);
1350 return 0;
1351 }
1352
1353 return __device_suspend(dev, pm_transition, false);
1354}
1355
cd59abfc 1356/**
20d652d7
RW
1357 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1358 * @state: PM transition of the system being carried out.
cd59abfc 1359 */
91e7c75b 1360int dpm_suspend(pm_message_t state)
cd59abfc 1361{
ecf762b2 1362 ktime_t starttime = ktime_get();
cd59abfc
AS
1363 int error = 0;
1364
91e7c75b
RW
1365 might_sleep();
1366
cd59abfc 1367 mutex_lock(&dpm_list_mtx);
5af84b82
RW
1368 pm_transition = state;
1369 async_error = 0;
8a43a9ab
RW
1370 while (!list_empty(&dpm_prepared_list)) {
1371 struct device *dev = to_device(dpm_prepared_list.prev);
58aca232 1372
1eede070 1373 get_device(dev);
cd59abfc 1374 mutex_unlock(&dpm_list_mtx);
1eede070 1375
5af84b82 1376 error = device_suspend(dev);
1eede070 1377
1b3cbec1 1378 mutex_lock(&dpm_list_mtx);
775b64d2 1379 if (error) {
1eede070 1380 pm_dev_err(dev, state, "", error);
2a77c46d 1381 dpm_save_failed_dev(dev_name(dev));
1eede070 1382 put_device(dev);
6fa3eb70 1383 hib_log("Device %s failed to %s: error %d\n", dev_name(dev), pm_verb(state.event), error);
775b64d2
RW
1384 break;
1385 }
7a8d37a3 1386 if (!list_empty(&dev->power.entry))
8a43a9ab 1387 list_move(&dev->power.entry, &dpm_suspended_list);
1eede070 1388 put_device(dev);
6fa3eb70
S
1389 if (async_error) {
1390 hib_log("async_error(%d)\n", async_error);
5af84b82 1391 break;
6fa3eb70 1392 }
cd59abfc 1393 }
6fa3eb70
S
1394 device_suspend_index = 0;
1395
cd59abfc 1396 mutex_unlock(&dpm_list_mtx);
5af84b82
RW
1397 async_synchronize_full();
1398 if (!error)
1399 error = async_error;
2a77c46d
SL
1400 if (error) {
1401 suspend_stats.failed_suspend++;
1402 dpm_save_failed_step(SUSPEND_SUSPEND);
1403 } else
ecf762b2 1404 dpm_show_time(starttime, state, NULL);
6fa3eb70
S
1405
1406 hib_log("return error(%d)\n", error);
1eede070
RW
1407 return error;
1408}
6fa3eb70 1409EXPORT_SYMBOL_GPL(dpm_suspend);
1eede070
RW
1410
1411/**
20d652d7
RW
1412 * device_prepare - Prepare a device for system power transition.
1413 * @dev: Device to handle.
1414 * @state: PM transition of the system being carried out.
1415 *
1416 * Execute the ->prepare() callback(s) for given device. No new children of the
1417 * device may be registered after this function has returned.
1eede070 1418 */
d1616302 1419static int device_prepare(struct device *dev, pm_message_t state)
1eede070 1420{
35cd133c
RW
1421 int (*callback)(struct device *) = NULL;
1422 char *info = NULL;
1eede070
RW
1423 int error = 0;
1424
dbf37414
RW
1425 if (dev->power.syscore)
1426 return 0;
1427
88d26136
AS
1428 /*
1429 * If a device's parent goes into runtime suspend at the wrong time,
1430 * it won't be possible to resume the device. To prevent this we
1431 * block runtime suspend here, during the prepare phase, and allow
1432 * it again during the complete phase.
1433 */
1434 pm_runtime_get_noresume(dev);
1435
8e9394ce 1436 device_lock(dev);
1eede070 1437
4ca46ff3
RW
1438 dev->power.wakeup_path = device_may_wakeup(dev);
1439
564b905a 1440 if (dev->pm_domain) {
35cd133c
RW
1441 info = "preparing power domain ";
1442 callback = dev->pm_domain->ops.prepare;
4d27e9dc 1443 } else if (dev->type && dev->type->pm) {
35cd133c
RW
1444 info = "preparing type ";
1445 callback = dev->type->pm->prepare;
9659cc06 1446 } else if (dev->class && dev->class->pm) {
35cd133c
RW
1447 info = "preparing class ";
1448 callback = dev->class->pm->prepare;
9659cc06 1449 } else if (dev->bus && dev->bus->pm) {
35cd133c
RW
1450 info = "preparing bus ";
1451 callback = dev->bus->pm->prepare;
1452 }
1453
1454 if (!callback && dev->driver && dev->driver->pm) {
1455 info = "preparing driver ";
1456 callback = dev->driver->pm->prepare;
1457 }
1458
1459 if (callback) {
1460 error = callback(dev);
1461 suspend_report_result(callback, error);
1eede070 1462 }
7538e3db 1463
8e9394ce 1464 device_unlock(dev);
1eede070
RW
1465
1466 return error;
1467}
cd59abfc 1468
1eede070 1469/**
20d652d7
RW
1470 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1471 * @state: PM transition of the system being carried out.
1eede070 1472 *
20d652d7 1473 * Execute the ->prepare() callback(s) for all devices.
1eede070 1474 */
91e7c75b 1475int dpm_prepare(pm_message_t state)
1eede070 1476{
1eede070
RW
1477 int error = 0;
1478
91e7c75b
RW
1479 might_sleep();
1480
1eede070 1481 mutex_lock(&dpm_list_mtx);
1eede070
RW
1482 while (!list_empty(&dpm_list)) {
1483 struct device *dev = to_device(dpm_list.next);
1484
1485 get_device(dev);
1eede070
RW
1486 mutex_unlock(&dpm_list_mtx);
1487
1e2ef05b 1488 error = device_prepare(dev, state);
1eede070
RW
1489
1490 mutex_lock(&dpm_list_mtx);
1491 if (error) {
1eede070
RW
1492 if (error == -EAGAIN) {
1493 put_device(dev);
886a7a33 1494 error = 0;
1eede070
RW
1495 continue;
1496 }
1e75227e
RW
1497 printk(KERN_INFO "PM: Device %s not prepared "
1498 "for power transition: code %d\n",
5c1a07ab 1499 dev_name(dev), error);
1eede070
RW
1500 put_device(dev);
1501 break;
1502 }
f76b168b 1503 dev->power.is_prepared = true;
1eede070 1504 if (!list_empty(&dev->power.entry))
8a43a9ab 1505 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1eede070
RW
1506 put_device(dev);
1507 }
1eede070 1508 mutex_unlock(&dpm_list_mtx);
cd59abfc
AS
1509 return error;
1510}
6fa3eb70 1511EXPORT_SYMBOL_GPL(dpm_prepare);
cd59abfc 1512
775b64d2 1513/**
20d652d7
RW
1514 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1515 * @state: PM transition of the system being carried out.
775b64d2 1516 *
20d652d7
RW
1517 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1518 * callbacks for them.
775b64d2 1519 */
d1616302 1520int dpm_suspend_start(pm_message_t state)
775b64d2
RW
1521{
1522 int error;
cd59abfc 1523
1eede070 1524 error = dpm_prepare(state);
2a77c46d
SL
1525 if (error) {
1526 suspend_stats.failed_prepare++;
1527 dpm_save_failed_step(SUSPEND_PREPARE);
1528 } else
1eede070 1529 error = dpm_suspend(state);
cd59abfc 1530 return error;
cd59abfc 1531}
d1616302 1532EXPORT_SYMBOL_GPL(dpm_suspend_start);
cd59abfc
AS
1533
1534void __suspend_report_result(const char *function, void *fn, int ret)
1535{
c80cfb04
BH
1536 if (ret)
1537 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
cd59abfc
AS
1538}
1539EXPORT_SYMBOL_GPL(__suspend_report_result);
f8824cee
RW
1540
1541/**
1542 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1543 * @dev: Device to wait for.
1544 * @subordinate: Device that needs to wait for @dev.
1545 */
098dff73 1546int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
f8824cee
RW
1547{
1548 dpm_wait(dev, subordinate->power.async_suspend);
098dff73 1549 return async_error;
f8824cee
RW
1550}
1551EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
dfe3212e
ML
1552
1553/**
1554 * dpm_for_each_dev - device iterator.
1555 * @data: data for the callback.
1556 * @fn: function to be called for each device.
1557 *
1558 * Iterate over devices in dpm_list, and call @fn for each device,
1559 * passing it @data.
1560 */
1561void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1562{
1563 struct device *dev;
1564
1565 if (!fn)
1566 return;
1567
1568 device_pm_lock();
1569 list_for_each_entry(dev, &dpm_list, power.entry)
1570 fn(dev, data);
1571 device_pm_unlock();
1572}
1573EXPORT_SYMBOL_GPL(dpm_for_each_dev);