PM / Domains: Do not execute device callbacks under locks
mt8127/android_kernel_alcatel_ttab.git: drivers/base/power/domain.c
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/suspend.h>
 18
19#ifdef CONFIG_PM
20
21static struct generic_pm_domain *dev_to_genpd(struct device *dev)
22{
23 if (IS_ERR_OR_NULL(dev->pm_domain))
24 return ERR_PTR(-EINVAL);
25
 26 return pd_to_genpd(dev->pm_domain);
 27}
28
29static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
30{
31 if (!WARN_ON(genpd->sd_count == 0))
32 genpd->sd_count--;
33}
34
35static void genpd_acquire_lock(struct generic_pm_domain *genpd)
36{
37 DEFINE_WAIT(wait);
38
39 mutex_lock(&genpd->lock);
40 /*
41 * Wait for the domain to transition into either the active,
42 * or the power off state.
43 */
44 for (;;) {
45 prepare_to_wait(&genpd->status_wait_queue, &wait,
46 TASK_UNINTERRUPTIBLE);
47 if (genpd->status != GPD_STATE_BUSY)
48 break;
49 mutex_unlock(&genpd->lock);
50
51 schedule();
52
53 mutex_lock(&genpd->lock);
54 }
55 finish_wait(&genpd->status_wait_queue, &wait);
56}
57
58static void genpd_release_lock(struct generic_pm_domain *genpd)
59{
60 mutex_unlock(&genpd->lock);
61}
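/*
 * Example (hypothetical caller, a minimal sketch): code that needs a stable
 * view of the domain uses the two helpers above instead of taking
 * genpd->lock directly, so it never observes the transient GPD_STATE_BUSY
 * state:
 *
 *	genpd_acquire_lock(genpd);
 *	// genpd->status is now GPD_STATE_ACTIVE or GPD_STATE_POWER_OFF
 *	genpd_release_lock(genpd);
 */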
62
63/**
64 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
65 * @genpd: PM domain to power up.
66 *
67 * Restore power to @genpd and all of its parents so that it is possible to
68 * resume a device belonging to it.
69 */
 70int pm_genpd_poweron(struct generic_pm_domain *genpd)
 71{
72 struct generic_pm_domain *parent = genpd->parent;
73 DEFINE_WAIT(wait);
74 int ret = 0;
75
76 start:
77 if (parent) {
78 mutex_lock(&parent->lock);
79 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
80 } else {
81 mutex_lock(&genpd->lock);
82 }
83 /*
84 * Wait for the domain to transition into either the active,
85 * or the power off state.
86 */
87 for (;;) {
88 prepare_to_wait(&genpd->status_wait_queue, &wait,
89 TASK_UNINTERRUPTIBLE);
90 if (genpd->status != GPD_STATE_BUSY)
91 break;
92 mutex_unlock(&genpd->lock);
93 if (parent)
94 mutex_unlock(&parent->lock);
95
96 schedule();
 97
98 if (parent) {
99 mutex_lock(&parent->lock);
100 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
101 } else {
102 mutex_lock(&genpd->lock);
103 }
104 }
105 finish_wait(&genpd->status_wait_queue, &wait);
106
107 if (genpd->status == GPD_STATE_ACTIVE
 108 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
109 goto out;
110
 111 if (parent && parent->status != GPD_STATE_ACTIVE) {
 112 mutex_unlock(&genpd->lock);
 113 mutex_unlock(&parent->lock);
 114
 115 ret = pm_genpd_poweron(parent);
116 if (ret)
117 return ret;
118
119 goto start;
120 }
121
122 if (genpd->power_on) {
 123 ret = genpd->power_on(genpd);
124 if (ret)
125 goto out;
126 }
127
128 genpd->status = GPD_STATE_ACTIVE;
129 if (parent)
130 parent->sd_count++;
131
132 out:
133 mutex_unlock(&genpd->lock);
134 if (parent)
135 mutex_unlock(&parent->lock);
136
137 return ret;
138}
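/*
 * Example (hypothetical platform code, a sketch only): pm_genpd_poweron()
 * takes the necessary locks itself, so a caller that must access a device
 * in a domain "my_genpd" (a made-up name) can simply do:
 *
 *	ret = pm_genpd_poweron(&my_genpd);
 *	if (ret)
 *		return ret;
 */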
139
140#endif /* CONFIG_PM */
141
142#ifdef CONFIG_PM_RUNTIME
143
144/**
145 * __pm_genpd_save_device - Save the pre-suspend state of a device.
146 * @dle: Device list entry of the device to save the state of.
147 * @genpd: PM domain the device belongs to.
148 */
149static int __pm_genpd_save_device(struct dev_list_entry *dle,
150 struct generic_pm_domain *genpd)
 151 __releases(&genpd->lock) __acquires(&genpd->lock)
152{
153 struct device *dev = dle->dev;
154 struct device_driver *drv = dev->driver;
155 int ret = 0;
156
157 if (dle->need_restore)
158 return 0;
159
160 mutex_unlock(&genpd->lock);
161
162 if (drv && drv->pm && drv->pm->runtime_suspend) {
163 if (genpd->start_device)
164 genpd->start_device(dev);
165
166 ret = drv->pm->runtime_suspend(dev);
167
168 if (genpd->stop_device)
169 genpd->stop_device(dev);
170 }
171
172 mutex_lock(&genpd->lock);
173
174 if (!ret)
175 dle->need_restore = true;
176
177 return ret;
178}
179
180/**
181 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
182 * @dle: Device list entry of the device to restore the state of.
183 * @genpd: PM domain the device belongs to.
184 */
185static void __pm_genpd_restore_device(struct dev_list_entry *dle,
186 struct generic_pm_domain *genpd)
 187 __releases(&genpd->lock) __acquires(&genpd->lock)
188{
189 struct device *dev = dle->dev;
190 struct device_driver *drv = dev->driver;
191
192 if (!dle->need_restore)
193 return;
194
195 mutex_unlock(&genpd->lock);
196
197 if (drv && drv->pm && drv->pm->runtime_resume) {
198 if (genpd->start_device)
199 genpd->start_device(dev);
200
201 drv->pm->runtime_resume(dev);
202
203 if (genpd->stop_device)
204 genpd->stop_device(dev);
205 }
206
207 mutex_lock(&genpd->lock);
208
209 dle->need_restore = false;
210}
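/*
 * Note: both __pm_genpd_save_device() and __pm_genpd_restore_device() drop
 * genpd->lock around the driver callback and re-acquire it afterwards.
 * Their callers set GPD_STATE_BUSY first, so other threads wait in
 * genpd_acquire_lock() rather than ever seeing a device callback executed
 * under the domain lock.
 */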
211
212/**
213 * pm_genpd_poweroff - Remove power from a given PM domain.
214 * @genpd: PM domain to power down.
215 *
216 * If all of the @genpd's devices have been suspended and all of its subdomains
217 * have been powered down, run the runtime suspend callbacks provided by all of
218 * the @genpd's devices' drivers and remove power from @genpd.
219 */
220static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 221 __releases(&genpd->lock) __acquires(&genpd->lock)
222{
223 struct generic_pm_domain *parent;
224 struct dev_list_entry *dle;
225 unsigned int not_suspended;
226 int ret;
227
 228 if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0)
229 return 0;
230
231 if (genpd->sd_count > 0)
232 return -EBUSY;
233
234 not_suspended = 0;
235 list_for_each_entry(dle, &genpd->dev_list, node)
236 if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
237 not_suspended++;
238
239 if (not_suspended > genpd->in_progress)
240 return -EBUSY;
241
242 if (genpd->gov && genpd->gov->power_down_ok) {
243 if (!genpd->gov->power_down_ok(&genpd->domain))
244 return -EAGAIN;
245 }
246
247 genpd->status = GPD_STATE_BUSY;
248
249 list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
250 ret = __pm_genpd_save_device(dle, genpd);
251 if (ret)
252 goto err_dev;
253 }
254
255 mutex_unlock(&genpd->lock);
256
257 parent = genpd->parent;
258 if (parent) {
259 genpd_acquire_lock(parent);
260 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
261 } else {
262 mutex_lock(&genpd->lock);
263 }
264
265 if (genpd->power_off)
266 genpd->power_off(genpd);
267
268 genpd->status = GPD_STATE_POWER_OFF;
269 wake_up_all(&genpd->status_wait_queue);
 270
271 if (parent) {
272 genpd_sd_counter_dec(parent);
273 if (parent->sd_count == 0)
274 queue_work(pm_wq, &parent->power_off_work);
275
276 genpd_release_lock(parent);
277 }
278
279 return 0;
280
281 err_dev:
282 list_for_each_entry_continue(dle, &genpd->dev_list, node)
283 __pm_genpd_restore_device(dle, genpd);
284
285 genpd->status = GPD_STATE_ACTIVE;
286 wake_up_all(&genpd->status_wait_queue);
287
288 return ret;
289}
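/*
 * Note on lock ordering in pm_genpd_poweroff(): genpd->lock is released
 * before the parent is locked and is then re-taken with
 * SINGLE_DEPTH_NESTING, matching the parent-then-child order used by
 * pm_genpd_poweron(), so the two paths cannot deadlock against each other.
 */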
290
291/**
292 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
293 * @work: Work structure used for scheduling the execution of this function.
294 */
295static void genpd_power_off_work_fn(struct work_struct *work)
296{
297 struct generic_pm_domain *genpd;
298
299 genpd = container_of(work, struct generic_pm_domain, power_off_work);
300
 301 genpd_acquire_lock(genpd);
 302 pm_genpd_poweroff(genpd);
 303 genpd_release_lock(genpd);
304}
305
306/**
307 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
308 * @dev: Device to suspend.
309 *
310 * Carry out a runtime suspend of a device under the assumption that its
311 * pm_domain field points to the domain member of an object of type
312 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
313 */
314static int pm_genpd_runtime_suspend(struct device *dev)
315{
316 struct generic_pm_domain *genpd;
317
318 dev_dbg(dev, "%s()\n", __func__);
319
320 genpd = dev_to_genpd(dev);
321 if (IS_ERR(genpd))
322 return -EINVAL;
323
324 if (genpd->stop_device) {
325 int ret = genpd->stop_device(dev);
326 if (ret)
 327 return ret;
 328 }
329
330 genpd_acquire_lock(genpd);
331 genpd->in_progress++;
332 pm_genpd_poweroff(genpd);
333 genpd->in_progress--;
 334 genpd_release_lock(genpd);
335
336 return 0;
337}
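/*
 * Note: genpd->stop_device() above runs before genpd->lock is taken, and
 * the lock is only held around the bookkeeping and the pm_genpd_poweroff()
 * call, so the device callback itself never executes under the domain lock.
 */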
338
339/**
340 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
341 * @dev: Device to resume.
342 * @genpd: PM domain the device belongs to.
343 */
344static void __pm_genpd_runtime_resume(struct device *dev,
345 struct generic_pm_domain *genpd)
346{
347 struct dev_list_entry *dle;
348
349 list_for_each_entry(dle, &genpd->dev_list, node) {
350 if (dle->dev == dev) {
351 __pm_genpd_restore_device(dle, genpd);
352 break;
353 }
354 }
355}
356
357/**
358 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
359 * @dev: Device to resume.
360 *
361 * Carry out a runtime resume of a device under the assumption that its
362 * pm_domain field points to the domain member of an object of type
363 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
364 */
365static int pm_genpd_runtime_resume(struct device *dev)
366{
367 struct generic_pm_domain *genpd;
368 int ret;
369
370 dev_dbg(dev, "%s()\n", __func__);
371
372 genpd = dev_to_genpd(dev);
373 if (IS_ERR(genpd))
374 return -EINVAL;
375
376 ret = pm_genpd_poweron(genpd);
377 if (ret)
378 return ret;
379
380 genpd_acquire_lock(genpd);
381 genpd->status = GPD_STATE_BUSY;
 382 __pm_genpd_runtime_resume(dev, genpd);
383 genpd->status = GPD_STATE_ACTIVE;
384 wake_up_all(&genpd->status_wait_queue);
385 genpd_release_lock(genpd);
386
387 if (genpd->start_device)
388 genpd->start_device(dev);
389
390 return 0;
391}
392
393#else
394
395static inline void genpd_power_off_work_fn(struct work_struct *work) {}
396static inline void __pm_genpd_runtime_resume(struct device *dev,
397 struct generic_pm_domain *genpd) {}
398
399#define pm_genpd_runtime_suspend NULL
400#define pm_genpd_runtime_resume NULL
401
402#endif /* CONFIG_PM_RUNTIME */
403
404#ifdef CONFIG_PM_SLEEP
405
406/**
407 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
408 * @genpd: PM domain to power off, if possible.
409 *
410 * Check if the given PM domain can be powered off (during system suspend or
411 * hibernation) and do that if so. Also, in that case propagate to its parent.
412 *
413 * This function is only called in "noirq" stages of system power transitions,
414 * so it need not acquire locks (all of the "noirq" callbacks are executed
415 * sequentially, so it is guaranteed that it will never run twice in parallel).
416 */
417static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
418{
419 struct generic_pm_domain *parent = genpd->parent;
420
 421 if (genpd->status == GPD_STATE_POWER_OFF)
422 return;
423
424 if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
425 return;
426
427 if (genpd->power_off)
428 genpd->power_off(genpd);
429
 430 genpd->status = GPD_STATE_POWER_OFF;
431 if (parent) {
432 genpd_sd_counter_dec(parent);
433 pm_genpd_sync_poweroff(parent);
434 }
435}
436
437/**
438 * pm_genpd_prepare - Start power transition of a device in a PM domain.
439 * @dev: Device to start the transition of.
440 *
441 * Start a power transition of a device (during a system-wide power transition)
442 * under the assumption that its pm_domain field points to the domain member of
443 * an object of type struct generic_pm_domain representing a PM domain
444 * consisting of I/O devices.
445 */
446static int pm_genpd_prepare(struct device *dev)
447{
448 struct generic_pm_domain *genpd;
 449 int ret;
450
451 dev_dbg(dev, "%s()\n", __func__);
452
453 genpd = dev_to_genpd(dev);
454 if (IS_ERR(genpd))
455 return -EINVAL;
456
457 /*
458 * If a wakeup request is pending for the device, it should be woken up
459 * at this point and a system wakeup event should be reported if it's
460 * set up to wake up the system from sleep states.
461 */
462 pm_runtime_get_noresume(dev);
463 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
464 pm_wakeup_event(dev, 0);
465
466 if (pm_wakeup_pending()) {
467 pm_runtime_put_sync(dev);
468 return -EBUSY;
469 }
470
471 genpd_acquire_lock(genpd);
472
473 if (genpd->prepared_count++ == 0)
474 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
475
476 genpd_release_lock(genpd);
477
478 if (genpd->suspend_power_off) {
 479 pm_runtime_put_noidle(dev);
480 return 0;
481 }
482
483 /*
484 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
485 * so pm_genpd_poweron() will return immediately, but if the device
486 * is suspended (e.g. it's been stopped by .stop_device()), we need
487 * to make it operational.
 488 */
 489 pm_runtime_resume(dev);
490 __pm_runtime_disable(dev, false);
491
492 ret = pm_generic_prepare(dev);
493 if (ret) {
494 mutex_lock(&genpd->lock);
495
496 if (--genpd->prepared_count == 0)
497 genpd->suspend_power_off = false;
498
499 mutex_unlock(&genpd->lock);
 500 pm_runtime_enable(dev);
 501 }
502
503 pm_runtime_put_sync(dev);
 504 return ret;
505}
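/*
 * Note: every return path of pm_genpd_prepare() drops the reference taken
 * by pm_runtime_get_noresume() above (via pm_runtime_put_sync() or
 * pm_runtime_put_noidle()), and on success it leaves runtime PM disabled;
 * pm_genpd_complete() re-enables it once the system transition is over.
 */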
506
507/**
508 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
509 * @dev: Device to suspend.
510 *
511 * Suspend a device under the assumption that its pm_domain field points to the
512 * domain member of an object of type struct generic_pm_domain representing
513 * a PM domain consisting of I/O devices.
514 */
515static int pm_genpd_suspend(struct device *dev)
516{
517 struct generic_pm_domain *genpd;
518
519 dev_dbg(dev, "%s()\n", __func__);
520
521 genpd = dev_to_genpd(dev);
522 if (IS_ERR(genpd))
523 return -EINVAL;
524
525 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
526}
527
528/**
529 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
530 * @dev: Device to suspend.
531 *
532 * Carry out a late suspend of a device under the assumption that its
533 * pm_domain field points to the domain member of an object of type
534 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
535 */
536static int pm_genpd_suspend_noirq(struct device *dev)
537{
538 struct generic_pm_domain *genpd;
539 int ret;
540
541 dev_dbg(dev, "%s()\n", __func__);
542
543 genpd = dev_to_genpd(dev);
544 if (IS_ERR(genpd))
545 return -EINVAL;
546
547 if (genpd->suspend_power_off)
548 return 0;
549
550 ret = pm_generic_suspend_noirq(dev);
551 if (ret)
552 return ret;
553
554 if (device_may_wakeup(dev)
555 && genpd->active_wakeup && genpd->active_wakeup(dev))
556 return 0;
557
558 if (genpd->stop_device)
559 genpd->stop_device(dev);
560
561 /*
562 * Since all of the "noirq" callbacks are executed sequentially, it is
563 * guaranteed that this function will never run twice in parallel for
564 * the same PM domain, so it is not necessary to use locking here.
565 */
566 genpd->suspended_count++;
567 pm_genpd_sync_poweroff(genpd);
568
569 return 0;
570}
571
572/**
573 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
574 * @dev: Device to resume.
575 *
576 * Carry out an early resume of a device under the assumption that its
577 * pm_domain field points to the domain member of an object of type
578 * struct generic_pm_domain representing a power domain consisting of I/O
579 * devices.
580 */
581static int pm_genpd_resume_noirq(struct device *dev)
582{
583 struct generic_pm_domain *genpd;
584
585 dev_dbg(dev, "%s()\n", __func__);
586
587 genpd = dev_to_genpd(dev);
588 if (IS_ERR(genpd))
589 return -EINVAL;
590
591 if (genpd->suspend_power_off)
592 return 0;
593
594 /*
595 * Since all of the "noirq" callbacks are executed sequentially, it is
596 * guaranteed that this function will never run twice in parallel for
597 * the same PM domain, so it is not necessary to use locking here.
598 */
599 pm_genpd_poweron(genpd);
600 genpd->suspended_count--;
601 if (genpd->start_device)
602 genpd->start_device(dev);
603
604 return pm_generic_resume_noirq(dev);
605}
606
607/**
608 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
609 * @dev: Device to resume.
610 *
611 * Resume a device under the assumption that its pm_domain field points to the
612 * domain member of an object of type struct generic_pm_domain representing
613 * a power domain consisting of I/O devices.
614 */
615static int pm_genpd_resume(struct device *dev)
616{
617 struct generic_pm_domain *genpd;
618
619 dev_dbg(dev, "%s()\n", __func__);
620
621 genpd = dev_to_genpd(dev);
622 if (IS_ERR(genpd))
623 return -EINVAL;
624
625 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
626}
627
628/**
629 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
630 * @dev: Device to freeze.
631 *
632 * Freeze a device under the assumption that its pm_domain field points to the
633 * domain member of an object of type struct generic_pm_domain representing
634 * a power domain consisting of I/O devices.
635 */
636static int pm_genpd_freeze(struct device *dev)
637{
638 struct generic_pm_domain *genpd;
639
640 dev_dbg(dev, "%s()\n", __func__);
641
642 genpd = dev_to_genpd(dev);
643 if (IS_ERR(genpd))
644 return -EINVAL;
645
646 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
647}
648
649/**
650 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
651 * @dev: Device to freeze.
652 *
653 * Carry out a late freeze of a device under the assumption that its
654 * pm_domain field points to the domain member of an object of type
655 * struct generic_pm_domain representing a power domain consisting of I/O
656 * devices.
657 */
658static int pm_genpd_freeze_noirq(struct device *dev)
659{
660 struct generic_pm_domain *genpd;
661 int ret;
662
663 dev_dbg(dev, "%s()\n", __func__);
664
665 genpd = dev_to_genpd(dev);
666 if (IS_ERR(genpd))
667 return -EINVAL;
668
669 if (genpd->suspend_power_off)
670 return 0;
671
672 ret = pm_generic_freeze_noirq(dev);
673 if (ret)
674 return ret;
675
676 if (genpd->stop_device)
677 genpd->stop_device(dev);
678
679 return 0;
680}
681
682/**
683 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
684 * @dev: Device to thaw.
685 *
686 * Carry out an early thaw of a device under the assumption that its
687 * pm_domain field points to the domain member of an object of type
688 * struct generic_pm_domain representing a power domain consisting of I/O
689 * devices.
690 */
691static int pm_genpd_thaw_noirq(struct device *dev)
692{
693 struct generic_pm_domain *genpd;
694
695 dev_dbg(dev, "%s()\n", __func__);
696
697 genpd = dev_to_genpd(dev);
698 if (IS_ERR(genpd))
699 return -EINVAL;
700
701 if (genpd->suspend_power_off)
702 return 0;
703
704 if (genpd->start_device)
705 genpd->start_device(dev);
706
707 return pm_generic_thaw_noirq(dev);
708}
709
710/**
711 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
712 * @dev: Device to thaw.
713 *
714 * Thaw a device under the assumption that its pm_domain field points to the
715 * domain member of an object of type struct generic_pm_domain representing
716 * a power domain consisting of I/O devices.
717 */
718static int pm_genpd_thaw(struct device *dev)
719{
720 struct generic_pm_domain *genpd;
721
722 dev_dbg(dev, "%s()\n", __func__);
723
724 genpd = dev_to_genpd(dev);
725 if (IS_ERR(genpd))
726 return -EINVAL;
727
728 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
729}
730
731/**
732 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
733 * @dev: Device to suspend.
734 *
735 * Power off a device under the assumption that its pm_domain field points to
736 * the domain member of an object of type struct generic_pm_domain representing
737 * a PM domain consisting of I/O devices.
738 */
739static int pm_genpd_dev_poweroff(struct device *dev)
740{
741 struct generic_pm_domain *genpd;
742
743 dev_dbg(dev, "%s()\n", __func__);
744
745 genpd = dev_to_genpd(dev);
746 if (IS_ERR(genpd))
747 return -EINVAL;
748
749 return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
750}
751
752/**
753 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
754 * @dev: Device to suspend.
755 *
756 * Carry out a late powering off of a device under the assumption that its
757 * pm_domain field points to the domain member of an object of type
758 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
759 */
760static int pm_genpd_dev_poweroff_noirq(struct device *dev)
761{
762 struct generic_pm_domain *genpd;
763 int ret;
764
765 dev_dbg(dev, "%s()\n", __func__);
766
767 genpd = dev_to_genpd(dev);
768 if (IS_ERR(genpd))
769 return -EINVAL;
770
771 if (genpd->suspend_power_off)
772 return 0;
773
774 ret = pm_generic_poweroff_noirq(dev);
775 if (ret)
776 return ret;
777
778 if (device_may_wakeup(dev)
779 && genpd->active_wakeup && genpd->active_wakeup(dev))
780 return 0;
781
782 if (genpd->stop_device)
783 genpd->stop_device(dev);
784
785 /*
786 * Since all of the "noirq" callbacks are executed sequentially, it is
787 * guaranteed that this function will never run twice in parallel for
788 * the same PM domain, so it is not necessary to use locking here.
789 */
790 genpd->suspended_count++;
791 pm_genpd_sync_poweroff(genpd);
792
793 return 0;
794}
795
796/**
797 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
798 * @dev: Device to resume.
799 *
800 * Carry out an early restore of a device under the assumption that its
801 * pm_domain field points to the domain member of an object of type
802 * struct generic_pm_domain representing a power domain consisting of I/O
803 * devices.
804 */
805static int pm_genpd_restore_noirq(struct device *dev)
806{
807 struct generic_pm_domain *genpd;
808
809 dev_dbg(dev, "%s()\n", __func__);
810
811 genpd = dev_to_genpd(dev);
812 if (IS_ERR(genpd))
813 return -EINVAL;
814
815 /*
816 * Since all of the "noirq" callbacks are executed sequentially, it is
817 * guaranteed that this function will never run twice in parallel for
818 * the same PM domain, so it is not necessary to use locking here.
819 */
 820 genpd->status = GPD_STATE_POWER_OFF;
821 if (genpd->suspend_power_off) {
822 /*
823 * The boot kernel might put the domain into the power on state,
824 * so make sure it really is powered off.
825 */
826 if (genpd->power_off)
827 genpd->power_off(genpd);
828 return 0;
829 }
830
831 pm_genpd_poweron(genpd);
832 genpd->suspended_count--;
833 if (genpd->start_device)
834 genpd->start_device(dev);
835
836 return pm_generic_restore_noirq(dev);
837}
838
839/**
840 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
841 * @dev: Device to resume.
842 *
843 * Restore a device under the assumption that its pm_domain field points to the
844 * domain member of an object of type struct generic_pm_domain representing
845 * a power domain consisting of I/O devices.
846 */
847static int pm_genpd_restore(struct device *dev)
848{
849 struct generic_pm_domain *genpd;
850
851 dev_dbg(dev, "%s()\n", __func__);
852
853 genpd = dev_to_genpd(dev);
854 if (IS_ERR(genpd))
855 return -EINVAL;
856
857 return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
858}
859
860/**
861 * pm_genpd_complete - Complete power transition of a device in a power domain.
862 * @dev: Device to complete the transition of.
863 *
864 * Complete a power transition of a device (during a system-wide power
865 * transition) under the assumption that its pm_domain field points to the
866 * domain member of an object of type struct generic_pm_domain representing
867 * a power domain consisting of I/O devices.
868 */
869static void pm_genpd_complete(struct device *dev)
870{
871 struct generic_pm_domain *genpd;
872 bool run_complete;
873
874 dev_dbg(dev, "%s()\n", __func__);
875
876 genpd = dev_to_genpd(dev);
877 if (IS_ERR(genpd))
878 return;
879
880 mutex_lock(&genpd->lock);
881
882 run_complete = !genpd->suspend_power_off;
883 if (--genpd->prepared_count == 0)
884 genpd->suspend_power_off = false;
885
886 mutex_unlock(&genpd->lock);
887
888 if (run_complete) {
889 pm_generic_complete(dev);
 890 pm_runtime_set_active(dev);
 891 pm_runtime_enable(dev);
 892 pm_runtime_idle(dev);
893 }
894}
895
896#else
897
898#define pm_genpd_prepare NULL
899#define pm_genpd_suspend NULL
900#define pm_genpd_suspend_noirq NULL
901#define pm_genpd_resume_noirq NULL
902#define pm_genpd_resume NULL
903#define pm_genpd_freeze NULL
904#define pm_genpd_freeze_noirq NULL
905#define pm_genpd_thaw_noirq NULL
906#define pm_genpd_thaw NULL
907#define pm_genpd_dev_poweroff_noirq NULL
908#define pm_genpd_dev_poweroff NULL
909#define pm_genpd_restore_noirq NULL
910#define pm_genpd_restore NULL
911#define pm_genpd_complete NULL
912
913#endif /* CONFIG_PM_SLEEP */
914
915/**
916 * pm_genpd_add_device - Add a device to an I/O PM domain.
917 * @genpd: PM domain to add the device to.
918 * @dev: Device to be added.
919 */
920int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
921{
922 struct dev_list_entry *dle;
923 int ret = 0;
924
925 dev_dbg(dev, "%s()\n", __func__);
926
927 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
928 return -EINVAL;
929
 930 genpd_acquire_lock(genpd);
 931
 932 if (genpd->status == GPD_STATE_POWER_OFF) {
933 ret = -EINVAL;
934 goto out;
935 }
936
937 if (genpd->prepared_count > 0) {
938 ret = -EAGAIN;
939 goto out;
940 }
941
942 list_for_each_entry(dle, &genpd->dev_list, node)
943 if (dle->dev == dev) {
944 ret = -EINVAL;
945 goto out;
946 }
947
948 dle = kzalloc(sizeof(*dle), GFP_KERNEL);
949 if (!dle) {
950 ret = -ENOMEM;
951 goto out;
952 }
953
954 dle->dev = dev;
955 dle->need_restore = false;
956 list_add_tail(&dle->node, &genpd->dev_list);
 957 genpd->device_count++;
958
959 spin_lock_irq(&dev->power.lock);
960 dev->pm_domain = &genpd->domain;
961 spin_unlock_irq(&dev->power.lock);
962
963 out:
 964 genpd_release_lock(genpd);
965
966 return ret;
967}
968
969/**
970 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
971 * @genpd: PM domain to remove the device from.
972 * @dev: Device to be removed.
973 */
974int pm_genpd_remove_device(struct generic_pm_domain *genpd,
975 struct device *dev)
976{
977 struct dev_list_entry *dle;
978 int ret = -EINVAL;
979
980 dev_dbg(dev, "%s()\n", __func__);
981
982 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
983 return -EINVAL;
984
 985 genpd_acquire_lock(genpd);
 986
987 if (genpd->prepared_count > 0) {
988 ret = -EAGAIN;
989 goto out;
990 }
991
992 list_for_each_entry(dle, &genpd->dev_list, node) {
993 if (dle->dev != dev)
994 continue;
995
996 spin_lock_irq(&dev->power.lock);
997 dev->pm_domain = NULL;
998 spin_unlock_irq(&dev->power.lock);
999
 1000 genpd->device_count--;
1001 list_del(&dle->node);
1002 kfree(dle);
1003
1004 ret = 0;
1005 break;
1006 }
1007
 1008 out:
 1009 genpd_release_lock(genpd);
1010
1011 return ret;
1012}
1013
1014/**
1015 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1016 * @genpd: Master PM domain to add the subdomain to.
1017 * @new_subdomain: Subdomain to be added.
1018 */
1019int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1020 struct generic_pm_domain *new_subdomain)
1021{
1022 struct generic_pm_domain *subdomain;
1023 int ret = 0;
1024
1025 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
1026 return -EINVAL;
1027
1028 start:
1029 genpd_acquire_lock(genpd);
1030 mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
 1031
1032 if (new_subdomain->status != GPD_STATE_POWER_OFF
1033 && new_subdomain->status != GPD_STATE_ACTIVE) {
1034 mutex_unlock(&new_subdomain->lock);
1035 genpd_release_lock(genpd);
1036 goto start;
1037 }
1038
1039 if (genpd->status == GPD_STATE_POWER_OFF
1040 && new_subdomain->status != GPD_STATE_POWER_OFF) {
1041 ret = -EINVAL;
1042 goto out;
1043 }
1044
1045 list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
1046 if (subdomain == new_subdomain) {
1047 ret = -EINVAL;
1048 goto out;
1049 }
1050 }
1051
1052 list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
1053 new_subdomain->parent = genpd;
 1054 if (new_subdomain->status != GPD_STATE_POWER_OFF)
1055 genpd->sd_count++;
1056
 1057 out:
1058 mutex_unlock(&new_subdomain->lock);
1059 genpd_release_lock(genpd);
1060
1061 return ret;
1062}
1063
1064/**
1065 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1066 * @genpd: Master PM domain to remove the subdomain from.
1067 * @target: Subdomain to be removed.
1068 */
1069int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1070 struct generic_pm_domain *target)
1071{
1072 struct generic_pm_domain *subdomain;
1073 int ret = -EINVAL;
1074
1075 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
1076 return -EINVAL;
1077
1078 start:
1079 genpd_acquire_lock(genpd);
1080
1081 list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
1082 if (subdomain != target)
1083 continue;
1084
1085 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1086
1087 if (subdomain->status != GPD_STATE_POWER_OFF
1088 && subdomain->status != GPD_STATE_ACTIVE) {
1089 mutex_unlock(&subdomain->lock);
1090 genpd_release_lock(genpd);
1091 goto start;
1092 }
1093
1094 list_del(&subdomain->sd_node);
1095 subdomain->parent = NULL;
 1096 if (subdomain->status != GPD_STATE_POWER_OFF)
1097 genpd_sd_counter_dec(genpd);
1098
1099 mutex_unlock(&subdomain->lock);
1100
1101 ret = 0;
1102 break;
1103 }
1104
 1105 genpd_release_lock(genpd);
1106
1107 return ret;
1108}
1109
1110/**
1111 * pm_genpd_init - Initialize a generic I/O PM domain object.
1112 * @genpd: PM domain object to initialize.
1113 * @gov: PM domain governor to associate with the domain (may be NULL).
 1114 * @is_off: Whether the domain is initially off (sets status to GPD_STATE_POWER_OFF).
1115 */
1116void pm_genpd_init(struct generic_pm_domain *genpd,
1117 struct dev_power_governor *gov, bool is_off)
1118{
1119 if (IS_ERR_OR_NULL(genpd))
1120 return;
1121
1122 INIT_LIST_HEAD(&genpd->sd_node);
1123 genpd->parent = NULL;
1124 INIT_LIST_HEAD(&genpd->dev_list);
1125 INIT_LIST_HEAD(&genpd->sd_list);
1126 mutex_init(&genpd->lock);
1127 genpd->gov = gov;
1128 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1129 genpd->in_progress = 0;
1130 genpd->sd_count = 0;
1131 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1132 init_waitqueue_head(&genpd->status_wait_queue);
1133 genpd->device_count = 0;
1134 genpd->suspended_count = 0;
1135 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1136 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1137 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
596ba34b
RW
1138 genpd->domain.ops.prepare = pm_genpd_prepare;
1139 genpd->domain.ops.suspend = pm_genpd_suspend;
1140 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1141 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1142 genpd->domain.ops.resume = pm_genpd_resume;
1143 genpd->domain.ops.freeze = pm_genpd_freeze;
1144 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1145 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1146 genpd->domain.ops.thaw = pm_genpd_thaw;
1147 genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
1148 genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
1149 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1150 genpd->domain.ops.restore = pm_genpd_restore;
1151 genpd->domain.ops.complete = pm_genpd_complete;
 1152}
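/*
 * Example (hypothetical platform code, a minimal sketch only; all "my_*"
 * names are made up): a typical user fills in the domain callbacks,
 * initializes the domain and then attaches its devices and subdomains:
 *
 *	static struct generic_pm_domain my_domain = {
 *		.power_on	= my_domain_power_on,	// hypothetical
 *		.power_off	= my_domain_power_off,	// hypothetical
 *		.start_device	= my_domain_start_dev,	// hypothetical
 *		.stop_device	= my_domain_stop_dev,	// hypothetical
 *	};
 *
 *	pm_genpd_init(&my_domain, NULL, true);
 *	pm_genpd_add_device(&my_domain, &my_pdev->dev);
 *	pm_genpd_add_subdomain(&my_domain, &my_child_domain);
 */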