PM / Domains: System-wide transitions support for generic domains (v5)
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / base / power / domain.c
CommitLineData
f721889f
RW
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16
5248051b
RW
17#ifdef CONFIG_PM
18
19static struct generic_pm_domain *dev_to_genpd(struct device *dev)
20{
21 if (IS_ERR_OR_NULL(dev->pm_domain))
22 return ERR_PTR(-EINVAL);
23
596ba34b 24 return pd_to_genpd(dev->pm_domain);
5248051b 25}
f721889f
RW
26
27static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
28{
29 if (!WARN_ON(genpd->sd_count == 0))
30 genpd->sd_count--;
31}
32
5248051b
RW
33/**
34 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
35 * @genpd: PM domain to power up.
36 *
37 * Restore power to @genpd and all of its parents so that it is possible to
38 * resume a device belonging to it.
39 */
40static int pm_genpd_poweron(struct generic_pm_domain *genpd)
41{
42 int ret = 0;
43
44 start:
45 if (genpd->parent)
46 mutex_lock(&genpd->parent->lock);
47 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
48
596ba34b
RW
49 if (!genpd->power_is_off
50 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
5248051b
RW
51 goto out;
52
53 if (genpd->parent && genpd->parent->power_is_off) {
54 mutex_unlock(&genpd->lock);
55 mutex_unlock(&genpd->parent->lock);
56
57 ret = pm_genpd_poweron(genpd->parent);
58 if (ret)
59 return ret;
60
61 goto start;
62 }
63
64 if (genpd->power_on) {
65 int ret = genpd->power_on(genpd);
66 if (ret)
67 goto out;
68 }
69
70 genpd->power_is_off = false;
71 if (genpd->parent)
72 genpd->parent->sd_count++;
73
74 out:
75 mutex_unlock(&genpd->lock);
76 if (genpd->parent)
77 mutex_unlock(&genpd->parent->lock);
78
79 return ret;
80}
81
82#endif /* CONFIG_PM */
83
84#ifdef CONFIG_PM_RUNTIME
85
f721889f
RW
86/**
87 * __pm_genpd_save_device - Save the pre-suspend state of a device.
88 * @dle: Device list entry of the device to save the state of.
89 * @genpd: PM domain the device belongs to.
90 */
91static int __pm_genpd_save_device(struct dev_list_entry *dle,
92 struct generic_pm_domain *genpd)
93{
94 struct device *dev = dle->dev;
95 struct device_driver *drv = dev->driver;
96 int ret = 0;
97
98 if (dle->need_restore)
99 return 0;
100
101 if (drv && drv->pm && drv->pm->runtime_suspend) {
102 if (genpd->start_device)
103 genpd->start_device(dev);
104
105 ret = drv->pm->runtime_suspend(dev);
106
107 if (genpd->stop_device)
108 genpd->stop_device(dev);
109 }
110
111 if (!ret)
112 dle->need_restore = true;
113
114 return ret;
115}
116
117/**
118 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
119 * @dle: Device list entry of the device to restore the state of.
120 * @genpd: PM domain the device belongs to.
121 */
122static void __pm_genpd_restore_device(struct dev_list_entry *dle,
123 struct generic_pm_domain *genpd)
124{
125 struct device *dev = dle->dev;
126 struct device_driver *drv = dev->driver;
127
128 if (!dle->need_restore)
129 return;
130
131 if (drv && drv->pm && drv->pm->runtime_resume) {
132 if (genpd->start_device)
133 genpd->start_device(dev);
134
135 drv->pm->runtime_resume(dev);
136
137 if (genpd->stop_device)
138 genpd->stop_device(dev);
139 }
140
141 dle->need_restore = false;
142}
143
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Called with @genpd's lock held by the callers in this file.  Returns 0 on
 * success (or when there is nothing to do), -EBUSY if the domain is still in
 * use, -EAGAIN if the governor vetoes the power-down, or an error code from a
 * device's save callback.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *parent;
	struct dev_list_entry *dle;
	unsigned int not_suspended;
	int ret;

	/*
	 * Nothing to do if already off; do not interfere while a system-wide
	 * transition has the domain prepared.
	 */
	if (genpd->power_is_off || genpd->prepared_count > 0)
		return 0;

	/* Powered-on subdomains keep this domain busy. */
	if (genpd->sd_count > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
			not_suspended++;

	/*
	 * in_progress counts suspends currently being carried out for devices
	 * in this domain (see pm_genpd_runtime_suspend()), so those do not
	 * block the power-down.
	 */
	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	/* Let the governor veto the power-down. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Save device states in reverse registration order. */
	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
		ret = __pm_genpd_save_device(dle, genpd);
		if (ret)
			goto err_dev;
	}

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->power_is_off = true;

	/*
	 * The parent lost an active subdomain; schedule its own power-down if
	 * this was the last one.
	 */
	parent = genpd->parent;
	if (parent) {
		genpd_sd_counter_dec(parent);
		if (parent->sd_count == 0)
			queue_work(pm_wq, &parent->power_off_work);
	}

	return 0;

 err_dev:
	/*
	 * dle points at the entry that failed to save; the entries following
	 * it (in list order) were saved already, so restore them.
	 */
	list_for_each_entry_continue(dle, &genpd->dev_list, node)
		__pm_genpd_restore_device(dle, genpd);

	return ret;
}
204
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	/* Lock the parent first to keep the locking order used elsewhere. */
	if (genpd->parent)
		mutex_lock(&genpd->parent->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
	/* A failure here just means the domain cannot be powered off now. */
	pm_genpd_poweroff(genpd);
	mutex_unlock(&genpd->lock);
	if (genpd->parent)
		mutex_unlock(&genpd->parent->lock);
}
223
224/**
225 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
226 * @dev: Device to suspend.
227 *
228 * Carry out a runtime suspend of a device under the assumption that its
229 * pm_domain field points to the domain member of an object of type
230 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
231 */
232static int pm_genpd_runtime_suspend(struct device *dev)
233{
234 struct generic_pm_domain *genpd;
235
236 dev_dbg(dev, "%s()\n", __func__);
237
5248051b
RW
238 genpd = dev_to_genpd(dev);
239 if (IS_ERR(genpd))
f721889f
RW
240 return -EINVAL;
241
f721889f
RW
242 if (genpd->parent)
243 mutex_lock(&genpd->parent->lock);
244 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
245
246 if (genpd->stop_device) {
247 int ret = genpd->stop_device(dev);
248 if (ret)
249 goto out;
250 }
251 genpd->in_progress++;
252 pm_genpd_poweroff(genpd);
253 genpd->in_progress--;
254
255 out:
256 mutex_unlock(&genpd->lock);
257 if (genpd->parent)
258 mutex_unlock(&genpd->parent->lock);
259
260 return 0;
261}
262
596ba34b
RW
263/**
264 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
265 * @dev: Device to resume.
266 * @genpd: PM domain the device belongs to.
267 */
268static void __pm_genpd_runtime_resume(struct device *dev,
269 struct generic_pm_domain *genpd)
270{
271 struct dev_list_entry *dle;
272
273 list_for_each_entry(dle, &genpd->dev_list, node) {
274 if (dle->dev == dev) {
275 __pm_genpd_restore_device(dle, genpd);
276 break;
277 }
278 }
279
280 if (genpd->start_device)
281 genpd->start_device(dev);
282}
283
f721889f
RW
284/**
285 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
286 * @dev: Device to resume.
287 *
288 * Carry out a runtime resume of a device under the assumption that its
289 * pm_domain field points to the domain member of an object of type
290 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
291 */
292static int pm_genpd_runtime_resume(struct device *dev)
293{
294 struct generic_pm_domain *genpd;
f721889f
RW
295 int ret;
296
297 dev_dbg(dev, "%s()\n", __func__);
298
5248051b
RW
299 genpd = dev_to_genpd(dev);
300 if (IS_ERR(genpd))
f721889f
RW
301 return -EINVAL;
302
f721889f
RW
303 ret = pm_genpd_poweron(genpd);
304 if (ret)
305 return ret;
306
307 mutex_lock(&genpd->lock);
596ba34b 308 __pm_genpd_runtime_resume(dev, genpd);
f721889f
RW
309 mutex_unlock(&genpd->lock);
310
311 return 0;
312}
313
#else

/*
 * CONFIG_PM_RUNTIME is not set: provide empty stubs for the helpers used by
 * the system-sleep code and NULL out the runtime PM callbacks.
 */
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
					     struct generic_pm_domain *genpd) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */
324
596ba34b
RW
325#ifdef CONFIG_PM_SLEEP
326
327/**
328 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
329 * @genpd: PM domain to power off, if possible.
330 *
331 * Check if the given PM domain can be powered off (during system suspend or
332 * hibernation) and do that if so. Also, in that case propagate to its parent.
333 *
334 * This function is only called in "noirq" stages of system power transitions,
335 * so it need not acquire locks (all of the "noirq" callbacks are executed
336 * sequentially, so it is guaranteed that it will never run twice in parallel).
337 */
338static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
339{
340 struct generic_pm_domain *parent = genpd->parent;
341
342 if (genpd->power_is_off)
343 return;
344
345 if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
346 return;
347
348 if (genpd->power_off)
349 genpd->power_off(genpd);
350
351 genpd->power_is_off = true;
352 if (parent) {
353 genpd_sd_counter_dec(parent);
354 pm_genpd_sync_poweroff(parent);
355 }
356}
357
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	/*
	 * The first device prepared in the domain decides, for the whole
	 * transition, whether the domain stays powered off: if it is off at
	 * this point, suspend_power_off is set and all subsequent system
	 * callbacks for the domain's devices become no-ops.
	 */
	if (genpd->prepared_count++ == 0)
		genpd->suspend_power_off = genpd->power_is_off;

	if (genpd->suspend_power_off) {
		mutex_unlock(&genpd->lock);
		return 0;
	}

	/*
	 * If the device is in the (runtime) "suspended" state, call
	 * .start_device() for it, if defined.
	 */
	if (pm_runtime_suspended(dev))
		__pm_genpd_runtime_resume(dev, genpd);

	/*
	 * Do not check if runtime resume is pending at this point, because it
	 * has been taken care of already and if pm_genpd_poweron() ran at this
	 * point as a result of the check, it would deadlock.
	 */
	__pm_runtime_disable(dev, false);

	mutex_unlock(&genpd->lock);

	return pm_generic_prepare(dev);
}
405
406/**
407 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
408 * @dev: Device to suspend.
409 *
410 * Suspend a device under the assumption that its pm_domain field points to the
411 * domain member of an object of type struct generic_pm_domain representing
412 * a PM domain consisting of I/O devices.
413 */
414static int pm_genpd_suspend(struct device *dev)
415{
416 struct generic_pm_domain *genpd;
417
418 dev_dbg(dev, "%s()\n", __func__);
419
420 genpd = dev_to_genpd(dev);
421 if (IS_ERR(genpd))
422 return -EINVAL;
423
424 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
425}
426
427/**
428 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
429 * @dev: Device to suspend.
430 *
431 * Carry out a late suspend of a device under the assumption that its
432 * pm_domain field points to the domain member of an object of type
433 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
434 */
435static int pm_genpd_suspend_noirq(struct device *dev)
436{
437 struct generic_pm_domain *genpd;
438 int ret;
439
440 dev_dbg(dev, "%s()\n", __func__);
441
442 genpd = dev_to_genpd(dev);
443 if (IS_ERR(genpd))
444 return -EINVAL;
445
446 if (genpd->suspend_power_off)
447 return 0;
448
449 ret = pm_generic_suspend_noirq(dev);
450 if (ret)
451 return ret;
452
453 if (genpd->stop_device)
454 genpd->stop_device(dev);
455
456 /*
457 * Since all of the "noirq" callbacks are executed sequentially, it is
458 * guaranteed that this function will never run twice in parallel for
459 * the same PM domain, so it is not necessary to use locking here.
460 */
461 genpd->suspended_count++;
462 pm_genpd_sync_poweroff(genpd);
463
464 return 0;
465}
466
467/**
468 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
469 * @dev: Device to resume.
470 *
471 * Carry out an early resume of a device under the assumption that its
472 * pm_domain field points to the domain member of an object of type
473 * struct generic_pm_domain representing a power domain consisting of I/O
474 * devices.
475 */
476static int pm_genpd_resume_noirq(struct device *dev)
477{
478 struct generic_pm_domain *genpd;
479
480 dev_dbg(dev, "%s()\n", __func__);
481
482 genpd = dev_to_genpd(dev);
483 if (IS_ERR(genpd))
484 return -EINVAL;
485
486 if (genpd->suspend_power_off)
487 return 0;
488
489 /*
490 * Since all of the "noirq" callbacks are executed sequentially, it is
491 * guaranteed that this function will never run twice in parallel for
492 * the same PM domain, so it is not necessary to use locking here.
493 */
494 pm_genpd_poweron(genpd);
495 genpd->suspended_count--;
496 if (genpd->start_device)
497 genpd->start_device(dev);
498
499 return pm_generic_resume_noirq(dev);
500}
501
502/**
503 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
504 * @dev: Device to resume.
505 *
506 * Resume a device under the assumption that its pm_domain field points to the
507 * domain member of an object of type struct generic_pm_domain representing
508 * a power domain consisting of I/O devices.
509 */
510static int pm_genpd_resume(struct device *dev)
511{
512 struct generic_pm_domain *genpd;
513
514 dev_dbg(dev, "%s()\n", __func__);
515
516 genpd = dev_to_genpd(dev);
517 if (IS_ERR(genpd))
518 return -EINVAL;
519
520 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
521}
522
523/**
524 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
525 * @dev: Device to freeze.
526 *
527 * Freeze a device under the assumption that its pm_domain field points to the
528 * domain member of an object of type struct generic_pm_domain representing
529 * a power domain consisting of I/O devices.
530 */
531static int pm_genpd_freeze(struct device *dev)
532{
533 struct generic_pm_domain *genpd;
534
535 dev_dbg(dev, "%s()\n", __func__);
536
537 genpd = dev_to_genpd(dev);
538 if (IS_ERR(genpd))
539 return -EINVAL;
540
541 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
542}
543
544/**
545 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
546 * @dev: Device to freeze.
547 *
548 * Carry out a late freeze of a device under the assumption that its
549 * pm_domain field points to the domain member of an object of type
550 * struct generic_pm_domain representing a power domain consisting of I/O
551 * devices.
552 */
553static int pm_genpd_freeze_noirq(struct device *dev)
554{
555 struct generic_pm_domain *genpd;
556 int ret;
557
558 dev_dbg(dev, "%s()\n", __func__);
559
560 genpd = dev_to_genpd(dev);
561 if (IS_ERR(genpd))
562 return -EINVAL;
563
564 if (genpd->suspend_power_off)
565 return 0;
566
567 ret = pm_generic_freeze_noirq(dev);
568 if (ret)
569 return ret;
570
571 if (genpd->stop_device)
572 genpd->stop_device(dev);
573
574 return 0;
575}
576
577/**
578 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
579 * @dev: Device to thaw.
580 *
581 * Carry out an early thaw of a device under the assumption that its
582 * pm_domain field points to the domain member of an object of type
583 * struct generic_pm_domain representing a power domain consisting of I/O
584 * devices.
585 */
586static int pm_genpd_thaw_noirq(struct device *dev)
587{
588 struct generic_pm_domain *genpd;
589
590 dev_dbg(dev, "%s()\n", __func__);
591
592 genpd = dev_to_genpd(dev);
593 if (IS_ERR(genpd))
594 return -EINVAL;
595
596 if (genpd->suspend_power_off)
597 return 0;
598
599 if (genpd->start_device)
600 genpd->start_device(dev);
601
602 return pm_generic_thaw_noirq(dev);
603}
604
605/**
606 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
607 * @dev: Device to thaw.
608 *
609 * Thaw a device under the assumption that its pm_domain field points to the
610 * domain member of an object of type struct generic_pm_domain representing
611 * a power domain consisting of I/O devices.
612 */
613static int pm_genpd_thaw(struct device *dev)
614{
615 struct generic_pm_domain *genpd;
616
617 dev_dbg(dev, "%s()\n", __func__);
618
619 genpd = dev_to_genpd(dev);
620 if (IS_ERR(genpd))
621 return -EINVAL;
622
623 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
624}
625
626/**
627 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
628 * @dev: Device to suspend.
629 *
630 * Power off a device under the assumption that its pm_domain field points to
631 * the domain member of an object of type struct generic_pm_domain representing
632 * a PM domain consisting of I/O devices.
633 */
634static int pm_genpd_dev_poweroff(struct device *dev)
635{
636 struct generic_pm_domain *genpd;
637
638 dev_dbg(dev, "%s()\n", __func__);
639
640 genpd = dev_to_genpd(dev);
641 if (IS_ERR(genpd))
642 return -EINVAL;
643
644 return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
645}
646
647/**
648 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
649 * @dev: Device to suspend.
650 *
651 * Carry out a late powering off of a device under the assumption that its
652 * pm_domain field points to the domain member of an object of type
653 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
654 */
655static int pm_genpd_dev_poweroff_noirq(struct device *dev)
656{
657 struct generic_pm_domain *genpd;
658 int ret;
659
660 dev_dbg(dev, "%s()\n", __func__);
661
662 genpd = dev_to_genpd(dev);
663 if (IS_ERR(genpd))
664 return -EINVAL;
665
666 if (genpd->suspend_power_off)
667 return 0;
668
669 ret = pm_generic_poweroff_noirq(dev);
670 if (ret)
671 return ret;
672
673 if (genpd->stop_device)
674 genpd->stop_device(dev);
675
676 /*
677 * Since all of the "noirq" callbacks are executed sequentially, it is
678 * guaranteed that this function will never run twice in parallel for
679 * the same PM domain, so it is not necessary to use locking here.
680 */
681 genpd->suspended_count++;
682 pm_genpd_sync_poweroff(genpd);
683
684 return 0;
685}
686
/**
 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
 * @dev: Device to resume.
 *
 * Carry out an early restore of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * After hibernation the domain's real state is unknown, so mark it
	 * off here and let pm_genpd_poweron() below bring it up if needed.
	 */
	genpd->power_is_off = true;
	if (genpd->suspend_power_off) {
		/*
		 * The boot kernel might put the domain into the power on state,
		 * so make sure it really is powered off.
		 */
		if (genpd->power_off)
			genpd->power_off(genpd);
		return 0;
	}

	pm_genpd_poweron(genpd);
	genpd->suspended_count--;
	if (genpd->start_device)
		genpd->start_device(dev);

	return pm_generic_restore_noirq(dev);
}
729
730/**
731 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
732 * @dev: Device to resume.
733 *
734 * Restore a device under the assumption that its pm_domain field points to the
735 * domain member of an object of type struct generic_pm_domain representing
736 * a power domain consisting of I/O devices.
737 */
738static int pm_genpd_restore(struct device *dev)
739{
740 struct generic_pm_domain *genpd;
741
742 dev_dbg(dev, "%s()\n", __func__);
743
744 genpd = dev_to_genpd(dev);
745 if (IS_ERR(genpd))
746 return -EINVAL;
747
748 return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
749}
750
751/**
752 * pm_genpd_complete - Complete power transition of a device in a power domain.
753 * @dev: Device to complete the transition of.
754 *
755 * Complete a power transition of a device (during a system-wide power
756 * transition) under the assumption that its pm_domain field points to the
757 * domain member of an object of type struct generic_pm_domain representing
758 * a power domain consisting of I/O devices.
759 */
760static void pm_genpd_complete(struct device *dev)
761{
762 struct generic_pm_domain *genpd;
763 bool run_complete;
764
765 dev_dbg(dev, "%s()\n", __func__);
766
767 genpd = dev_to_genpd(dev);
768 if (IS_ERR(genpd))
769 return;
770
771 mutex_lock(&genpd->lock);
772
773 run_complete = !genpd->suspend_power_off;
774 if (--genpd->prepared_count == 0)
775 genpd->suspend_power_off = false;
776
777 mutex_unlock(&genpd->lock);
778
779 if (run_complete) {
780 pm_generic_complete(dev);
781 pm_runtime_enable(dev);
782 }
783}
784
#else

/* CONFIG_PM_SLEEP is not set: no system-wide PM callbacks are provided. */
#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_dev_poweroff_noirq	NULL
#define pm_genpd_dev_poweroff		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_restore		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */
803
f721889f
RW
/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 *
 * Returns 0 on success, -EINVAL for invalid arguments, a powered-off domain
 * or a device already in the domain, -EAGAIN while a system-wide transition
 * is in progress, or -ENOMEM on allocation failure.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	struct dev_list_entry *dle;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	/* Devices may only be added to a powered-on domain. */
	if (genpd->power_is_off) {
		ret = -EINVAL;
		goto out;
	}

	/* Do not change the membership during a system-wide transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Reject duplicates. */
	list_for_each_entry(dle, &genpd->dev_list, node)
		if (dle->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
	if (!dle) {
		ret = -ENOMEM;
		goto out;
	}

	dle->dev = dev;
	dle->need_restore = false;
	list_add_tail(&dle->node, &genpd->dev_list);
	genpd->device_count++;

	/* Point the device's PM callbacks at this domain. */
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = &genpd->domain;
	spin_unlock_irq(&dev->power.lock);

 out:
	mutex_unlock(&genpd->lock);

	return ret;
}
857
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 *
 * Returns 0 on success, -EINVAL for invalid arguments or when @dev is not in
 * @genpd, or -EAGAIN while a system-wide transition is in progress.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct dev_list_entry *dle;
	int ret = -EINVAL;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	/* Do not change the membership during a system-wide transition. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(dle, &genpd->dev_list, node) {
		if (dle->dev != dev)
			continue;

		/* Detach the device from the domain's callbacks. */
		spin_lock_irq(&dev->power.lock);
		dev->pm_domain = NULL;
		spin_unlock_irq(&dev->power.lock);

		genpd->device_count--;
		list_del(&dle->node);
		kfree(dle);

		ret = 0;
		break;
	}

 out:
	mutex_unlock(&genpd->lock);

	return ret;
}
902
903/**
904 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
905 * @genpd: Master PM domain to add the subdomain to.
906 * @new_subdomain: Subdomain to be added.
907 */
908int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
909 struct generic_pm_domain *new_subdomain)
910{
911 struct generic_pm_domain *subdomain;
912 int ret = 0;
913
914 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
915 return -EINVAL;
916
917 mutex_lock(&genpd->lock);
918
919 if (genpd->power_is_off && !new_subdomain->power_is_off) {
920 ret = -EINVAL;
921 goto out;
922 }
923
924 list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
925 if (subdomain == new_subdomain) {
926 ret = -EINVAL;
927 goto out;
928 }
929 }
930
931 mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
932
933 list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
934 new_subdomain->parent = genpd;
935 if (!subdomain->power_is_off)
936 genpd->sd_count++;
937
938 mutex_unlock(&new_subdomain->lock);
939
940 out:
941 mutex_unlock(&genpd->lock);
942
943 return ret;
944}
945
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @target: Subdomain to be removed.
 *
 * Returns 0 on success or -EINVAL for invalid arguments or when @target is
 * not a subdomain of @genpd.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *target)
{
	struct generic_pm_domain *subdomain;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
		if (subdomain != target)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		list_del(&subdomain->sd_node);
		subdomain->parent = NULL;
		/* A powered-on subdomain contributed to the master's count. */
		if (!subdomain->power_is_off)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	mutex_unlock(&genpd->lock);

	return ret;
}
983
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->sd_node);
	genpd->parent = NULL;
	INIT_LIST_HEAD(&genpd->dev_list);
	INIT_LIST_HEAD(&genpd->sd_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	genpd->sd_count = 0;
	genpd->power_is_off = is_off;
	genpd->device_count = 0;
	genpd->suspended_count = 0;
	/* Runtime PM callbacks (NULL stubs when CONFIG_PM_RUNTIME is unset). */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	/* System-wide PM callbacks (NULL stubs when CONFIG_PM_SLEEP is unset). */
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore = pm_genpd_restore;
	genpd->domain.ops.complete = pm_genpd_complete;
}