PM / Domains: Cache device stop and domain power off governor results, v3
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / base / power / domain.c
CommitLineData
f721889f
RW
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
6ff7bb0d 14#include <linux/pm_qos.h>
f721889f
RW
15#include <linux/slab.h>
16#include <linux/err.h>
17b75eca
RW
17#include <linux/sched.h>
18#include <linux/suspend.h>
d5e4cbfe
RW
19#include <linux/export.h>
20
/*
 * Invoke the @callback device PM operation for @dev.  The domain-wide
 * callback in genpd->dev_ops takes precedence; if it is not set, fall
 * back to the per-device callback in the device's generic_pm_domain_data.
 * Evaluates to (type)0 when neither callback is provided.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

/*
 * Like GENPD_DEV_CALLBACK(), but also measure the callback's execution
 * time.  On success, if the measured time exceeds the worst case recorded
 * in @field of the device's timing data, store the new worst case, warn,
 * and set the "changed" flags so that cached governor results depending
 * on this latency are recomputed.
 */
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

/* All registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

5248051b
RW
56#ifdef CONFIG_PM
57
b02c999a 58struct generic_pm_domain *dev_to_genpd(struct device *dev)
5248051b
RW
59{
60 if (IS_ERR_OR_NULL(dev->pm_domain))
61 return ERR_PTR(-EINVAL);
62
596ba34b 63 return pd_to_genpd(dev->pm_domain);
5248051b 64}
f721889f 65
d5e4cbfe
RW
/* Stop @dev via its "stop" callback, tracking worst-case stop latency. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}
71
/* Start @dev via its "start" callback, tracking worst-case start latency. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}
77
ecf00475
RW
/* Save @dev's state via its "save_state" callback, tracking its latency. */
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}
83
/* Restore @dev's state via its "restore_state" callback, tracking its latency. */
static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}
90
c4bb3160 91static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
f721889f 92{
c4bb3160
RW
93 bool ret = false;
94
95 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
96 ret = !!atomic_dec_and_test(&genpd->sd_count);
97
98 return ret;
99}
100
/*
 * Increment the domain's active-subdomain count.  The full barrier after
 * the increment orders it against subsequent reads of sd_count done
 * without the domain lock (see pm_genpd_poweroff()).
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}
106
17b75eca
RW
/*
 * Acquire genpd->lock and wait until the domain is in a stable state
 * (either active or powered off), dropping the lock while sleeping so
 * the transition in progress can complete.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}
130
/* Counterpart of genpd_acquire_lock(). */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}
135
c6d22b37
RW
/*
 * Mark the domain active, unless a device resume is in progress, in
 * which case pm_genpd_runtime_resume() will set the status itself.
 */
static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}
141
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Called with genpd->lock held; may drop and re-acquire it while waiting
 * or while powering up masters.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	/* Nothing to do if already active or power off is deferred for suspend. */
	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		/* Track the worst-case power-on latency for the governor. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd_set_active(genpd);

	return 0;

 err:
	/* Undo the subdomain-count increments done for the already-visited masters. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}
235
/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Locked wrapper around __pm_genpd_poweron().
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
249
250#endif /* CONFIG_PM */
251
252#ifdef CONFIG_PM_RUNTIME
253
6ff7bb0d
RW
/*
 * PM QoS notifier: a device's PM QoS constraint changed, so invalidate
 * the cached timing data of the device and the max_off_time of its
 * domain, walking up the parent chain as long as parents account for
 * their children's constraints.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	/* The device may be going away; bail out if it is already cleared. */
	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
300
f721889f
RW
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Called with genpd->lock held; drops it around the device callbacks.
 * No-op if the device's state has already been saved.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	/* The device must be operational while its state is saved. */
	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}
330
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Called with genpd->lock held; drops it around the device callbacks.
 * No-op unless a previously saved state is pending restoration.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;

	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	/* The device must be operational while its state is restored. */
	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	gpd_data->need_restore = false;
}
356
c6d22b37
RW
357/**
358 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
359 * @genpd: PM domain to check.
360 *
361 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
362 * a "power off" operation, which means that a "power on" has occured in the
363 * meantime, or if its resume_count field is different from zero, which means
364 * that one of its devices has been resumed in the meantime.
365 */
366static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
367{
17877eb5 368 return genpd->status == GPD_STATE_WAIT_MASTER
3f241775 369 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
c6d22b37
RW
370}
371
56375fd4
RW
/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domait to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}
384
f721889f
RW
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Called with genpd->lock held; may drop and re-acquire it via
 * __pm_genpd_save_device().
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * Count bound devices that are not runtime-suspended (or cannot be
	 * stopped); more than the ones suspending right now means busy.
	 */
	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		/* The lock was dropped above; the domain may have changed state. */
		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Another poweroff request arrived meanwhile; start over. */
		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		/* Track the worst-case power-off latency for the governor. */
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	/* Drop the master references and give them a chance to power off too. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}
509
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}
524
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* "Always on" devices must not be stopped. */
	if (dev_gpd_data(dev)->always_on)
		return -EBUSY;

	/* Let the governor veto the stop based on QoS constraints. */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	/* in_progress tells pm_genpd_poweroff() this device counts as suspended. */
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
573
f721889f
RW
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		goto out;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

 out:
	genpd_start_dev(genpd, dev);

	return 0;
}
636
17f2ae7f
RW
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 *
 * Queues a power-off attempt for every registered domain; each attempt
 * bails out on its own if the domain is still in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
651
f721889f
RW
652#else
653
6ff7bb0d
RW
/* Inert stubs used when CONFIG_PM_RUNTIME is not set. */
static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL
664
665#endif /* CONFIG_PM_RUNTIME */
666
596ba34b
RW
667#ifdef CONFIG_PM_SLEEP
668
d5e4cbfe
RW
/* Ask the domain/device "active_wakeup" callback whether @dev must stay active to wake the system. */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
674
d23b9b00
RW
/*
 * Thin wrappers dispatching the system sleep transitions to the domain-
 * or device-specific callbacks via GENPD_DEV_CALLBACK(), one per phase
 * of the suspend/hibernation sequence.
 */
static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}
714
/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	/* All devices must be suspended and no subdomains may be active. */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	/* Recursively give each master a chance to power off as well. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}
747
4ecd6e65
RW
748/**
749 * resume_needed - Check whether to resume a device before system suspend.
750 * @dev: Device to check.
751 * @genpd: PM domain the device belongs to.
752 *
753 * There are two cases in which a device that can wake up the system from sleep
754 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
755 * to wake up the system and it has to remain active for this purpose while the
756 * system is in the sleep state and (2) if the device is not enabled to wake up
757 * the system from sleep states and it generally doesn't generate wakeup signals
758 * by itself (those signals are generated on its behalf by other parts of the
759 * system). In the latter case it may be necessary to reconfigure the device's
760 * wakeup settings during system suspend, because it may have been set up to
761 * signal remote wakeup from the system's working state as needed by runtime PM.
762 * Return 'true' in either of the above cases.
763 */
764static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
765{
766 bool active_wakeup;
767
768 if (!device_can_wakeup(dev))
769 return false;
770
d5e4cbfe 771 active_wakeup = genpd_dev_active_wakeup(genpd, dev);
4ecd6e65
RW
772 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
773}
774
596ba34b
RW
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	/* First device to prepare: latch whether power off is to be skipped. */
	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		/* Roll back the prepared count on failure. */
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}
849
/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}
870
/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}
596ba34b 891
0496c8ae
RW
/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Skip always-on devices and devices needed on the wakeup path. */
	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}
925
/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Devices skipped by pm_genpd_suspend_noirq() were never stopped. */
	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}
956
/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}
978
/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}
999
/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}
1020
1021/**
0496c8ae
RW
1022 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1023 * @dev: Device to freeze.
1024 *
1025 * Carry out a late freeze of a device under the assumption that its
1026 * pm_domain field points to the domain member of an object of type
1027 * struct generic_pm_domain representing a power domain consisting of I/O
1028 * devices.
1029 */
1030static int pm_genpd_freeze_late(struct device *dev)
1031{
1032 struct generic_pm_domain *genpd;
1033
1034 dev_dbg(dev, "%s()\n", __func__);
1035
1036 genpd = dev_to_genpd(dev);
1037 if (IS_ERR(genpd))
1038 return -EINVAL;
1039
1040 return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
1041}
1042
1043/**
1044 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
596ba34b
RW
1045 * @dev: Device to freeze.
1046 *
1047 * Carry out a late freeze of a device under the assumption that its
1048 * pm_domain field points to the domain member of an object of type
1049 * struct generic_pm_domain representing a power domain consisting of I/O
1050 * devices.
1051 */
1052static int pm_genpd_freeze_noirq(struct device *dev)
1053{
1054 struct generic_pm_domain *genpd;
596ba34b
RW
1055
1056 dev_dbg(dev, "%s()\n", __func__);
1057
1058 genpd = dev_to_genpd(dev);
1059 if (IS_ERR(genpd))
1060 return -EINVAL;
1061
1e78a0c7
RW
1062 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1063 0 : genpd_stop_dev(genpd, dev);
0496c8ae 1064}
596ba34b 1065
0496c8ae
RW
1066/**
1067 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1068 * @dev: Device to thaw.
1069 *
1070 * Start the device, unless power has been removed from the domain already
1071 * before the system transition.
1072 */
1073static int pm_genpd_thaw_noirq(struct device *dev)
1074{
1075 struct generic_pm_domain *genpd;
596ba34b 1076
0496c8ae 1077 dev_dbg(dev, "%s()\n", __func__);
596ba34b 1078
0496c8ae
RW
1079 genpd = dev_to_genpd(dev);
1080 if (IS_ERR(genpd))
1081 return -EINVAL;
1082
1e78a0c7
RW
1083 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1084 0 : genpd_start_dev(genpd, dev);
596ba34b
RW
1085}
1086
1087/**
0496c8ae 1088 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
596ba34b
RW
1089 * @dev: Device to thaw.
1090 *
1091 * Carry out an early thaw of a device under the assumption that its
1092 * pm_domain field points to the domain member of an object of type
1093 * struct generic_pm_domain representing a power domain consisting of I/O
1094 * devices.
1095 */
0496c8ae 1096static int pm_genpd_thaw_early(struct device *dev)
596ba34b
RW
1097{
1098 struct generic_pm_domain *genpd;
1099
1100 dev_dbg(dev, "%s()\n", __func__);
1101
1102 genpd = dev_to_genpd(dev);
1103 if (IS_ERR(genpd))
1104 return -EINVAL;
1105
0496c8ae 1106 return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
596ba34b
RW
1107}
1108
1109/**
1110 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1111 * @dev: Device to thaw.
1112 *
1113 * Thaw a device under the assumption that its pm_domain field points to the
1114 * domain member of an object of type struct generic_pm_domain representing
1115 * a power domain consisting of I/O devices.
1116 */
1117static int pm_genpd_thaw(struct device *dev)
1118{
1119 struct generic_pm_domain *genpd;
1120
1121 dev_dbg(dev, "%s()\n", __func__);
1122
1123 genpd = dev_to_genpd(dev);
1124 if (IS_ERR(genpd))
1125 return -EINVAL;
1126
d23b9b00 1127 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
596ba34b
RW
1128}
1129
/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_poweron(), so
		 * that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	/*
	 * Not the first device of the domain: if the domain is treated as
	 * powered off for the transition, there is nothing more to do here.
	 */
	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_poweron(genpd);

	/* "Always on" devices were never stopped, so do not restart them. */
	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
}
1181
1182/**
1183 * pm_genpd_complete - Complete power transition of a device in a power domain.
1184 * @dev: Device to complete the transition of.
1185 *
1186 * Complete a power transition of a device (during a system-wide power
1187 * transition) under the assumption that its pm_domain field points to the
1188 * domain member of an object of type struct generic_pm_domain representing
1189 * a power domain consisting of I/O devices.
1190 */
1191static void pm_genpd_complete(struct device *dev)
1192{
1193 struct generic_pm_domain *genpd;
1194 bool run_complete;
1195
1196 dev_dbg(dev, "%s()\n", __func__);
1197
1198 genpd = dev_to_genpd(dev);
1199 if (IS_ERR(genpd))
1200 return;
1201
1202 mutex_lock(&genpd->lock);
1203
1204 run_complete = !genpd->suspend_power_off;
1205 if (--genpd->prepared_count == 0)
1206 genpd->suspend_power_off = false;
1207
1208 mutex_unlock(&genpd->lock);
1209
1210 if (run_complete) {
1211 pm_generic_complete(dev);
6f00ff78 1212 pm_runtime_set_active(dev);
596ba34b 1213 pm_runtime_enable(dev);
6f00ff78 1214 pm_runtime_idle(dev);
596ba34b
RW
1215 }
1216}
1217
1218#else
1219
1220#define pm_genpd_prepare NULL
1221#define pm_genpd_suspend NULL
0496c8ae 1222#define pm_genpd_suspend_late NULL
596ba34b 1223#define pm_genpd_suspend_noirq NULL
0496c8ae 1224#define pm_genpd_resume_early NULL
596ba34b
RW
1225#define pm_genpd_resume_noirq NULL
1226#define pm_genpd_resume NULL
1227#define pm_genpd_freeze NULL
0496c8ae 1228#define pm_genpd_freeze_late NULL
596ba34b 1229#define pm_genpd_freeze_noirq NULL
0496c8ae 1230#define pm_genpd_thaw_early NULL
596ba34b
RW
1231#define pm_genpd_thaw_noirq NULL
1232#define pm_genpd_thaw NULL
596ba34b 1233#define pm_genpd_restore_noirq NULL
596ba34b
RW
1234#define pm_genpd_complete NULL
1235
1236#endif /* CONFIG_PM_SLEEP */
1237
/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device (optional).
 *
 * Returns 0 on success, -EINVAL on invalid arguments, if the domain is
 * powered off, or if @dev is already a member, -EAGAIN while a system
 * transition is in progress, and -ENOMEM on allocation failure.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	/* Allocate and register the QoS notifier before taking genpd's lock. */
	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return -ENOMEM;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	genpd_acquire_lock(genpd);

	if (genpd->status == GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Reject duplicate registration of the same device. */
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	dev_pm_get_subsys_data(dev);

	/* Publish the domain data under both locks so readers see it whole. */
	mutex_lock(&gpd_data->lock);
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = &genpd->domain;
	dev->power.subsys_data->domain_data = &gpd_data->base;
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = false;
	if (td)
		gpd_data->td = *td;

	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	spin_unlock_irq(&dev->power.lock);
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	return 0;

 out:
	genpd_release_lock(genpd);

	/* Undo the notifier registration and free the unused data on failure. */
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
	return ret;
}
1313
c8aa130b
TA
1314/**
1315 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1316 * @genpd_node: Device tree node pointer representing a PM domain to which the
1317 * the device is added to.
1318 * @dev: Device to be added.
1319 * @td: Set of PM QoS timing parameters to attach to the device.
1320 */
1321int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1322 struct gpd_timing_data *td)
1323{
1324 struct generic_pm_domain *genpd = NULL, *gpd;
1325
1326 dev_dbg(dev, "%s()\n", __func__);
1327
1328 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1329 return -EINVAL;
1330
1331 mutex_lock(&gpd_list_lock);
1332 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1333 if (gpd->of_node == genpd_node) {
1334 genpd = gpd;
1335 break;
1336 }
1337 }
1338 mutex_unlock(&gpd_list_lock);
1339
1340 if (!genpd)
1341 return -EINVAL;
1342
1343 return __pm_genpd_add_device(genpd, dev, td);
1344}
1345
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 *
 * Returns 0 on success, -EINVAL if @dev is not a member of @genpd, and
 * -EAGAIN while a system transition is in progress.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    || IS_ERR_OR_NULL(dev->pm_domain)
	    || pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	/* Detach the device from the domain under its power lock. */
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	dev->power.subsys_data->domain_data = NULL;
	spin_unlock_irq(&dev->power.lock);

	/* Clear the back-pointer so notifier callbacks see a dead entry. */
	gpd_data = to_gpd_data(pdd);
	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}
1399
1e78a0c7
RW
1400/**
1401 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1402 * @dev: Device to set/unset the flag for.
1403 * @val: The new value of the device's "always on" flag.
1404 */
1405void pm_genpd_dev_always_on(struct device *dev, bool val)
1406{
1407 struct pm_subsys_data *psd;
1408 unsigned long flags;
1409
1410 spin_lock_irqsave(&dev->power.lock, flags);
1411
1412 psd = dev_to_psd(dev);
1413 if (psd && psd->domain_data)
1414 to_gpd_data(psd->domain_data)->always_on = val;
1415
1416 spin_unlock_irqrestore(&dev->power.lock, flags);
1417}
1418EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1419
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 *
 * Returns 0 on success, -EINVAL on invalid arguments, an inconsistent
 * power state between master and subdomain, or a duplicate link, and
 * -ENOMEM on allocation failure.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	/*
	 * If the subdomain is mid-transition, drop both locks and retry so
	 * that we only link domains in a settled state.
	 */
	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	/* A powered-off master cannot have a powered-on subdomain. */
	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Reject a duplicate master/subdomain link. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	/* An active subdomain keeps its master powered. */
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}
1476
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Returns 0 on success and -EINVAL if the arguments are invalid or no
 * link between @genpd and @subdomain exists.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		/*
		 * If the subdomain is mid-transition, drop the locks and
		 * retry from scratch so it is unlinked in a settled state.
		 */
		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		/* Drop the master's reference held for an active subdomain. */
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
1523
d5e4cbfe
RW
1524/**
1525 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1526 * @dev: Device to add the callbacks to.
1527 * @ops: Set of callbacks to add.
b02c999a 1528 * @td: Timing data to add to the device along with the callbacks (optional).
d5e4cbfe 1529 */
b02c999a
RW
1530int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1531 struct gpd_timing_data *td)
d5e4cbfe
RW
1532{
1533 struct pm_domain_data *pdd;
1534 int ret = 0;
1535
1536 if (!(dev && dev->power.subsys_data && ops))
1537 return -EINVAL;
1538
1539 pm_runtime_disable(dev);
1540 device_pm_lock();
1541
1542 pdd = dev->power.subsys_data->domain_data;
1543 if (pdd) {
1544 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1545
1546 gpd_data->ops = *ops;
b02c999a
RW
1547 if (td)
1548 gpd_data->td = *td;
d5e4cbfe
RW
1549 } else {
1550 ret = -EINVAL;
1551 }
1552
1553 device_pm_unlock();
1554 pm_runtime_enable(dev);
1555
1556 return ret;
1557}
1558EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1559
1560/**
b02c999a 1561 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
d5e4cbfe 1562 * @dev: Device to remove the callbacks from.
b02c999a 1563 * @clear_td: If set, clear the device's timing data too.
d5e4cbfe 1564 */
b02c999a 1565int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
d5e4cbfe
RW
1566{
1567 struct pm_domain_data *pdd;
1568 int ret = 0;
1569
1570 if (!(dev && dev->power.subsys_data))
1571 return -EINVAL;
1572
1573 pm_runtime_disable(dev);
1574 device_pm_lock();
1575
1576 pdd = dev->power.subsys_data->domain_data;
1577 if (pdd) {
1578 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1579
1580 gpd_data->ops = (struct gpd_dev_ops){ 0 };
b02c999a
RW
1581 if (clear_td)
1582 gpd_data->td = (struct gpd_timing_data){ 0 };
d5e4cbfe
RW
1583 } else {
1584 ret = -EINVAL;
1585 }
1586
1587 device_pm_unlock();
1588 pm_runtime_enable(dev);
1589
1590 return ret;
1591}
b02c999a 1592EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
d5e4cbfe 1593
d23b9b00
RW
1594/* Default device callbacks for generic PM domains. */
1595
ecf00475
RW
1596/**
1597 * pm_genpd_default_save_state - Default "save device state" for PM domians.
1598 * @dev: Device to handle.
1599 */
1600static int pm_genpd_default_save_state(struct device *dev)
1601{
1602 int (*cb)(struct device *__dev);
1603 struct device_driver *drv = dev->driver;
1604
1605 cb = dev_gpd_data(dev)->ops.save_state;
1606 if (cb)
1607 return cb(dev);
1608
1609 if (drv && drv->pm && drv->pm->runtime_suspend)
1610 return drv->pm->runtime_suspend(dev);
1611
1612 return 0;
1613}
1614
1615/**
1616 * pm_genpd_default_restore_state - Default PM domians "restore device state".
1617 * @dev: Device to handle.
1618 */
1619static int pm_genpd_default_restore_state(struct device *dev)
1620{
1621 int (*cb)(struct device *__dev);
1622 struct device_driver *drv = dev->driver;
1623
1624 cb = dev_gpd_data(dev)->ops.restore_state;
1625 if (cb)
1626 return cb(dev);
1627
1628 if (drv && drv->pm && drv->pm->runtime_resume)
1629 return drv->pm->runtime_resume(dev);
1630
1631 return 0;
1632}
1633
0f1d6986
RW
1634#ifdef CONFIG_PM_SLEEP
1635
d23b9b00
RW
1636/**
1637 * pm_genpd_default_suspend - Default "device suspend" for PM domians.
1638 * @dev: Device to handle.
1639 */
1640static int pm_genpd_default_suspend(struct device *dev)
1641{
c9914854 1642 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
d23b9b00
RW
1643
1644 return cb ? cb(dev) : pm_generic_suspend(dev);
1645}
1646
1647/**
1648 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
1649 * @dev: Device to handle.
1650 */
1651static int pm_genpd_default_suspend_late(struct device *dev)
1652{
c9914854 1653 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
d23b9b00 1654
0496c8ae 1655 return cb ? cb(dev) : pm_generic_suspend_late(dev);
d23b9b00
RW
1656}
1657
1658/**
1659 * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
1660 * @dev: Device to handle.
1661 */
1662static int pm_genpd_default_resume_early(struct device *dev)
1663{
c9914854 1664 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
d23b9b00 1665
0496c8ae 1666 return cb ? cb(dev) : pm_generic_resume_early(dev);
d23b9b00
RW
1667}
1668
1669/**
1670 * pm_genpd_default_resume - Default "device resume" for PM domians.
1671 * @dev: Device to handle.
1672 */
1673static int pm_genpd_default_resume(struct device *dev)
1674{
c9914854 1675 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
d23b9b00
RW
1676
1677 return cb ? cb(dev) : pm_generic_resume(dev);
1678}
1679
1680/**
1681 * pm_genpd_default_freeze - Default "device freeze" for PM domians.
1682 * @dev: Device to handle.
1683 */
1684static int pm_genpd_default_freeze(struct device *dev)
1685{
1686 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1687
1688 return cb ? cb(dev) : pm_generic_freeze(dev);
1689}
1690
1691/**
1692 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
1693 * @dev: Device to handle.
1694 */
1695static int pm_genpd_default_freeze_late(struct device *dev)
1696{
1697 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1698
0496c8ae 1699 return cb ? cb(dev) : pm_generic_freeze_late(dev);
d23b9b00
RW
1700}
1701
1702/**
1703 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
1704 * @dev: Device to handle.
1705 */
1706static int pm_genpd_default_thaw_early(struct device *dev)
1707{
1708 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1709
0496c8ae 1710 return cb ? cb(dev) : pm_generic_thaw_early(dev);
d23b9b00
RW
1711}
1712
1713/**
1714 * pm_genpd_default_thaw - Default "device thaw" for PM domians.
1715 * @dev: Device to handle.
1716 */
1717static int pm_genpd_default_thaw(struct device *dev)
1718{
1719 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1720
1721 return cb ? cb(dev) : pm_generic_thaw(dev);
1722}
1723
0f1d6986
RW
1724#else /* !CONFIG_PM_SLEEP */
1725
1726#define pm_genpd_default_suspend NULL
1727#define pm_genpd_default_suspend_late NULL
1728#define pm_genpd_default_resume_early NULL
1729#define pm_genpd_default_resume NULL
1730#define pm_genpd_default_freeze NULL
1731#define pm_genpd_default_freeze_late NULL
1732#define pm_genpd_default_thaw_early NULL
1733#define pm_genpd_default_thaw NULL
1734
1735#endif /* !CONFIG_PM_SLEEP */
1736
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Whether the domain starts out powered off (sets @genpd->status).
 *
 * Initializes all bookkeeping fields, installs the genpd system/runtime PM
 * callbacks and the default device callbacks, and registers @genpd on the
 * global domain list.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	/* Internal bookkeeping. */
	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	/* System and runtime PM callbacks exposed through dev->pm_domain. */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	/* Poweroff (hibernation) reuses the suspend callbacks. */
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	/* Default per-device callbacks; drivers may override via dev_ops. */
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	/* Make the domain visible to __pm_genpd_of_add_device() et al. */
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}