#include <linux/pm_runtime.h>
#include <linux/jiffies.h>
-static int __pm_runtime_resume(struct device *dev, bool from_wq);
+static int __pm_runtime_resume(struct device *dev, int rpmflags);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);
/**
* __pm_runtime_suspend - Carry out run-time suspend of given device.
* @dev: Device to suspend.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
*
* Check if the device can be suspended and run the ->runtime_suspend() callback
- * provided by its bus type. If another suspend has been started earlier, wait
- * for it to finish. If an idle notification or suspend request is pending or
+ * provided by its bus type. If another suspend has been started earlier,
+ * either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT flag. If an idle notification or suspend request is pending or
* scheduled, cancel it.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
-int __pm_runtime_suspend(struct device *dev, bool from_wq)
+static int __pm_runtime_suspend(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
struct device *parent = NULL;
bool notify = false;
int retval = 0;
- dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
- from_wq ? " from workqueue" : "");
+ dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
repeat:
if (dev->power.runtime_error) {
if (dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
- if (from_wq) {
+ if (rpmflags & RPM_NOWAIT) {
retval = -EINPROGRESS;
goto out;
}
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
- __pm_runtime_resume(dev, false);
+ __pm_runtime_resume(dev, 0);
retval = -EAGAIN;
goto out;
}
}
out:
- dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
+ dev_dbg(dev, "%s returns %d\n", __func__, retval);
return retval;
}
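For context, the wait that RPM_NOWAIT lets a caller skip looks roughly like the sketch below, reconstructed from the DEFINE_WAIT(wait) and dev->power.wait_queue usage above; it is not part of this hunk. A synchronous caller sleeps until the concurrent suspend finishes and then retries from the repeat label.

	/* Sketch only: the elided wait path taken when RPM_NOWAIT is not set. */
	for (;;) {
		prepare_to_wait(&dev->power.wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (dev->power.runtime_status != RPM_SUSPENDING)
			break;

		/* Drop the lock so the other suspend can make progress. */
		spin_unlock_irq(&dev->power.lock);
		schedule();
		spin_lock_irq(&dev->power.lock);
	}
	finish_wait(&dev->power.wait_queue, &wait);
	goto repeat;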
int retval;
spin_lock_irq(&dev->power.lock);
- retval = __pm_runtime_suspend(dev, false);
+ retval = __pm_runtime_suspend(dev, 0);
spin_unlock_irq(&dev->power.lock);
return retval;
/**
* __pm_runtime_resume - Carry out run-time resume of given device.
* @dev: Device to resume.
- * @from_wq: If set, the function has been called via pm_wq.
+ * @rpmflags: Flag bits.
*
* Check if the device can be woken up and run the ->runtime_resume() callback
- * provided by its bus type. If another resume has been started earlier, wait
- * for it to finish. If there's a suspend running in parallel with this
- * function, wait for it to finish and resume the device. Cancel any scheduled
- * or pending requests.
+ * provided by its bus type. If another resume has been started earlier,
+ * either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT flag. If there's a suspend running in parallel with this
+ * function, either tell the other process to resume after suspending
+ * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT
+ * flag. Cancel any scheduled or pending requests.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
-int __pm_runtime_resume(struct device *dev, bool from_wq)
+static int __pm_runtime_resume(struct device *dev, int rpmflags)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
struct device *parent = NULL;
int retval = 0;
- dev_dbg(dev, "__pm_runtime_resume()%s!\n",
- from_wq ? " from workqueue" : "");
+ dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
repeat:
if (dev->power.runtime_error) {
|| dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
- if (from_wq) {
+ if (rpmflags & RPM_NOWAIT) {
if (dev->power.runtime_status == RPM_SUSPENDING)
dev->power.deferred_resume = true;
retval = -EINPROGRESS;
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children) {
- __pm_runtime_resume(parent, false);
+ __pm_runtime_resume(parent, 0);
if (parent->power.runtime_status != RPM_ACTIVE)
retval = -EBUSY;
}
spin_lock_irq(&dev->power.lock);
}
- dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
+ dev_dbg(dev, "%s returns %d\n", __func__, retval);
return retval;
}
int retval;
spin_lock_irq(&dev->power.lock);
- retval = __pm_runtime_resume(dev, false);
+ retval = __pm_runtime_resume(dev, 0);
spin_unlock_irq(&dev->power.lock);
return retval;
__pm_runtime_idle(dev);
break;
case RPM_REQ_SUSPEND:
- __pm_runtime_suspend(dev, true);
+ __pm_runtime_suspend(dev, RPM_NOWAIT);
break;
case RPM_REQ_RESUME:
- __pm_runtime_resume(dev, true);
+ __pm_runtime_resume(dev, RPM_NOWAIT);
break;
}
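The RPM_REQ_* cases above belong to the pm_wq work handler, whose body is elided from this excerpt. A rough sketch of the surrounding function is given below, reconstructed only to show the context in which RPM_NOWAIT is passed: a request running on the workqueue never sleeps waiting for a concurrent state change, and a pending resume is recorded via deferred_resume instead.

/* Sketch of the work handler around the switch above; reconstructed, not part of the hunk. */
static void pm_runtime_work_sketch(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);
	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, RPM_NOWAIT);
		break;
	default:
		break;
	}
	spin_unlock_irq(&dev->power.lock);
}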
/**
* __pm_runtime_get - Reference count a device and wake it up, if necessary.
* @dev: Device to handle.
- * @sync: If set and the device is suspended, resume it synchronously.
+ * @rpmflags: Flag bits.
*
* Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the value of @sync.
+ * request for it, depending on the RPM_ASYNC flag bit.
*/
-int __pm_runtime_get(struct device *dev, bool sync)
+int __pm_runtime_get(struct device *dev, int rpmflags)
{
int retval;
atomic_inc(&dev->power.usage_count);
- retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+ retval = (rpmflags & RPM_ASYNC) ?
+ pm_request_resume(dev) : pm_runtime_resume(dev);
return retval;
}
/**
* __pm_runtime_put - Decrement the device's usage counter and notify its bus.
* @dev: Device to handle.
- * @sync: If the device's bus type is to be notified, do that synchronously.
+ * @rpmflags: Flag bits.
*
* Decrement the usage count of the device and if it reaches zero, carry out a
* synchronous idle notification or submit an idle notification request for it,
- * depending on the value of @sync.
+ * depending on the RPM_ASYNC flag bit.
*/
-int __pm_runtime_put(struct device *dev, bool sync)
+int __pm_runtime_put(struct device *dev, int rpmflags)
{
int retval = 0;
if (atomic_dec_and_test(&dev->power.usage_count))
- retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
+ retval = (rpmflags & RPM_ASYNC) ?
+ pm_request_idle(dev) : pm_runtime_idle(dev);
return retval;
}
if (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME) {
- __pm_runtime_resume(dev, false);
+ __pm_runtime_resume(dev, 0);
retval = 1;
}
*/
pm_runtime_get_noresume(dev);
- __pm_runtime_resume(dev, false);
+ __pm_runtime_resume(dev, 0);
pm_runtime_put_noidle(dev);
}
dev->power.runtime_auto = false;
atomic_inc(&dev->power.usage_count);
- __pm_runtime_resume(dev, false);
+ __pm_runtime_resume(dev, 0);
out:
spin_unlock_irq(&dev->power.lock);
#include <linux/device.h>
#include <linux/pm.h>
+/* Runtime PM flag argument bits */
+#define RPM_ASYNC 0x01 /* Request is asynchronous */
+#define RPM_NOWAIT 0x02 /* Don't wait for concurrent
+ state change */
+
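Taken together with the inline wrappers further down, the flag bits map onto the existing API as summarized below (derived from the hunks in this patch):

/*
 * pm_runtime_get(dev)       ->  __pm_runtime_get(dev, RPM_ASYNC)
 * pm_runtime_get_sync(dev)  ->  __pm_runtime_get(dev, 0)
 * pm_runtime_put(dev)       ->  __pm_runtime_put(dev, RPM_ASYNC)
 * pm_runtime_put_sync(dev)  ->  __pm_runtime_put(dev, 0)
 *
 * Work items running on pm_wq call __pm_runtime_suspend() and
 * __pm_runtime_resume() with RPM_NOWAIT, so the workqueue never
 * sleeps waiting for a concurrent state change.
 */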
#ifdef CONFIG_PM_RUNTIME
extern struct workqueue_struct *pm_wq;
extern int pm_request_idle(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int pm_request_resume(struct device *dev);
-extern int __pm_runtime_get(struct device *dev, bool sync);
-extern int __pm_runtime_put(struct device *dev, bool sync);
+extern int __pm_runtime_get(struct device *dev, int rpmflags);
+extern int __pm_runtime_put(struct device *dev, int rpmflags);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
return -ENOSYS;
}
static inline int pm_request_resume(struct device *dev) { return 0; }
-static inline int __pm_runtime_get(struct device *dev, bool sync) { return 1; }
-static inline int __pm_runtime_put(struct device *dev, bool sync) { return 0; }
+static inline int __pm_runtime_get(struct device *dev, int rpmflags)
+ { return 1; }
+static inline int __pm_runtime_put(struct device *dev, int rpmflags)
+ { return 0; }
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline int pm_runtime_get(struct device *dev)
{
- return __pm_runtime_get(dev, false);
+ return __pm_runtime_get(dev, RPM_ASYNC);
}
static inline int pm_runtime_get_sync(struct device *dev)
{
- return __pm_runtime_get(dev, true);
+ return __pm_runtime_get(dev, 0);
}
static inline int pm_runtime_put(struct device *dev)
{
- return __pm_runtime_put(dev, false);
+ return __pm_runtime_put(dev, RPM_ASYNC);
}
static inline int pm_runtime_put_sync(struct device *dev)
{
- return __pm_runtime_put(dev, true);
+ return __pm_runtime_put(dev, 0);
}
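A minimal usage sketch under the new wrappers follows; the driver function is hypothetical and not part of this patch. It resumes the device synchronously before touching the hardware and drops the reference asynchronously afterwards.

/* Hypothetical driver code, for illustration only. */
static int foo_do_io(struct device *dev)
{
	int error;

	error = pm_runtime_get_sync(dev);	/* __pm_runtime_get(dev, 0) */
	if (error < 0) {
		pm_runtime_put(dev);		/* balance the usage count */
		return error;
	}

	/* ... access the hardware ... */

	pm_runtime_put(dev);			/* __pm_runtime_put(dev, RPM_ASYNC) */
	return 0;
}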
static inline int pm_runtime_set_active(struct device *dev)