static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
+	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;
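+	/*
+	 * If the domain provides both ->stop() and ->start() device
+	 * callbacks, re-use the runtime PM callbacks to put the device
+	 * into a low-power state before the domain is powered off.
+	 */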
+	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
+		ret = pm_runtime_force_suspend(dev);
+		if (ret)
+			return ret;
+	}
+
	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
+	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

-	return 0;
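+	/*
+	 * If the runtime PM callbacks were used to suspend the device,
+	 * resume it the same way.
+	 */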
+	if (genpd->dev_ops.stop && genpd->dev_ops.start)
+		ret = pm_runtime_force_resume(dev);
+
+	return ret;
}
/**
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
+	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR(genpd))
		return -EINVAL;

-	return 0;
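+	/*
+	 * Quiesce the device through its runtime PM callbacks before the
+	 * hibernation image is created.
+	 */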
+	if (genpd->dev_ops.stop && genpd->dev_ops.start)
+		ret = pm_runtime_force_suspend(dev);
+
+	return ret;
}
/**
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
+	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR(genpd))
		return -EINVAL;

-	return 0;
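+	/*
+	 * Undo the freeze: restart the device through its runtime PM
+	 * callbacks if they were used to stop it.
+	 */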
+	if (genpd->dev_ops.stop && genpd->dev_ops.start)
+		ret = pm_runtime_force_resume(dev);
+
+	return ret;
}
/**
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
+	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pm_genpd_sync_poweron(genpd, true);

-	return 0;
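+	/*
+	 * The domain has just been powered on, so restore the device
+	 * through its runtime PM callbacks as in the resume path.
+	 */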
+	if (genpd->dev_ops.stop && genpd->dev_ops.start)
+		ret = pm_runtime_force_resume(dev);
+
+	return ret;
}
/**