PM / Domains: Power off masters immediately in the power off sequence
Author: Ulf Hansson <ulf.hansson@linaro.org>
Fri, 17 Feb 2017 09:55:25 +0000 (10:55 +0100)
Committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 23 Feb 2017 21:25:46 +0000 (22:25 +0100)
Once a subdomain is powered off, genpd queues a power off work for each of
the subdomain's corresponding masters, thus postponing the powering off of
the masters until a later point.

When genpd used intermediate power off states, which was removed in
commit ba2bbfbf6307 ("PM / Domains: Remove intermediate states from the
power off sequence"), this behaviour made sense, but now it simply doesn't.

Genpd can easily try to power off the masters in the same context as the
subdomain, of course by acquiring/releasing the lock. Then, let's convert
to this behaviour, as it avoids queueing unnecessary work items.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
drivers/base/power/domain.c

index 179bb269a58fb3e4e8dc4cdd0d020c8017cc5ce0..e697dec9d25bf585175a5ee569097f849f9669c6 100644 (file)
@@ -284,7 +284,8 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
  * If all of the @genpd's devices have been suspended and all of its subdomains
  * have been powered down, remove power from @genpd.
  */
-static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on)
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+                          unsigned int depth)
 {
        struct pm_domain_data *pdd;
        struct gpd_link *link;
@@ -351,7 +352,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on)
 
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
-               genpd_queue_power_off_work(link->master);
+               genpd_lock_nested(link->master, depth + 1);
+               genpd_power_off(link->master, false, depth + 1);
+               genpd_unlock(link->master);
        }
 
        return 0;
@@ -405,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
                                        &genpd->slave_links,
                                        slave_node) {
                genpd_sd_counter_dec(link->master);
-               genpd_queue_power_off_work(link->master);
+               genpd_lock_nested(link->master, depth + 1);
+               genpd_power_off(link->master, false, depth + 1);
+               genpd_unlock(link->master);
        }
 
        return ret;
@@ -462,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
        genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
        genpd_lock(genpd);
-       genpd_power_off(genpd, false);
+       genpd_power_off(genpd, false, 0);
        genpd_unlock(genpd);
 }
 
@@ -581,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
                return 0;
 
        genpd_lock(genpd);
-       genpd_power_off(genpd, true);
+       genpd_power_off(genpd, true, 0);
        genpd_unlock(genpd);
 
        return 0;
@@ -661,7 +666,7 @@ err_poweroff:
        if (!pm_runtime_is_irq_safe(dev) ||
                (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
                genpd_lock(genpd);
-               genpd_power_off(genpd, true);
+               genpd_power_off(genpd, true, 0);
                genpd_unlock(genpd);
        }