From: Mike Turquette
Date: Fri, 9 Nov 2012 18:28:42 +0000 (-0700)
Subject: ARM: OMAP2+: clockdomain: bypass clockdomain handling when disabling unused clks
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=d043d87;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

ARM: OMAP2+: clockdomain: bypass clockdomain handling when disabling unused clks

The OMAP port to the common clk framework[1] resulted in spurious WARNs
while disabling unused clocks. This is due to _clkdm_clk_hwmod_disable
catching clkdm->usecount values of zero. Even less desirable, it would
not allow the clkdm_clk_disable function pointer to be called due to an
early return of -ERANGE.

This patch adds a check for such a corner case by skipping the WARN and
the early return in the event that clkdm->usecount and
clk->enable_usecount are both zero. Presumably this can only happen
during the check for unused clocks at boot time.

[1] http://article.gmane.org/gmane.linux.ports.arm.omap/88824

Signed-off-by: Mike Turquette
[paul@pwsan.com: split the hwmod and clock disable cases; modified the
 code to skip the clockdomain handling during the disable-unused-clocks
 phase; added COMMON_CLK ifdef; removed include of clk-private.h at
 Mike's request]
Signed-off-by: Paul Walmsley
---

diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 64e50465a4b5..be6fe356ddba 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -947,35 +948,6 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
 	return 0;
 }
 
-static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
-{
-	unsigned long flags;
-
-	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
-		return -EINVAL;
-
-	spin_lock_irqsave(&clkdm->lock, flags);
-
-	if (atomic_read(&clkdm->usecount) == 0) {
-		spin_unlock_irqrestore(&clkdm->lock, flags);
-		WARN_ON(1); /* underflow */
-		return -ERANGE;
-	}
-
-	if (atomic_dec_return(&clkdm->usecount) > 0) {
-		spin_unlock_irqrestore(&clkdm->lock, flags);
-		return 0;
-	}
-
-	arch_clkdm->clkdm_clk_disable(clkdm);
-	pwrdm_state_switch(clkdm->pwrdm.ptr);
-	spin_unlock_irqrestore(&clkdm->lock, flags);
-
-	pr_debug("clockdomain: %s: disabled\n", clkdm->name);
-
-	return 0;
-}
-
 /**
  * clkdm_clk_enable - add an enabled downstream clock to this clkdm
  * @clkdm: struct clockdomain *
@@ -1018,15 +990,41 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
  */
 int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 {
-	/*
-	 * XXX Rewrite this code to maintain a list of enabled
-	 * downstream clocks for debugging purposes?
-	 */
+	unsigned long flags;
 
-	if (!clk)
+	if (!clkdm || !clk || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
 		return -EINVAL;
 
-	return _clkdm_clk_hwmod_disable(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+
+#ifdef CONFIG_COMMON_CLK
+	/* corner case: disabling unused clocks */
+	if (__clk_get_enable_count(clk) == 0)
+		goto ccd_exit;
+#endif
+
+	if (atomic_read(&clkdm->usecount) == 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		WARN_ON(1); /* underflow */
+		return -ERANGE;
+	}
+
+	if (atomic_dec_return(&clkdm->usecount) > 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		return 0;
+	}
+
+	arch_clkdm->clkdm_clk_disable(clkdm);
+	pwrdm_state_switch(clkdm->pwrdm.ptr);
+
+	pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+#ifdef CONFIG_COMMON_CLK
+ccd_exit:
+#endif
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	return 0;
 }
 
 /**
@@ -1077,6 +1075,8 @@ int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)
  */
 int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
 {
+	unsigned long flags;
+
 	/* The clkdm attribute does not exist yet prior OMAP4 */
 	if (cpu_is_omap24xx() || cpu_is_omap34xx())
 		return 0;
@@ -1086,9 +1086,28 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
 	 * downstream hwmods for debugging purposes?
 	 */
 
-	if (!oh)
+	if (!clkdm || !oh || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
 		return -EINVAL;
 
-	return _clkdm_clk_hwmod_disable(clkdm);
+	spin_lock_irqsave(&clkdm->lock, flags);
+
+	if (atomic_read(&clkdm->usecount) == 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		WARN_ON(1); /* underflow */
+		return -ERANGE;
+	}
+
+	if (atomic_dec_return(&clkdm->usecount) > 0) {
+		spin_unlock_irqrestore(&clkdm->lock, flags);
+		return 0;
+	}
+
+	arch_clkdm->clkdm_clk_disable(clkdm);
+	pwrdm_state_switch(clkdm->pwrdm.ptr);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+	return 0;
 }
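
Note (not part of the patch): as an illustration of the corner case guarded above, the following is a minimal standalone C sketch, not kernel code. The clk_sketch/clkdm_sketch types and the example clock and clockdomain names are invented for this note; only the control flow mirrors the patch: when a boot-time "disable unused clocks" pass hits a clock whose enable count is already zero, the clockdomain usecount bookkeeping is skipped instead of underflowing and warning.

/*
 * Standalone sketch of the corner case this patch guards against.
 * All names here are illustrative only.
 */
#include <stdio.h>

struct clk_sketch {
	const char *name;
	unsigned int enable_count;	/* stands in for __clk_get_enable_count() */
};

struct clkdm_sketch {
	const char *name;
	int usecount;			/* stands in for atomic clkdm->usecount */
};

/* Returns 0 on success, -1 on usecount underflow (the spurious WARN case). */
static int clkdm_clk_disable_sketch(struct clkdm_sketch *clkdm,
				    struct clk_sketch *clk)
{
	/*
	 * Corner case from the patch: during the boot-time scan for unused
	 * clocks the clock was never enabled, so skip the clockdomain
	 * bookkeeping entirely instead of warning about an underflow.
	 */
	if (clk->enable_count == 0)
		return 0;

	if (clkdm->usecount == 0) {
		fprintf(stderr, "WARN: %s usecount underflow\n", clkdm->name);
		return -1;
	}

	if (--clkdm->usecount > 0)
		return 0;

	printf("clockdomain: %s: disabled\n", clkdm->name);
	return 0;
}

int main(void)
{
	/* Hypothetical names for illustration only. */
	struct clkdm_sketch clkdm = { "example_clkdm", 0 };
	struct clk_sketch unused_clk = { "example_unused_clk", 0 };

	/* Without the enable_count check this would WARN; with it, it is a no-op. */
	return clkdm_clk_disable_sketch(&clkdm, &unused_clk);
}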