From: Dong Aisheng
Date: Thu, 30 Jun 2016 09:31:12 +0000 (+0800)
Subject: clk: move clk_disable_unused after clk_core_disable_unprepare function
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=7ec986efed0208ca5fdfc26f9fc19c1604d3d502;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

clk: move clk_disable_unused after clk_core_disable_unprepare function

No functional change; this patch only moves code around.

The clk_disable_unused() function will need to call
clk_core_prepare_enable()/clk_core_disable_unprepare() once the
CLK_OPS_PARENT_ENABLE feature is added. Move it after
clk_core_disable_unprepare() so that no forward declarations have to be
added later.

Cc: Michael Turquette
Cc: Stephen Boyd
Cc: Shawn Guo
Signed-off-by: Dong Aisheng
Signed-off-by: Stephen Boyd
---

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 55e62bac0fb4..e2e8f0c9f20a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -172,104 +172,6 @@ static bool clk_core_is_enabled(struct clk_core *core)
 	return core->ops->is_enabled(core->hw);
 }
 
-static void clk_unprepare_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
-
-	if (core->prepare_count)
-		return;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		return;
-
-	if (clk_core_is_prepared(core)) {
-		trace_clk_unprepare(core);
-		if (core->ops->unprepare_unused)
-			core->ops->unprepare_unused(core->hw);
-		else if (core->ops->unprepare)
-			core->ops->unprepare(core->hw);
-		trace_clk_unprepare_complete(core);
-	}
-}
-
-static void clk_disable_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-	unsigned long flags;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
-
-	flags = clk_enable_lock();
-
-	if (core->enable_count)
-		goto unlock_out;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		goto unlock_out;
-
-	/*
-	 * some gate clocks have special needs during the disable-unused
-	 * sequence. call .disable_unused if available, otherwise fall
-	 * back to .disable
-	 */
-	if (clk_core_is_enabled(core)) {
-		trace_clk_disable(core);
-		if (core->ops->disable_unused)
-			core->ops->disable_unused(core->hw);
-		else if (core->ops->disable)
-			core->ops->disable(core->hw);
-		trace_clk_disable_complete(core);
-	}
-
-unlock_out:
-	clk_enable_unlock(flags);
-}
-
-static bool clk_ignore_unused;
-static int __init clk_ignore_unused_setup(char *__unused)
-{
-	clk_ignore_unused = true;
-	return 1;
-}
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
-
-static int clk_disable_unused(void)
-{
-	struct clk_core *core;
-
-	if (clk_ignore_unused) {
-		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
-	}
-
-	clk_prepare_lock();
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	clk_prepare_unlock();
-
-	return 0;
-}
-late_initcall_sync(clk_disable_unused);
-
 /*** helper functions ***/
 
 const char *__clk_get_name(const struct clk *clk)
@@ -828,6 +730,104 @@ static void clk_core_disable_unprepare(struct clk_core *core)
 	clk_core_unprepare_lock(core);
 }
 
+static void clk_unprepare_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_unprepare_unused_subtree(child);
+
+	if (core->prepare_count)
+		return;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		return;
+
+	if (clk_core_is_prepared(core)) {
+		trace_clk_unprepare(core);
+		if (core->ops->unprepare_unused)
+			core->ops->unprepare_unused(core->hw);
+		else if (core->ops->unprepare)
+			core->ops->unprepare(core->hw);
+		trace_clk_unprepare_complete(core);
+	}
+}
+
+static void clk_disable_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+	unsigned long flags;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_disable_unused_subtree(child);
+
+	flags = clk_enable_lock();
+
+	if (core->enable_count)
+		goto unlock_out;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		goto unlock_out;
+
+	/*
+	 * some gate clocks have special needs during the disable-unused
+	 * sequence. call .disable_unused if available, otherwise fall
+	 * back to .disable
+	 */
+	if (clk_core_is_enabled(core)) {
+		trace_clk_disable(core);
+		if (core->ops->disable_unused)
+			core->ops->disable_unused(core->hw);
+		else if (core->ops->disable)
+			core->ops->disable(core->hw);
+		trace_clk_disable_complete(core);
+	}
+
+unlock_out:
+	clk_enable_unlock(flags);
+}
+
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+	clk_ignore_unused = true;
+	return 1;
+}
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
+static int clk_disable_unused(void)
+{
+	struct clk_core *core;
+
+	if (clk_ignore_unused) {
+		pr_warn("clk: Not disabling unused clocks\n");
+		return 0;
+	}
+
+	clk_prepare_lock();
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	clk_prepare_unlock();
+
+	return 0;
+}
+late_initcall_sync(clk_disable_unused);
+
 static int clk_core_round_rate_nolock(struct clk_core *core,
 				      struct clk_rate_request *req)
 {
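
Editor's note: for context on why the move matters, the follow-up change the
commit message refers to would bracket the disable-unused walk with the two
helpers this patch moves it below. A minimal sketch, assuming
CLK_OPS_PARENT_ENABLE becomes a clk_core flag and that the parent must be
running around the gate operations (both assumptions derived from the commit
message, not from this patch):

	/*
	 * Sketch only: a possible shape for clk_disable_unused_subtree()
	 * once CLK_OPS_PARENT_ENABLE exists. The flag checks below are an
	 * assumption based on the commit message, not part of this patch.
	 */
	static void clk_disable_unused_subtree(struct clk_core *core)
	{
		struct clk_core *child;
		unsigned long flags;

		lockdep_assert_held(&prepare_lock);

		hlist_for_each_entry(child, &core->children, child_node)
			clk_disable_unused_subtree(child);

		/* assumed: gate ops may need the parent clock running */
		if (core->flags & CLK_OPS_PARENT_ENABLE)
			clk_core_prepare_enable(core->parent);

		flags = clk_enable_lock();
		/*
		 * ... the existing enable_count/CLK_IGNORE_UNUSED checks
		 * and .disable_unused/.disable calls stay here unchanged ...
		 */
		clk_enable_unlock(flags);

		/* assumed: drop the parent reference taken above */
		if (core->flags & CLK_OPS_PARENT_ENABLE)
			clk_core_disable_unprepare(core->parent);
	}

With clk_disable_unused() and the subtree walkers defined after
clk_core_disable_unprepare(), both helpers are already in scope at this point,
which is exactly why the move avoids forward declarations.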