static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/*** locking ***/
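+/*
+ * Two global locks guard the clk tree: prepare_lock is a mutex and
+ * serializes the sleepable operations (prepare/unprepare, rate and
+ * parent changes, notifiers); enable_lock is a spinlock, taken with
+ * IRQs disabled, so that enable/disable remain callable from atomic
+ * context.
+ */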
+static void clk_prepare_lock(void)
+{
+	mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+	mutex_unlock(&prepare_lock);
+}
+
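+/*
+ * clk_enable_lock() returns the IRQ flags saved by spin_lock_irqsave();
+ * callers must hand the same value back to clk_enable_unlock() so the
+ * interrupt state is restored correctly.
+ */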
+static unsigned long clk_enable_lock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&enable_lock, flags);
+	return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&enable_lock, flags);
+}
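+/*
+ * A typical caller brackets its critical section as, e.g.:
+ *
+ *	flags = clk_enable_lock();
+ *	if (clk->enable_count)
+ *		__clk_enable(parent);
+ *	clk_enable_unlock(flags);
+ */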
+
/*** debugfs support ***/
#ifdef CONFIG_COMMON_CLK_DEBUG
seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
seq_printf(s, "---------------------------------------------------------------------\n");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
seq_printf(s, "{");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
clk_dump_subtree(s, c, 0);
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
seq_printf(s, "}");
return 0;
	if (!orphandir)
		return -ENOMEM;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);
	inited = 1;
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return 0;
}
	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
	if (clk->enable_count)
		goto unlock_out;
	}
unlock_out:
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
out:
	return;
{
	struct clk *clk;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);
	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return 0;
}
*/
void clk_unprepare(struct clk *clk)
{
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	__clk_unprepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
{
	int ret;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	ret = __clk_prepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return ret;
}
{
	unsigned long flags;
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
	__clk_disable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
	unsigned long flags;
	int ret;
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
	ret = __clk_enable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
	return ret;
}
{
	unsigned long ret;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return ret;
}
{
	unsigned long rate;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);
	rate = __clk_get_rate(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return rate;
}
	int ret = 0;
	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	/* bail early if nothing to do */
	if (rate == clk->rate)
	clk_change_rate(top);
out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return ret;
}
{
	struct clk *parent;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	parent = __clk_get_parent(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return parent;
}
	__clk_prepare(parent);
	/* FIXME replace with clk_is_enabled(clk) someday */
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
	if (clk->enable_count)
		__clk_enable(parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);
	/* clean up old prepare and enable */
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
	if (clk->enable_count)
		__clk_disable(old_parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
	if (clk->prepare_count)
		__clk_unprepare(old_parent);
		return -ENOSYS;
	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	if (clk->parent == parent)
		goto out;
	__clk_reparent(clk, parent);
out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return ret;
}
	if (!clk)
		return -EINVAL;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
	clk_debug_register(clk);
out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return ret;
}
	if (!clk || !nb)
		return -EINVAL;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
	clk->notifier_count++;
out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return ret;
}
	if (!clk || !nb)
		return -EINVAL;
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
		ret = -ENOENT;
	}
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
	return ret;
}