From: Marc Zyngier
Date: Tue, 1 Aug 2017 08:02:55 +0000 (+0100)
Subject: jump_label: Split out code under the hotplug lock
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=8b7b412807053ab5f059ffae426a280e769a5bda;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

jump_label: Split out code under the hotplug lock

In order to later introduce an "already locked" version of some of the
static key functions, let's split the code into the core stuff (the
*_cpuslocked functions) and the usual helpers, which now take/release
the hotplug lock and call into the _cpuslocked versions.

Signed-off-by: Marc Zyngier
Signed-off-by: Peter Zijlstra (Intel)
Cc: Leo Yan
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170801080257.5056-3-marc.zyngier@arm.com
Signed-off-by: Ingo Molnar
---

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 161301fff97d..cc6d815c75ed 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -79,11 +79,10 @@ int static_key_count(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-void static_key_slow_inc(struct static_key *key)
+static void static_key_slow_inc_cpuslocked(struct static_key *key)
 {
 	int v, v1;
 
-	cpus_read_lock();
 	STATIC_KEY_CHECK_USE();
 
 	/*
@@ -100,10 +99,8 @@ void static_key_slow_inc(struct static_key *key)
 	 */
 	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
 		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
-		if (likely(v1 == v)) {
-			cpus_read_unlock();
+		if (likely(v1 == v))
 			return;
-		}
 	}
 
 	jump_label_lock();
@@ -119,6 +116,12 @@ void static_key_slow_inc(struct static_key *key)
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+	cpus_read_lock();
+	static_key_slow_inc_cpuslocked(key);
 	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -163,10 +166,10 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void __static_key_slow_dec(struct static_key *key,
-		unsigned long rate_limit, struct delayed_work *work)
+static void static_key_slow_dec_cpuslocked(struct static_key *key,
+					   unsigned long rate_limit,
+					   struct delayed_work *work)
 {
-	cpus_read_lock();
 	/*
 	 * The negative count check is valid even when a negative
 	 * key->enabled is in use by static_key_slow_inc(); a
@@ -177,7 +180,6 @@ static void __static_key_slow_dec(struct static_key *key,
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
-		cpus_read_unlock();
 		return;
 	}
 
@@ -188,6 +190,14 @@ static void __static_key_slow_dec(struct static_key *key,
 		jump_label_update(key);
 	}
 	jump_label_unlock();
+}
+
+static void __static_key_slow_dec(struct static_key *key,
+				  unsigned long rate_limit,
+				  struct delayed_work *work)
+{
+	cpus_read_lock();
+	static_key_slow_dec_cpuslocked(key, rate_limit, work);
 	cpus_read_unlock();
 }
 
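
For context only (not part of the commit): the split above follows a common
pattern, a core function that assumes the caller already holds the lock, plus
a thin helper that takes/releases the lock and calls the core. Below is a
minimal userspace C sketch of that pattern, assuming a pthread rwlock as a
stand-in for the CPU hotplug lock and a plain atomic counter as a stand-in for
the static key. None of these names (fake_hotplug_lock, fake_key_inc, etc.)
are kernel APIs; they are purely illustrative.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_rwlock_t fake_hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
	static atomic_int fake_key_enabled;

	/* Core: caller must already hold fake_hotplug_lock for reading. */
	static void fake_key_inc_cpuslocked(void)
	{
		atomic_fetch_add(&fake_key_enabled, 1);
	}

	/* Helper: takes/releases the lock, then calls into the core. */
	static void fake_key_inc(void)
	{
		pthread_rwlock_rdlock(&fake_hotplug_lock);
		fake_key_inc_cpuslocked();
		pthread_rwlock_unlock(&fake_hotplug_lock);
	}

	int main(void)
	{
		/* Usual path: the helper handles the locking itself. */
		fake_key_inc();

		/*
		 * "Already locked" path: a caller holding the lock for other
		 * reasons calls the core directly instead of re-acquiring the
		 * lock, which is the use case this patch prepares for.
		 */
		pthread_rwlock_rdlock(&fake_hotplug_lock);
		fake_key_inc_cpuslocked();
		pthread_rwlock_unlock(&fake_hotplug_lock);

		printf("enabled count: %d\n", atomic_load(&fake_key_enabled));
		return 0;
	}

Build with "cc -pthread"; the two calls leave the counter at 2. The design
point mirrors the commit message: keeping the lock acquisition in the outer
helpers lets a later patch expose the *_cpuslocked variants to callers that
already hold the hotplug lock.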