smpboot/threads, watchdog/core: Avoid runtime allocation
authorThomas Gleixner <tglx@linutronix.de>
Tue, 12 Sep 2017 19:37:09 +0000 (21:37 +0200)
committerIngo Molnar <mingo@kernel.org>
Thu, 14 Sep 2017 09:41:06 +0000 (11:41 +0200)
smpboot_update_cpumask_threads_percpu() allocates a temporary cpumask at
runtime. This is suboptimal because the call site needs more code size for
proper error handling than a statically allocated temporary mask requires
data size.

Add a static temporary cpumask. The function is globally serialized, so no
further protection is required.

Remove the half-baked error handling in the watchdog code and get rid of
the export as there are no in-tree modular users of that function.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Link: http://lkml.kernel.org/r/20170912194147.297288838@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/smpboot.h
kernel/smpboot.c
kernel/watchdog.c

index 12910cf19869c7db32d1ca34ddda0931e80570d1..c149aa7bedf306c8a04d9fd61cb8930af12d416f 100644 (file)
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                        const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+                                         const struct cpumask *);
 
 #endif
index 1d71c051a9515c6acecd4be823465a4760e7cefa..ed7507b69b4801d0f6cee27b9ded1866509b393e 100644 (file)
@@ -344,39 +344,31 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
  * by the client, but only by calling this function.
  * This function can only be called on a registered smp_hotplug_thread.
  */
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-                                        const struct cpumask *new)
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+                                         const struct cpumask *new)
 {
        struct cpumask *old = plug_thread->cpumask;
-       cpumask_var_t tmp;
+       static struct cpumask tmp;
        unsigned int cpu;
 
-       if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
-               return -ENOMEM;
-
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
 
        /* Park threads that were exclusively enabled on the old mask. */
-       cpumask_andnot(tmp, old, new);
-       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+       cpumask_andnot(&tmp, old, new);
+       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
                smpboot_park_thread(plug_thread, cpu);
 
        /* Unpark threads that are exclusively enabled on the new mask. */
-       cpumask_andnot(tmp, new, old);
-       for_each_cpu_and(cpu, tmp, cpu_online_mask)
+       cpumask_andnot(&tmp, new, old);
+       for_each_cpu_and(cpu, &tmp, cpu_online_mask)
                smpboot_unpark_thread(plug_thread, cpu);
 
        cpumask_copy(old, new);
 
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
-
-       free_cpumask_var(tmp);
-
-       return 0;
 }
-EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
 
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
 
index cedf45ab4d81e2a8fdefd6a892c4308a1defe123..8935a3a4c2fb97d73ef7a7f88b8538fe723d7e54 100644 (file)
@@ -787,31 +787,20 @@ out:
        return err;
 }
 
-static int watchdog_update_cpus(void)
+static void watchdog_update_cpus(void)
 {
-       if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR)) {
-               return smpboot_update_cpumask_percpu_thread(&watchdog_threads,
-                                                           &watchdog_cpumask);
+       if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR) && watchdog_running) {
+               smpboot_update_cpumask_percpu_thread(&watchdog_threads,
+                                                    &watchdog_cpumask);
                __lockup_detector_cleanup();
        }
-       return 0;
 }
 
 static void proc_watchdog_cpumask_update(void)
 {
        /* Remove impossible cpus to keep sysctl output clean. */
        cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
-
-       if (watchdog_running) {
-               /*
-                * Failure would be due to being unable to allocate a
-                * temporary cpumask, so we are likely not in a position to
-                * do much else to make things better.
-                */
-               if (watchdog_update_cpus() != 0)
-                       pr_err("cpumask update failed\n");
-       }
-
+       watchdog_update_cpus();
        watchdog_nmi_reconfigure();
 }