Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 22 Jul 2012 18:22:15 +0000 (11:22 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 22 Jul 2012 18:22:15 +0000 (11:22 -0700)
Pull smp/hotplug changes from Ingo Molnar:
 "Various cleanups to the SMP hotplug code - a continuing effort of
  Thomas et al"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smpboot: Remove leftover declaration
  smp: Remove num_booting_cpus()
  smp: Remove ipi_call_lock[_irq]()/ipi_call_unlock[_irq]()
  POWERPC: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  SPARC: SMP: Remove call to ipi_call_lock_irq()/ipi_call_unlock_irq()
  ia64: SMP: Remove call to ipi_call_lock_irq()/ipi_call_unlock_irq()
  x86: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  tile: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  S390: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  parisc: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  mn10300: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  hexagon: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
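
Every architecture commit above removes the same pattern: an ipi_call_lock()/ipi_call_unlock()
(or _irq) pair wrapped around set_cpu_online() in the secondary-CPU bring-up path, with the
generic lock wrappers themselves deleted from kernel/smp.c in the final hunk. A minimal
before/after sketch of that pattern, condensed from the hunks below (the start_secondary_old/new
names and the exact surrounding calls are illustrative, not taken from any one architecture):

	/* Before: bring-up took the generic call-function IPI lock around
	 * marking the CPU online. */
	void start_secondary_old(unsigned int cpu)
	{
		notify_cpu_starting(cpu);

		ipi_call_lock();		/* raw_spin_lock(&call_function.lock) */
		set_cpu_online(cpu, true);
		ipi_call_unlock();

		local_irq_enable();
	}

	/* After: the lock/unlock pair is gone and set_cpu_online() is called
	 * directly; architectures with their own locking around this step
	 * (e.g. x86's vector_lock) keep that locking. */
	void start_secondary_new(unsigned int cpu)
	{
		notify_cpu_starting(cpu);
		set_cpu_online(cpu, true);
		local_irq_enable();
	}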

15 files changed:
arch/hexagon/kernel/smp.c
arch/ia64/kernel/smpboot.c
arch/m32r/include/asm/smp.h
arch/mn10300/kernel/smp.c
arch/parisc/kernel/smp.c
arch/powerpc/kernel/smp.c
arch/s390/kernel/smp.c
arch/sparc/kernel/smp_64.c
arch/tile/kernel/smpboot.c
arch/x86/include/asm/smp.h
arch/x86/kernel/smpboot.c
arch/x86/xen/smp.c
include/linux/smp.h
kernel/smp.c
kernel/smpboot.h

diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index f7264621e58ddf888708dcbe114320bd2d2fd934..149fbefc1a4d36988d6f5ce3422aaac57d029b60 100644
@@ -180,9 +180,7 @@ void __cpuinit start_secondary(void)
 
        notify_cpu_starting(cpu);
 
-       ipi_call_lock();
        set_cpu_online(cpu, true);
-       ipi_call_unlock();
 
        local_irq_enable();
 
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1113b8aba07f5a0caa632ff01dc8111899e49185..963d2db53bfa55a9d50932b3301300d3411c3115 100644
@@ -382,7 +382,6 @@ smp_callin (void)
        set_numa_node(cpu_to_node_map[cpuid]);
        set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
 
-       ipi_call_lock_irq();
        spin_lock(&vector_lock);
        /* Setup the per cpu irq handling data structures */
        __setup_vector_irq(cpuid);
@@ -390,7 +389,6 @@ smp_callin (void)
        set_cpu_online(cpuid, true);
        per_cpu(cpu_state, cpuid) = CPU_ONLINE;
        spin_unlock(&vector_lock);
-       ipi_call_unlock_irq();
 
        smp_setup_percpu_timer();
 
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index cf7829a615513e1597d903c6398348d8e0d5ac1e..c689b828dfe2b0b218d6ec523c0233fddd086785 100644
@@ -79,11 +79,6 @@ static __inline__ int cpu_number_map(int cpu)
        return cpu;
 }
 
-static __inline__ unsigned int num_booting_cpus(void)
-{
-       return cpumask_weight(&cpu_callout_map);
-}
-
 extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
 
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 090d35d369737599ec5c48f603ecba45c49797b3..e62c223e4c4594c3c06c762352d8e61a975e2d35 100644
@@ -876,9 +876,7 @@ static void __init smp_online(void)
 
        notify_cpu_starting(cpu);
 
-       ipi_call_lock();
        set_cpu_online(cpu, true);
-       ipi_call_unlock();
 
        local_irq_enable();
 }
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index a47828d31fe6d0c4f8f5197a348c8fce64a827e6..6266730efd615552f71a9e9409932c2298001167 100644
@@ -300,9 +300,7 @@ smp_cpu_init(int cpunum)
 
        notify_cpu_starting(cpunum);
 
-       ipi_call_lock();
        set_cpu_online(cpunum, true);
-       ipi_call_unlock();
 
        /* Initialise the idle task for this CPU */
        atomic_inc(&init_mm.mm_count);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e4cb34322de4aba848ce279b7792ea9d495dee5e..e1417c42155cd66dfe3532dfd8d7e4d3cd426233 100644
@@ -571,7 +571,6 @@ void __devinit start_secondary(void *unused)
        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;
 #endif
-       ipi_call_lock();
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        /* Update sibling maps */
@@ -601,7 +600,6 @@ void __devinit start_secondary(void *unused)
                of_node_put(np);
        }
        of_node_put(l2_cache);
-       ipi_call_unlock();
 
        local_irq_enable();
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15cca26ccb6c4ff1cbde51c60259a7ec95471718..8dca9c248ac793ea292ad4683e0057af5e9470d8 100644
@@ -717,9 +717,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
        init_cpu_vtimer();
        pfault_init();
        notify_cpu_starting(smp_processor_id());
-       ipi_call_lock();
        set_cpu_online(smp_processor_id(), true);
-       ipi_call_unlock();
        local_irq_enable();
        /* cpu_idle will call schedule for us */
        cpu_idle();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f591598d92f67b165ebccdf4e3448a74df3c8cd2..781bcb10b8bd6f0ac34e1df6cf079dd0d3d59106 100644
@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();
 
-       local_irq_enable();
-
        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush  %%g6" : : : "memory");
@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                rmb();
 
-       ipi_call_lock_irq();
        set_cpu_online(cpuid, true);
-       ipi_call_unlock_irq();
+       local_irq_enable();
 
        /* idle thread is expected to have preempt disabled */
        preempt_disable();
@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
        mdelay(1);
        local_irq_disable();
 
-       ipi_call_lock();
        set_cpu_online(cpu, false);
-       ipi_call_unlock();
 
        cpu_map_rebuild();
 
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 84873fbe8f2769b0bfa22faaed187ae0919c523d..e686c5ac90be36bcf14d33019ac97bb5e46bc4ee 100644
@@ -198,17 +198,7 @@ void __cpuinit online_secondary(void)
 
        notify_cpu_starting(smp_processor_id());
 
-       /*
-        * We need to hold call_lock, so there is no inconsistency
-        * between the time smp_call_function() determines number of
-        * IPI recipients, and the time when the determination is made
-        * for which cpus receive the IPI. Holding this
-        * lock helps us to not include this cpu in a currently in progress
-        * smp_call_function().
-        */
-       ipi_call_lock();
        set_cpu_online(smp_processor_id(), 1);
-       ipi_call_unlock();
        __get_cpu_var(cpu_state) = CPU_ONLINE;
 
        /* Set up tile-specific state for this cpu. */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index f48394513c377b3d540c6c0ec7da258645d3b06e..2ffa95dc2333bcc5a36efb898fd7ad12e95f5842 100644
@@ -169,11 +169,6 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)   per_cpu(x86_cpu_to_apicid, cpu)
 
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
-       return cpumask_weight(cpu_callout_mask);
-}
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)     wbinvd()
 static inline int wbinvd_on_all_cpus(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7bd8a0823654115cdb476b1693cb3eaeb6affba3..27e2eeff7a4be611a6e974e947ad5f94638613ab 100644
@@ -255,22 +255,13 @@ notrace static void __cpuinit start_secondary(void *unused)
        check_tsc_sync_target();
 
        /*
-        * We need to hold call_lock, so there is no inconsistency
-        * between the time smp_call_function() determines number of
-        * IPI recipients, and the time when the determination is made
-        * for which cpus receive the IPI. Holding this
-        * lock helps us to not include this cpu in a currently in progress
-        * smp_call_function().
-        *
         * We need to hold vector_lock so there the set of online cpus
         * does not change while we are assigning vectors to cpus.  Holding
         * this lock ensures we don't half assign or remove an irq from a cpu.
         */
-       ipi_call_lock();
        lock_vector_lock();
        set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
-       ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        x86_platform.nmi_init();
 
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index afb250d22a6b2e29cf96f54e48bc6f619dab2ca7..f58dca7a6e52cef85503753f4b500d4d8f7ec369 100644
@@ -80,9 +80,7 @@ static void __cpuinit cpu_bringup(void)
 
        notify_cpu_starting(cpu);
 
-       ipi_call_lock();
        set_cpu_online(cpu, true);
-       ipi_call_unlock();
 
        this_cpu_write(cpu_state, CPU_ONLINE);
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 717fb746c9a822ce1098870b17b706c184cfdeb7..dd6f06be3c9feb18ddc0a3752aaa43797c38fd08 100644
@@ -90,10 +90,6 @@ void kick_all_cpus_sync(void);
 void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
-void ipi_call_lock(void);
-void ipi_call_unlock(void);
-void ipi_call_lock_irq(void);
-void ipi_call_unlock_irq(void);
 #else
 static inline void call_function_init(void) { }
 #endif
@@ -181,7 +177,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
        } while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
-#define num_booting_cpus()                     1
 #define smp_prepare_boot_cpu()                 do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
diff --git a/kernel/smp.c b/kernel/smp.c
index d0ae5b24875e0f73659d79ae6cd3f9ee11d5ab41..29dd40a9f2f403ab86f39c96ddb505851b1d36c2 100644
@@ -581,26 +581,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
        return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
-
-void ipi_call_lock(void)
-{
-       raw_spin_lock(&call_function.lock);
-}
-
-void ipi_call_unlock(void)
-{
-       raw_spin_unlock(&call_function.lock);
-}
-
-void ipi_call_lock_irq(void)
-{
-       raw_spin_lock_irq(&call_function.lock);
-}
-
-void ipi_call_unlock_irq(void)
-{
-       raw_spin_unlock_irq(&call_function.lock);
-}
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /* Setup configured maximum number of CPUs to activate */
diff --git a/kernel/smpboot.h b/kernel/smpboot.h
index 80c0acfb847211af69e9356677f45f8ea1c4c240..6ef9433e1c7001ff0d00799379de7e71ef1a0113 100644
@@ -3,8 +3,6 @@
 
 struct task_struct;
 
-int smpboot_prepare(unsigned int cpu);
-
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
 struct task_struct *idle_thread_get(unsigned int cpu);
 void idle_thread_set_boot_cpu(void);