header cleaning: don't include smp_lock.h when not used
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 9d84f6f001bfd44d71ee71ce0151fd2e9c84595b..93f202a855fa43e4ea3b831513015545b5287afc 100644
@@ -13,7 +13,6 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
-#include <linux/smp_lock.h>
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
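For context: <linux/smp_lock.h> only declares the Big Kernel Lock interface, roughly the sketch below (my paraphrase, not the verbatim header), and nothing in smp.c ever takes the BKL, so the include was dead weight.

	/* paraphrase of what <linux/smp_lock.h> provides: */
	extern void lock_kernel(void);		/* take the Big Kernel Lock */
	extern void unlock_kernel(void);	/* release it */
	extern int kernel_locked(void);		/* does this task hold it? */
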
@@ -165,20 +164,20 @@ void fastcall send_IPI_self(int vector)
 }
 
 /*
- * This is only used on smaller machines.
+ * This is used to send an IPI with no shorthand notation (the destination is
+ * specified in bits 56 to 63 of the ICR).
  */
-void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+static inline void __send_IPI_dest_field(unsigned long mask, int vector)
 {
-       unsigned long mask = cpus_addr(cpumask)[0];
        unsigned long cfg;
-       unsigned long flags;
 
-       local_irq_save(flags);
-       WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
        /*
         * Wait for idle.
         */
-       apic_wait_icr_idle();
+       if (unlikely(vector == NMI_VECTOR))
+               safe_apic_wait_icr_idle();
+       else
+               apic_wait_icr_idle();
                
        /*
         * prepare target chip field
@@ -195,13 +194,25 @@ void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
+}
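The NMI special case above exists because an NMI is typically sent to a CPU that may be wedged, and waiting unconditionally on the ICR busy bit could then hang the sender as well. A sketch of what safe_apic_wait_icr_idle() does (the real definition lives elsewhere in the tree; this paraphrase just shows the bounded wait):

	static unsigned long sketch_safe_wait_icr_idle(void)
	{
		unsigned long send_status;
		int timeout = 0;

		do {
			/* APIC_ICR_BUSY is the delivery-status bit */
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
			if (!send_status)
				break;
			udelay(100);		/* back off briefly... */
		} while (timeout++ < 1000);	/* ...but give up eventually */

		return send_status;		/* non-zero: ICR never went idle */
	}
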
 
+/*
+ * This is only used on smaller machines.
+ */
+void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+{
+       unsigned long mask = cpus_addr(cpumask)[0];
+       unsigned long flags;
+
+       local_irq_save(flags);
+       WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+       __send_IPI_dest_field(mask, vector);
        local_irq_restore(flags);
 }
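"Smaller machines" means flat logical destination mode, where each of up to eight CPUs owns one bit of the 8-bit destination field, so one ICR write can target any subset at once. A hypothetical call site (illustrative only; RESCHEDULE_VECTOR is the existing i386 vector name):

	cpumask_t targets = CPU_MASK_NONE;

	cpu_set(1, targets);
	cpu_set(2, targets);
	send_IPI_mask_bitmask(targets, RESCHEDULE_VECTOR);	/* one IPI, two CPUs */
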
 
 void send_IPI_mask_sequence(cpumask_t mask, int vector)
 {
-       unsigned long cfg, flags;
+       unsigned long flags;
        unsigned int query_cpu;
 
        /*
@@ -211,30 +222,10 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
         */ 
 
        local_irq_save(flags);
-
        for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
                if (cpu_isset(query_cpu, mask)) {
-               
-                       /*
-                        * Wait for idle.
-                        */
-                       apic_wait_icr_idle();
-               
-                       /*
-                        * prepare target chip field
-                        */
-                       cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
-                       apic_write_around(APIC_ICR2, cfg);
-               
-                       /*
-                        * program the ICR 
-                        */
-                       cfg = __prepare_ICR(0, vector);
-                       
-                       /*
-                        * Send the IPI. The write to APIC_ICR fires this off.
-                        */
-                       apic_write_around(APIC_ICR, cfg);
+                       __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
+                                             vector);
                }
        }
        local_irq_restore(flags);
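With the duplication gone, both senders funnel into __send_IPI_dest_field(); the sequence variant still has to unicast once per CPU because clustered APIC addressing cannot express an arbitrary mask. The destination lands in ICR bits 56..63 by way of ICR2, which maps the high half of the 64-bit ICR, roughly (paraphrasing the <asm/apicdef.h> helper used by __prepare_ICR2()):

	/* bits 24..31 of ICR2 are bits 56..63 of the full ICR */
	#define SET_APIC_DEST_FIELD(x)	((x) << 24)
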
@@ -256,7 +247,6 @@ static cpumask_t flush_cpumask;
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
-#define FLUSH_ALL      0xffffffff
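The file-private FLUSH_ALL constant gives way to TLB_FLUSH_ALL which, if I read this tree right, is now provided by <asm/tlbflush.h> with the same value, so every flush implementation can share it:

	/* assumed definition in <asm/tlbflush.h>, matching the old one: */
	#define TLB_FLUSH_ALL	0xffffffff
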
 
 /*
  * We cannot call mmdrop() because we are in interrupt context, 
@@ -338,7 +328,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
                 
        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-                       if (flush_va == FLUSH_ALL)
+                       if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
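The two branches differ in scope: local_flush_tlb() reloads CR3, discarding every non-global TLB entry, while __flush_tlb_one() evicts a single page with invlpg. A rough sketch of the underlying operations (paraphrased, not the exact <asm/tlbflush.h> macros):

	static inline void sketch_flush_all(void)
	{
		unsigned long cr3;

		/* rewriting CR3 invalidates all non-global entries */
		asm volatile("movl %%cr3, %0" : "=r" (cr3));
		asm volatile("movl %0, %%cr3" : : "r" (cr3) : "memory");
	}

	static inline void sketch_flush_one(unsigned long va)
	{
		/* invlpg drops only the entry for one virtual address */
		asm volatile("invlpg (%0)" : : "r" (va) : "memory");
	}
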
@@ -353,9 +343,11 @@ out:
        put_cpu_no_resched();
 }
 
-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-                                               unsigned long va)
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
+                            unsigned long va)
 {
+       cpumask_t cpumask = *cpumaskp;
+
        /*
         * A couple of (to be removed) sanity checks:
         *
@@ -366,10 +358,12 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);
 
+#ifdef CONFIG_HOTPLUG_CPU
        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
-       if (cpus_empty(cpumask))
+       if (unlikely(cpus_empty(cpumask)))
                return;
+#endif
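Two changes meet here: the function gains a native_ prefix and takes its mask by pointer, which suits an indirect call interface (and avoids copying a potentially multi-word cpumask at each call site, at the cost of the one local copy above), and the online-map filtering is compiled in only when CPUs can actually go away. A hypothetical shape for such an indirection, with made-up names, purely for illustration:

	struct tlb_ops_sketch {				/* not the real structure */
		void (*flush_tlb_others)(const cpumask_t *mask,
					 struct mm_struct *mm,
					 unsigned long va);
	};

	static struct tlb_ops_sketch tlb_ops = {
		.flush_tlb_others = native_flush_tlb_others,
	};
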
 
        /*
         * i'm not happy about this global shared spinlock in the
@@ -380,17 +374,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        
        flush_mm = mm;
        flush_va = va;
-#if NR_CPUS <= BITS_PER_LONG
-       atomic_set_mask(cpumask, &flush_cpumask);
-#else
-       {
-               int k;
-               unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
-               unsigned long *cpu_mask = (unsigned long *)&cpumask;
-               for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
-                       atomic_set_mask(cpu_mask[k], &flush_mask[k]);
-       }
-#endif
+       cpus_or(flush_cpumask, cpumask, flush_cpumask);
        /*
         * We have to send the IPI only to
         * CPUs affected.
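The word-by-word atomic_set_mask() loop collapses into one cpus_or(). That is safe because every writer reaches this point holding tlbstate_lock and waits for flush_cpumask to drain before dropping it, so the update needs no atomicity; the interrupt handlers each clear only their own bit. cpus_or() is just a non-atomic bitmap OR, roughly:

	/* paraphrase of the <linux/cpumask.h> helper: */
	#define sketch_cpus_or(dst, s1, s2) \
		bitmap_or((dst).bits, (s1).bits, (s2).bits, NR_CPUS)
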
@@ -417,7 +401,7 @@ void flush_tlb_current_task(void)
 
        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
 }
 
@@ -436,7 +420,7 @@ void flush_tlb_mm (struct mm_struct * mm)
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
        preempt_enable();
 }