/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <mach_apic.h>
/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps the APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generated 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___,
 *	or are signal timing bugs worked around in hardware and there's
 *	just about nothing of note with C stepping upwards.
 */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
/*
 *	The following functions deal with sending IPIs between CPUs.
 *
 *	We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	icr |= APIC_DM_FIXED | vector;
	return icr;
}
static inline int __prepare_ICR2 (unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti. Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
void fastcall send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long cfg;
	unsigned long flags;

	local_irq_save(flags);
	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	local_irq_restore(flags);
}
void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
	unsigned long cfg, flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do unicasts to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);

	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		if (cpu_isset(query_cpu, mask)) {

			/*
			 * Wait for idle.
			 */
			apic_wait_icr_idle();

			/*
			 * prepare target chip field
			 */
			cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
			apic_write_around(APIC_ICR2, cfg);

			/*
			 * program the ICR
			 */
			cfg = __prepare_ICR(0, vector);

			/*
			 * Send the IPI. The write to APIC_ICR fires this off.
			 */
			apic_write_around(APIC_ICR, cfg);
		}
	}
	local_irq_restore(flags);
}
#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void leave_mm (unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 *
 * (A simplified sketch of the switch_mm() ordering above follows
 * smp_invalidate_interrupt() below.)
 */

/*
 * The flush IPI does two things:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
}
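/*
 * Illustrative sketch, not part of the original file: the switch_mm()
 * ordering that the "flush IPI" comment above assumes, for the
 * thread-switch-to-a-different-mm case (steps 1a1-1a4). The real
 * implementation lives in the mmu_context header; the function name here
 * is hypothetical and the details are simplified.
 */
static inline void example_switch_mm_order(struct mm_struct *old_mm,
					   struct mm_struct *new_mm,
					   unsigned int cpu)
{
	cpu_clear(cpu, old_mm->cpu_vm_mask);		/* 1a1: stop flush IPIs for the old mm */
	per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;	/* 1a2: no longer in lazy tlb mode */
	per_cpu(cpu_tlbstate, cpu).active_mm = new_mm;	/* 1a3: accept flushes for the new mm */
	cpu_set(cpu, new_mm->cpu_vm_mask);		/* 1a4: other cpus start sending flush IPIs */
	load_cr3(new_mm->pgd);				/* finally switch the page tables */
}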
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask, &flush_cpumask);
#else
	{
		int k;
		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
		unsigned long *cpu_mask = (unsigned long *)&cpumask;
		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
	}
#endif
	/*
	 * We have to send the IPI only to
	 * the CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_mm (struct mm_struct * mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
{
	struct call_data_struct data;
	int cpus;

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
static void stop_this_cpu (void * dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_local_APIC();
	if (cpu_data[smp_processor_id()].hlt_works_ok)
		for (;;) halt();
	for (;;);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
}
fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/* Send a message to the target CPU and wait for it to respond */
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}
/*
 * smp_call_function_single - Run a function on another CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or has executed it.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	if (cpu == me) {
		WARN_ON(1);
		put_cpu();
		return -EBUSY;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock_bh(&call_lock);
	__smp_call_function_single(cpu, func, info, nonatomic, wait);
	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (x86_cpu_to_apicid[i] == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}