[PATCH] i386: __send_IPI_dest_field
arch/i386/kernel/smp.c

/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <mach_apic.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 * E1AP.  see PII A1AP
 * E2AP.  see PII A2AP
 * E3AP.  see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 * A1AP.  see PPro 1AP
 * A2AP.  see PPro 2AP
 * A3AP.  see PPro 7AP
 *
 * Pentium Pro
 *   None of the 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 * 1AP.   Linux maps APIC as non-cacheable
 * 2AP.   worked around in hardware
 * 3AP.   fixed in C0 and above steppings microcode update.
 *        Linux does not use excessive STARTUP_IPIs.
 * 4AP.   worked around in hardware
 * 5AP.   symmetric IO mode (normal Linux operation) not affected.
 *        'noapic' mode has vector 0xf filled out properly.
 * 6AP.   'noapic' mode might be affected - fixed in later steppings
 * 7AP.   We do not assume writes to the LVT deasserting IRQs
 * 8AP.   We do not enable low power mode (deep sleep) during MP bootup
 * 9AP.   We do not use mixed mode
 *
 * Pentium
 *   There is a marginal case where REP MOVS on 100MHz SMP
 *   machines with B stepping processors can fail. XXX should provide
 *   an L1cache=Writethrough or L1cache=off option.
 *
 *   B stepping CPUs may hang. There are hardware workarounds
 *   for this. We warn about it in case your board doesn't have the
 *   workarounds. Basically that's so I can tell anyone with a B stepping
 *   CPU and SMP problems "tough".
 *
 * Specific items [From Pentium Processor Specification Update]
 *
 * 1AP.   Linux doesn't use remote read
 * 2AP.   Linux doesn't trust APIC errors
 * 3AP.   We work around this
 * 4AP.   Linux never generates 3 interrupts of the same priority
 *        to cause a lost local interrupt.
 * 5AP.   Remote read is never used
 * 6AP.   not affected - worked around in hardware
 * 7AP.   not affected - worked around in hardware
 * 8AP.   worked around in hardware - we get explicit CS errors if not
 * 9AP.   only 'noapic' mode affected. Might generate spurious
 *        interrupts, we log only the first one and count the
 *        rest silently.
 * 10AP.  not affected - worked around in hardware
 * 11AP.  Linux reads the APIC between writes to avoid this, as per
 *        the documentation. Make sure you preserve this as it affects
 *        the C stepping chips too.
 * 12AP.  not affected - worked around in hardware
 * 13AP.  not affected - worked around in hardware
 * 14AP.  we always deassert INIT during bootup
 * 15AP.  not affected - worked around in hardware
 * 16AP.  not affected - worked around in hardware
 * 17AP.  not affected - worked around in hardware
 * 18AP.  not affected - worked around in hardware
 * 19AP.  not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___,
 * or are signal timing bugs worked around in hardware, and there's
 * next to nothing of note with C stepping upwards.
 */

DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline int __prepare_ICR(unsigned int shortcut, int vector)
{
        unsigned int icr = shortcut | APIC_DEST_LOGICAL;

        switch (vector) {
        default:
                icr |= APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr |= APIC_DM_NMI;
                break;
        }
        return icr;
}

static inline int __prepare_ICR2(unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}
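
/*
 * Illustration, not part of the original file: for a fixed-delivery,
 * logical-mode IPI on, say, vector 0xfd with no shorthand, the two
 * helpers above compose the architectural 64-bit ICR roughly as
 *
 *      low word  (APIC_ICR):  APIC_DEST_LOGICAL (bit 11, logical
 *                             destination mode) | APIC_DM_FIXED
 *                             (bits 8-10, delivery mode) | 0xfd
 *                             (bits 0-7, vector)
 *      high word (APIC_ICR2): SET_APIC_DEST_FIELD(mask), i.e. the
 *                             destination mask shifted into bits 24-31
 *                             (bits 56-63 of the full ICR)
 *
 * NMI_VECTOR instead selects APIC_DM_NMI, for which the hardware
 * ignores the vector bits.
 */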

void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe. As we don't care
         * about the value read we use an atomic rmw access to avoid costly
         * cli/sti. Otherwise we use an even cheaper single atomic write
         * to the APIC.
         */
        unsigned int cfg;

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * No need to touch the target chip field
         */
        cfg = __prepare_ICR(shortcut, vector);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
}
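
/*
 * For reference (an assumption about the companion mach_ipi.h headers,
 * not code in this file): the default-APIC send_IPI_allbutself() and
 * send_IPI_all() helpers are thin wrappers that pass APIC_DEST_ALLBUT
 * or APIC_DEST_ALLINC as the shortcut here, so a single ICR write
 * reaches every other (or every) CPU without programming a destination
 * field.
 */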

void fastcall send_IPI_self(int vector)
{
        __send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned long mask, int vector)
{
        unsigned long cfg;

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(mask);
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
}
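
/*
 * Note the ordering above (an observation, not new behaviour): APIC_ICR2
 * must be written first, because it only latches the destination; the
 * write to APIC_ICR is what actually triggers delivery. The callers
 * below run with interrupts disabled, so the ICR2/ICR pair cannot be
 * torn apart by a nested IPI sent from interrupt context.
 */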

/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
        unsigned long mask = cpus_addr(cpumask)[0];
        unsigned long flags;

        local_irq_save(flags);
        WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
        __send_IPI_dest_field(mask, vector);
        local_irq_restore(flags);
}

void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
        unsigned long flags;
        unsigned int query_cpu;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so we do a unicast to each CPU instead. This
         * should be modified to do 1 message per cluster ID - mbligh
         */

        local_irq_save(flags);
        for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
                if (cpu_isset(query_cpu, mask)) {
                        __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
                                              vector);
                }
        }
        local_irq_restore(flags);
}
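
/*
 * To summarize the two strategies (an editorial note, not original text):
 * send_IPI_mask_bitmask() relies on flat logical mode, where up to eight
 * CPUs map directly onto the 8-bit destination field, so the whole mask
 * goes out in a single ICR write; send_IPI_mask_sequence() exists for
 * clustered modes with no direct encoding of an arbitrary mask, so it
 * degrades to one unicast per target CPU. Which of the two backs
 * send_IPI_mask() is decided by the sub-architecture's mach_ipi.h.
 */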

#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void leave_mm(unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
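
/*
 * Background sketch (not original text): leave_mm() is only legal in
 * lazy TLB mode, i.e. when a kernel thread is borrowing a user mm it
 * never dereferences (hence the BUG() on TLBSTATE_OK). Clearing our
 * bit in cpu_vm_mask stops further flush IPIs for that mm, and loading
 * swapper_pg_dir drops any stale user mappings this CPU may have
 * speculatively cached.
 */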

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop IPI delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush IPIs
 *      for the wrong mm, so in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *      Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush IPIs.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *      flush IPIs.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush IPIs],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 */

fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the Intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpu_clear(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu_no_resched();
}

void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t cpumask = *cpumaskp;

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (cpus_empty(cpumask))
                return;

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * AK: x86-64 has a faster method that could be ported.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
        atomic_set_mask(cpumask, &flush_cpumask);
#else
        {
                int k;
                unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
                unsigned long *cpu_mask = (unsigned long *)&cpumask;

                for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
                        atomic_set_mask(cpu_mask[k], &flush_mask[k]);
        }
#endif
        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

        while (!cpus_empty(flush_cpumask))
                /* nothing. lockup detection does not belong here */
                cpu_relax();

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
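
/*
 * Editorial summary of the handshake above: tlbstate_lock serializes
 * initiators, so flush_mm/flush_va are stable while flush_cpumask is
 * non-empty. Each target CPU acks by clearing its own bit in
 * flush_cpumask (with the smp_mb__*_clear_bit() barriers in
 * smp_invalidate_interrupt ordering the flush before the ack), and the
 * initiator simply spins until the mask drains.
 */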

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
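
/*
 * A note on the call above (my reading of this kernel generation's API,
 * flagged as an assumption): on_each_cpu(func, info, retry, wait) runs
 * func on every online CPU including the caller; the trailing (1, 1)
 * arguments are the historical 'retry' and 'wait' parameters, so
 * flush_tlb_all() returns only after every CPU has flushed.
 */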

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
        WARN_ON(cpu_is_offline(cpu));
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func)(void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

void lock_ipi_call_lock(void)
{
        spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
        spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;

static void __smp_call_function(void (*func)(void *info), void *info,
                                int nonatomic, int wait)
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;

        if (!cpus)
                return;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
}
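
/*
 * Sketch of the rendezvous used above and in the interrupt handler
 * further down (commentary, not original text):
 *
 *      initiator                       each target CPU
 *      ---------                       ---------------
 *      fill call_data, mb()
 *      send CALL_FUNCTION_VECTOR  -->  copy func/info/wait to locals
 *      spin until started == cpus <--  mb(); atomic_inc(&started)
 *                                      run func(info)
 *      if (wait) spin until       <--  if (wait) mb();
 *          finished == cpus                atomic_inc(&finished)
 *
 * The 'started' count is what makes it safe for the initiator to keep
 * call_data on its stack: targets promise not to touch it after they
 * bump 'started' (unless wait is set).
 */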

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on. Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int native_smp_call_function_mask(cpumask_t mask,
                                  void (*func)(void *), void *info,
                                  int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
        spin_unlock(&call_lock);

        return 0;
}
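
/*
 * Hypothetical usage sketch (the helper name and mask setup are invented
 * for illustration):
 *
 *      static void drain_example(void *unused)
 *      {
 *              // runs on each selected CPU, in interrupt context
 *      }
 *
 *      cpumask_t targets = cpu_online_map;
 *      cpu_clear(smp_processor_id(), targets);
 *      smp_call_function_mask(targets, drain_example, NULL, 1);
 *
 * With wait == 1 the caller blocks until drain_example() has finished
 * everywhere, so any data 'info' points at may safely live on the stack.
 */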

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
                      int wait)
{
        return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);

/**
 * smp_call_function_single - Run a function on another CPU
 * @cpu: The target CPU. Cannot be the calling CPU.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 */
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
                             int nonatomic, int wait)
{
        /* prevent preemption and reschedule on another processor */
        int ret;
        int me = get_cpu();

        if (cpu == me) {
                WARN_ON(1);
                put_cpu();
                return -EBUSY;
        }

        ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);

        put_cpu();
        return ret;
}
EXPORT_SYMBOL(smp_call_function_single);

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_local_APIC();
        if (cpu_data[smp_processor_id()].hlt_works_ok)
                for (;;) halt();
        for (;;);
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void native_smp_send_stop(void)
{
        /* Don't deadlock on the call lock in panic */
        int nolock = !spin_trylock(&call_lock);
        unsigned long flags;

        local_irq_save(flags);
        __smp_call_function(stop_this_cpu, NULL, 0, 0);
        if (!nolock)
                spin_unlock(&call_lock);
        disable_local_APIC();
        local_irq_restore(flags);
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
        ack_APIC_irq();
}

fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
        void (*func)(void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        ack_APIC_irq();
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        irq_exit();

        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}
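
/*
 * Why func/info/wait are copied to locals first (commentary): the
 * moment this CPU bumps 'started', the initiator is free to return and
 * reuse the stack slot holding call_data, so nothing global may be
 * dereferenced afterwards unless wait == 1, in which case the initiator
 * keeps the structure alive until 'finished' catches up.
 */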

static int convert_apicid_to_cpu(int apic_id)
{
        int i;

        for (i = 0; i < NR_CPUS; i++) {
                if (x86_cpu_to_apicid[i] == apic_id)
                        return i;
        }
        return -1;
}

int safe_smp_processor_id(void)
{
        int apicid, cpuid;

        if (!boot_cpu_has(X86_FEATURE_APIC))
                return 0;

        apicid = hard_smp_processor_id();
        if (apicid == BAD_APICID)
                return 0;

        cpuid = convert_apicid_to_cpu(apicid);

        return cpuid >= 0 ? cpuid : 0;
}

struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
        .smp_prepare_cpus = native_smp_prepare_cpus,
        .cpu_up = native_cpu_up,
        .smp_cpus_done = native_smp_cpus_done,

        .smp_send_stop = native_smp_send_stop,
        .smp_send_reschedule = native_smp_send_reschedule,
        .smp_call_function_mask = native_smp_call_function_mask,
};