x86, 64-bit: patch paravirt inline replacements when loading modules
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / arch / x86 / kernel / apic_64.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/ioport.h>
27 #include <linux/clockchips.h>
28 #include <linux/acpi_pmtmr.h>
29 #include <linux/module.h>
30
31 #include <asm/atomic.h>
32 #include <asm/smp.h>
33 #include <asm/mtrr.h>
34 #include <asm/mpspec.h>
35 #include <asm/hpet.h>
36 #include <asm/pgalloc.h>
37 #include <asm/nmi.h>
38 #include <asm/idle.h>
39 #include <asm/proto.h>
40 #include <asm/timex.h>
41 #include <asm/apic.h>
42
43 #include <mach_ipi.h>
44 #include <mach_apic.h>
45
/* Set by the "noapictimer" command line option; skips LAPIC timer setup. */
int disable_apic_timer __cpuinitdata;
/* Use the ACPI PM timer (instead of the TSC) for APIC timer calibration. */
static int apic_calibrate_pmtmr __initdata;
/* Non-zero when the local APIC has been disabled (command line or BIOS). */
int disable_apic;

/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
int apic_verbosity;

/* Resource describing the LAPIC MMIO window (for /proc/iomem). */
static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/* APIC timer ticks per jiffy, filled in by calibrate_APIC_clock(). */
static unsigned int calibration_result;

static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt);
static void lapic_timer_broadcast(cpumask_t mask);
static void apic_pm_activate(void);

/*
 * Clock event device template for the LAPIC timer. It starts life with
 * the DUMMY feature set; setup_boot_APIC_clock() clears it once the
 * timer has been calibrated and found usable.
 */
static struct clock_event_device lapic_clockevent = {
	.name = "lapic",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
		| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift = 32,
	.set_mode = lapic_timer_setup,
	.set_next_event = lapic_next_event,
	.broadcast = lapic_timer_broadcast,
	.rating = 100,
	.irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/* Physical address the LAPIC is mapped at; set by init_apic_mappings(). */
static unsigned long apic_phys;

unsigned long mp_lapic_addr;

DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

/* Upper CPU-count limit from the "maxcpus=" command line option. */
unsigned int __cpuinitdata maxcpus = NR_CPUS;
94 /*
95 * Get the LAPIC version
96 */
97 static inline int lapic_get_version(void)
98 {
99 return GET_APIC_VERSION(apic_read(APIC_LVR));
100 }
101
/*
 * Tell whether the APIC is integrated into the CPU or a separate chip.
 * On 64-bit the APIC is always integrated, so this is a constant.
 */
static inline int lapic_is_integrated(void)
{
	return 1;
}
109
/*
 * Check, whether this is a modern or a first generation APIC
 */
static int modern_apic(void)
{
	/* AMD systems use old APIC versions, so check the CPU */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;
	/* Version 0x14 or later indicates an integrated, modern APIC. */
	return lapic_get_version() >= 0x14;
}
121
122 void apic_wait_icr_idle(void)
123 {
124 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
125 cpu_relax();
126 }
127
128 u32 safe_apic_wait_icr_idle(void)
129 {
130 u32 send_status;
131 int timeout;
132
133 timeout = 0;
134 do {
135 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
136 if (!send_status)
137 break;
138 udelay(100);
139 } while (timeout++ < 1000);
140
141 return send_status;
142 }
143
144 /**
145 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
146 */
147 void __cpuinit enable_NMI_through_LVT0(void)
148 {
149 unsigned int v;
150
151 /* unmask and set to NMI */
152 v = APIC_DM_NMI;
153 apic_write(APIC_LVT0, v);
154 }
155
156 /**
157 * lapic_get_maxlvt - get the maximum number of local vector table entries
158 */
159 int lapic_get_maxlvt(void)
160 {
161 unsigned int v, maxlvt;
162
163 v = apic_read(APIC_LVR);
164 maxlvt = GET_APIC_MAXLVT(v);
165 return maxlvt;
166 }
167
/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clock. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 */

static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	/* Build the LVT timer entry: vector, mode and mask bits. */
	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR, (tmp_value
				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
				| APIC_TDR_DIV_16);

	/* Writing the initial count starts the timer in periodic mode. */
	if (!oneshot)
		apic_write(APIC_TMICT, clocks);
}
202
/*
 * Setup extended LVT, AMD specific (K8, family 10h)
 *
 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
 * MCE interrupts are supported. Thus MCE offset must be set to 0.
 */

#define APIC_EILVT_LVTOFF_MCE 0
#define APIC_EILVT_LVTOFF_IBS 1

/*
 * Program one extended LVT entry. Each entry is 16 bytes apart,
 * starting at APIC_EILVT0; the register layout is
 * [16]=mask, [10:8]=message type, [7:0]=vector.
 */
static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
	unsigned int v = (mask << 16) | (msg_type << 8) | vector;

	apic_write(reg, v);
}
220
221 u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
222 {
223 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
224 return APIC_EILVT_LVTOFF_MCE;
225 }
226
227 u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
228 {
229 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
230 return APIC_EILVT_LVTOFF_IBS;
231 }
232
/*
 * Program the next event, relative to now
 *
 * clockevents set_next_event callback: arm the one-shot timer by
 * writing the initial-count register. Always succeeds.
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}
242
/*
 * Setup the lapic timer in periodic or oneshot mode
 *
 * clockevents set_mode callback. Runs with interrupts disabled around
 * the register accesses so a timer interrupt cannot interleave.
 */
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return;

	local_irq_save(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		__setup_APIC_LVTT(calibration_result,
				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Mask the LVT timer entry; keep the vector programmed. */
		v = apic_read(APIC_LVTT);
		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, v);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}

	local_irq_restore(flags);
}
277
/*
 * Local APIC timer broadcast function
 *
 * Sends the local timer vector as an IPI to all CPUs in @mask. On UP
 * kernels this is a no-op (there is nobody to broadcast to).
 */
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
287
/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	/* Clone the boot-CPU template, then bind it to this CPU only. */
	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(levt);
}
301
/*
 * In this function we calibrate APIC bus clocks to the external
 * timer. Unfortunately we cannot use jiffies and the timer irq
 * to calibrate, since some later bootup code depends on getting
 * the first irq? Ugh.
 *
 * We want to do the calibration only once since we
 * want to have local timer irqs syncron. CPUs connected
 * by the same APIC bus have the very same bus frequency.
 * And we want to have irqs off anyways, no accidental
 * APIC irq that way.
 */

#define TICK_COUNT 100000000

static void __init calibrate_APIC_clock(void)
{
	unsigned apic, apic_start;
	unsigned long tsc, tsc_start;
	int result;	/* measured APIC timer frequency in Hz */

	local_irq_disable();

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 *
	 * No interrupt enable !
	 */
	__setup_APIC_LVTT(250000000, 0, 0);

	apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
	/* Calibrate against the ACPI PM timer when requested. */
	if (apic_calibrate_pmtmr && pmtmr_ioport) {
		pmtimer_wait(5000);  /* 5ms wait */
		apic = apic_read(APIC_TMCCT);
		result = (apic_start - apic) * 1000L / 5;
	} else
#endif
	{
		/* Otherwise calibrate against the TSC (tsc_khz). */
		rdtscll(tsc_start);

		/* The APIC counter counts down, so apic_start >= apic. */
		do {
			apic = apic_read(APIC_TMCCT);
			rdtscll(tsc);
		} while ((tsc - tsc_start) < TICK_COUNT &&
			 (apic_start - apic) < TICK_COUNT);

		result = (apic_start - apic) * 1000L * tsc_khz /
			 (tsc - tsc_start);
	}

	local_irq_enable();

	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
	       result / 1000 / 1000, result / 1000 % 1000);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
				       lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	/* Convert Hz to timer ticks per jiffy for __setup_APIC_LVTT(). */
	calibration_result = result / HZ;
}
372
/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel commandline.
	 * Register the lapic timer as a dummy clock event source on SMP
	 * systems, so the broadcast mechanism is used. On UP systems simply
	 * ignore it.
	 */
	if (disable_apic_timer) {
		printk(KERN_INFO "Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	printk(KERN_INFO "Using local APIC timer interrupts.\n");
	calibrate_APIC_clock();

	/*
	 * Do a sanity check on the APIC calibration result
	 * (must be at least 1 MHz worth of ticks per jiffy).
	 */
	if (calibration_result < (1000000 / HZ)) {
		printk(KERN_WARNING
		       "APIC frequency too slow, disabling apic timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going. Otherwise register lapic as a dummy
	 * device.
	 */
	if (nmi_watchdog != NMI_IO_APIC)
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
	else
		printk(KERN_WARNING "APIC timer registered as dummy,"
		       " due to nmi_watchdog=1!\n");

	setup_APIC_timer();
}
424
/*
 * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
 * C1E flag only in the secondary CPU, so when we detect the wreckage
 * we already have enabled the boot CPU local apic timer. Check, if
 * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
 * set the DUMMY flag again and force the broadcast mode in the
 * clockevents layer.
 */
static void __cpuinit check_boot_apic_timer_broadcast(void)
{
	/* Nothing to repair unless the timer was disabled late. */
	if (!disable_apic_timer ||
	    (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
		return;

	printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
	lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;

	/* clockevents_notify() needs interrupts enabled to take effect. */
	local_irq_enable();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
			   &boot_cpu_physical_apicid);
	local_irq_disable();
}
447
/*
 * Per-CPU LAPIC timer setup for secondary CPUs: repair a possible late
 * AMD C1E detection, then register this CPU's clock event device.
 */
void __cpuinit setup_secondary_APIC_clock(void)
{
	check_boot_apic_timer_broadcast();
	setup_APIC_timer();
}
453
/*
 * The guts of the apic timer interrupt
 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, its possible that there is a pending LAPIC
	 * timer interrupt from previous kernel's context and is delivered in
	 * new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * its possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		printk(KERN_WARNING
		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/* Hand off to the clockevents layer (tick handling etc.). */
	evt->event_handler(evt);
}
488
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
517
/* Profiling multiplier changes are not supported on x86-64. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
522
523
/*
 * Local APIC start and shutdown
 */

/**
 * clear_local_APIC - shutdown the local APIC
 *
 * This is called, when a CPU is disabled and before rebooting, so the state of
 * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
 * leftovers during boot.
 */
void clear_local_APIC(void)
{
	int maxlvt;
	u32 v;

	/* APIC hasn't been mapped yet */
	if (!apic_phys)
		return;

	maxlvt = lapic_get_maxlvt();
	/*
	 * Masking an LVT entry can trigger a local APIC error
	 * if the vector is zero. Mask LVTERR first to prevent this.
	 */
	if (maxlvt >= 3) {
		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
		apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	}
	/*
	 * Careful: we have to set masks only first to deassert
	 * any level-triggered sources.
	 */
	v = apic_read(APIC_LVTT);
	apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT1);
	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
	if (maxlvt >= 4) {
		v = apic_read(APIC_LVTPC);
		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
	}

	/*
	 * Clean APIC state for other OSs:
	 * (second pass zeroes everything but the mask bit)
	 */
	apic_write(APIC_LVTT, APIC_LVT_MASKED);
	apic_write(APIC_LVT0, APIC_LVT_MASKED);
	apic_write(APIC_LVT1, APIC_LVT_MASKED);
	if (maxlvt >= 3)
		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, APIC_LVT_MASKED);
	/* Clear the error status; a read after write latches fresh state. */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
}
581
/**
 * disable_local_APIC - clear and disable the local APIC
 */
void disable_local_APIC(void)
{
	unsigned int value;

	clear_local_APIC();

	/*
	 * Disable APIC (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);
}
599
/*
 * Shut down the local APIC with interrupts disabled, e.g. on reboot
 * or kexec. No-op when the CPU has no APIC.
 */
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic)
		return;

	local_irq_save(flags);

	disable_local_APIC();

	local_irq_restore(flags);
}
613
/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
 * started for no apparent reason.
 *
 * Returns 1 if the APIC looks genuine, 0 otherwise.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
	/* Attempted write must have no effect on real hardware. */
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers. If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonably.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = lapic_get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = read_apic_id();
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = read_apic_id();
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
	/* Restore the original ID before judging the result. */
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes aren't anymore.
	 */
	reg0 = apic_read(APIC_LVT0);
	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

	return 1;
}
674
/**
 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
 */
void __init sync_Arb_IDs(void)
{
	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
	if (modern_apic())
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	/* Level-triggered INIT to all-including-self syncs arbitration IDs. */
	apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
		   | APIC_DM_INIT);
}
693
/*
 * An initial setup of the virtual wire mode.
 */
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have a SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	value = apic_read(APIC_LVR);

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode:
	 * LINT0 delivers ExtINT (8259A), LINT1 delivers NMI.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	apic_write(APIC_LVT1, value);
}
732
/**
 * setup_local_APIC - setup the local APIC
 *
 * Full per-CPU LAPIC bring-up: LDR/TPR setup, stale-ISR cleanup,
 * software enable, and LINT0/LINT1 programming. Runs with preemption
 * disabled since it uses smp_processor_id() throughout.
 */
void __cpuinit setup_local_APIC(void)
{
	unsigned int value;
	int i, j;

	preempt_disable();
	value = apic_read(APIC_LVR);

	/* The spurious vector's low nibble must be all-ones by spec. */
	BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	if (!apic_id_registered())
		BUG();

	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116).  So here it goes...
	 */
	init_apic_ldr();

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, value);

	/*
	 * After a crash, we no longer service the interrupts and a pending
	 * interrupt from previous kernel might still have ISR bit set.
	 *
	 * Most probably by now CPU has serviced that pending interrupt and
	 * it might not have done the ack_APIC_irq() because it thought,
	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
	 * does not clear the ISR bit and cpu thinks it has already serivced
	 * the interrupt. Hence a vector might get locked. It was noticed
	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
	 */
	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
		value = apic_read(APIC_ISR + i*0x10);
		for (j = 31; j >= 0; j--) {
			if (value & (1<<j))
				ack_APIC_irq();
		}
	}

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

	/* We always use processor focus */

	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!smp_processor_id() && !value) {
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
			    smp_processor_id());
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
			    smp_processor_id());
	}
	apic_write(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!smp_processor_id())
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
	preempt_enable();
}
837
/* Program the error-interrupt LVT entry and clear any stale errors. */
static void __cpuinit lapic_setup_esr(void)
{
	unsigned maxlvt = lapic_get_maxlvt();

	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
}
849
/*
 * Final per-CPU APIC setup steps: error reporting, NMI watchdog and
 * power-management bookkeeping.
 */
void __cpuinit end_local_APIC_setup(void)
{
	lapic_setup_esr();
	nmi_watchdog_default();
	setup_apic_nmi_watchdog(NULL);
	apic_pm_activate();
}
857
/*
 * Detect and enable local APICs on non-SMP boards.
 * Original code written by Keir Fraser.
 * On AMD64 we trust the BIOS - if it says no APIC it is likely
 * not correctly set up (usually the APIC timer won't work etc.)
 *
 * Returns 0 on success, -1 when no usable APIC is present.
 */
static int __init detect_init_APIC(void)
{
	if (!cpu_has_apic) {
		printk(KERN_INFO "No local APIC present\n");
		return -1;
	}

	/* Fall back to the architectural default base and BSP id 0. */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_physical_apicid = 0;
	return 0;
}
875
/*
 * Early fixmap mapping of the local APIC, used before the full
 * init_apic_mappings() runs, when an MP table/MADT was found.
 */
void __init early_init_lapic_mapping(void)
{
	/*
	 * NOTE(review): this local deliberately(?) shadows the file-scope
	 * apic_phys, which therefore stays 0 on this path until
	 * init_apic_mappings() sets it — confirm this is intended.
	 */
	unsigned long apic_phys;

	/*
	 * If no local APIC can be found then go out
	 * : it means there is no mpatable and MADT
	 */
	if (!smp_found_config)
		return;

	apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
		    APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
}
899
/**
 * init_apic_mappings - initialize APIC mappings
 */
void __init init_apic_mappings(void)
{
	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
		    APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
}
926
/*
 * This initializes the IO-APIC and APIC hardware if this is
 * a UP kernel.
 *
 * Returns 0 on success, -1 when the APIC is disabled or absent.
 */
int __init APIC_init_uniprocessor(void)
{
	if (disable_apic) {
		printk(KERN_INFO "Apic disabled\n");
		return -1;
	}
	if (!cpu_has_apic) {
		disable_apic = 1;
		printk(KERN_INFO "Apic disabled by BIOS\n");
		return -1;
	}

	verify_local_APIC();

	/* The BSP is the only present CPU; force its APIC ID. */
	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));

	setup_local_APIC();

	/*
	 * Now enable IO-APICs, actually call clear_IO_APIC
	 * We need clear_IO_APIC before enabling vector on BP
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	end_local_APIC_setup();

	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;
	setup_boot_APIC_clock();
	check_nmi_watchdog();
	return 0;
}
967
/*
 * Local APIC interrupts
 */

/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;
	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	/* Keep a per-CPU count for /proc/interrupts. */
	add_pda(irq_spurious_count, 1);
	irq_exit();
}
992
/*
 * This interrupt should never happen with our APIC/SMP architecture
 */
asmlinkage void smp_error_interrupt(void)
{
	unsigned int v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	/* Writing 0 latches the current error state into the ESR. */
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
	       smp_processor_id(), v , v1);
	irq_exit();
}
1023
/*
 * Return the BSP's APIC to virtual-wire compatibility mode, e.g. before
 * rebooting into a kernel/firmware that expects legacy PIC delivery.
 * @virt_wire_setup: non-zero if the I/O APIC already provides the
 * virtual wire, in which case LINT0 is simply masked off.
 */
void disconnect_bsp_APIC(int virt_wire_setup)
{
	/* Go back to Virtual Wire compatibility mode */
	unsigned long value;

	/* For the spurious interrupt use vector F, and enable it */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= 0xf;
	apic_write(APIC_SPIV, value);

	if (!virt_wire_setup) {
		/*
		 * For LVT0 make it edge triggered, active high,
		 * external and enabled
		 */
		value = apic_read(APIC_LVT0);
		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			   APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			   APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
		apic_write(APIC_LVT0, value);
	} else {
		/* Disable LVT0 */
		apic_write(APIC_LVT0, APIC_LVT_MASKED);
	}

	/* For LVT1 make it edge triggered, active high, nmi and enabled */
	value = apic_read(APIC_LVT1);
	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
		   APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
		   APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
	apic_write(APIC_LVT1, value);
}
1062
/*
 * Register one processor found by MP-table/MADT parsing. Assigns the
 * next free logical CPU number (the BSP is always logical CPU 0) and
 * records the apicid <-> cpu mappings.
 */
void __cpuinit generic_processor_info(int apicid, int version)
{
	int cpu;
	cpumask_t tmp_map;

	/* Enforce the compile-time CPU limit. */
	if (num_processors >= NR_CPUS) {
		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
		       " Processor ignored.\n", NR_CPUS);
		return;
	}

	/* Enforce the "maxcpus=" command line limit. */
	if (num_processors >= maxcpus) {
		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
		       " Processor ignored.\n", maxcpus);
		return;
	}

	num_processors++;
	/* Pick the lowest logical cpu number not yet present. */
	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);

	physid_set(apicid, phys_cpu_present_map);
	if (apicid == boot_cpu_physical_apicid) {
		/*
		 * x86_bios_cpu_apicid is required to have processors listed
		 * in same order as logical cpu numbers. Hence the first
		 * entry is BSP, and so on.
		 */
		cpu = 0;
	}
	/* are we being called early in kernel startup? */
	if (x86_cpu_to_apicid_early_ptr) {
		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;

		cpu_to_apicid[cpu] = apicid;
		bios_cpu_apicid[cpu] = apicid;
	} else {
		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
	}

	cpu_set(cpu, cpu_possible_map);
	cpu_set(cpu, cpu_present_map);
}
1108
/*
 * Power management
 */
#ifdef CONFIG_PM

/* Saved LAPIC register state across suspend/resume. */
static struct {
	/* 'active' is true if the local APIC was enabled by us and
	   not the BIOS; this signifies that we are also responsible
	   for disabling it before entering apm/acpi suspend */
	int active;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;	/* thermal LVT, only if maxlvt >= 5 */
} apic_pm_state;
1134
/*
 * Sysdev suspend callback: snapshot all writable LAPIC registers into
 * apic_pm_state, then disable the APIC. Always returns 0.
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	/* Nothing to do if the BIOS owns the APIC. */
	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = read_apic_id();
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
1167
/*
 * Sysdev resume callback: re-enable the APIC via the APICBASE MSR and
 * restore the register snapshot taken by lapic_suspend(). The restore
 * order (LVTERR masked first, ESR cleared last) mirrors a safe bring-up.
 * Always returns 0.
 */
static int lapic_resume(struct sys_device *dev)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	local_irq_save(flags);
	/* Re-enable the APIC at its original physical base. */
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_BASE;
	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
	wrmsr(MSR_IA32_APICBASE, l, h);
	/* Keep error interrupts masked while restoring the other LVTs. */
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	local_irq_restore(flags);
	return 0;
}
1209
/* sysdev class hooking the local APIC into suspend/resume. */
static struct sysdev_class lapic_sysclass = {
	.name		= "lapic",
	.resume		= lapic_resume,
	.suspend	= lapic_suspend,
};
1215
/* The single sysdev instance representing the local APIC. */
static struct sys_device device_lapic = {
	.id	= 0,
	.cls	= &lapic_sysclass,
};
1220
/*
 * Mark the local APIC as enabled by the kernel (not the BIOS), which
 * arms the lapic_suspend()/lapic_resume() callbacks above.
 */
static void __cpuinit apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
1225
1226 static int __init init_lapic_sysfs(void)
1227 {
1228 int error;
1229
1230 if (!cpu_has_apic)
1231 return 0;
1232 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1233
1234 error = sysdev_class_register(&lapic_sysclass);
1235 if (!error)
1236 error = sysdev_register(&device_lapic);
1237 return error;
1238 }
1239 device_initcall(init_lapic_sysfs);
1240
#else /* CONFIG_PM */

/* Without CONFIG_PM there is no state to track: empty stub. */
static void apic_pm_activate(void) { }

#endif /* CONFIG_PM */
1246
1247 /*
1248 * apic_is_clustered_box() -- Check if we can expect good TSC
1249 *
1250 * Thus far, the major user of this is IBM's Summit2 series:
1251 *
1252 * Clustered boxes may have unsynced TSC problems if they are
1253 * multi-chassis. Use available data to take a good guess.
1254 * If in doubt, go HPET.
1255 */
1256 __cpuinit int apic_is_clustered_box(void)
1257 {
1258 int i, clusters, zeros;
1259 unsigned id;
1260 u16 *bios_cpu_apicid;
1261 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
1262
1263 /*
1264 * there is not this kind of box with AMD CPU yet.
1265 * Some AMD box with quadcore cpu and 8 sockets apicid
1266 * will be [4, 0x23] or [8, 0x27] could be thought to
1267 * vsmp box still need checking...
1268 */
1269 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
1270 return 0;
1271
1272 bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
1273 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
1274
1275 for (i = 0; i < NR_CPUS; i++) {
1276 /* are we being called early in kernel startup? */
1277 if (bios_cpu_apicid) {
1278 id = bios_cpu_apicid[i];
1279 }
1280 else if (i < nr_cpu_ids) {
1281 if (cpu_present(i))
1282 id = per_cpu(x86_bios_cpu_apicid, i);
1283 else
1284 continue;
1285 }
1286 else
1287 break;
1288
1289 if (id != BAD_APICID)
1290 __set_bit(APIC_CLUSTERID(id), clustermap);
1291 }
1292
1293 /* Problem: Partially populated chassis may not have CPUs in some of
1294 * the APIC clusters they have been allocated. Only present CPUs have
1295 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
1296 * Since clusters are allocated sequentially, count zeros only if
1297 * they are bounded by ones.
1298 */
1299 clusters = 0;
1300 zeros = 0;
1301 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
1302 if (test_bit(i, clustermap)) {
1303 clusters += 1 + zeros;
1304 zeros = 0;
1305 } else
1306 ++zeros;
1307 }
1308
1309 /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
1310 * not guaranteed to be synced between boards
1311 */
1312 if (is_vsmp_box() && clusters > 1)
1313 return 1;
1314
1315 /*
1316 * If clusters > 2, then should be multi-chassis.
1317 * May have to revisit this when multi-core + hyperthreaded CPUs come
1318 * out, but AFAIK this will work even for them.
1319 */
1320 return (clusters > 2);
1321 }
1322
1323 /*
1324 * APIC command line parameters
1325 */
1326 static int __init apic_set_verbosity(char *str)
1327 {
1328 if (str == NULL) {
1329 skip_ioapic_setup = 0;
1330 ioapic_force = 1;
1331 return 0;
1332 }
1333 if (strcmp("debug", str) == 0)
1334 apic_verbosity = APIC_DEBUG;
1335 else if (strcmp("verbose", str) == 0)
1336 apic_verbosity = APIC_VERBOSE;
1337 else {
1338 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
1339 " use apic=verbose or apic=debug\n", str);
1340 return -EINVAL;
1341 }
1342
1343 return 0;
1344 }
1345 early_param("apic", apic_set_verbosity);
1346
/* "disableapic": disable the local APIC and clear its feature bit. */
static __init int setup_disableapic(char *str)
{
	disable_apic = 1;
	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
	return 0;
}
early_param("disableapic", setup_disableapic);
1354
/* same as disableapic, for compatibility */
static __init int setup_nolapic(char *str)
{
	return setup_disableapic(str);
}
early_param("nolapic", setup_nolapic);
1361
/* "lapic_timer_c2_ok": trust the local APIC timer to keep running in C2. */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1368
/*
 * "noapictimer": disable the local APIC timer.
 * Only the bare option is accepted: anything other than a space or the
 * terminating NUL right after "noapictimer" makes this a non-match
 * (return 0 lets other __setup handlers see the string).
 */
static __init int setup_noapictimer(char *str)
{
	if (str[0] != ' ' && str[0] != 0)
		return 0;
	disable_apic_timer = 1;
	return 1;
}
__setup("noapictimer", setup_noapictimer);
1377
/*
 * "apicpmtimer": calibrate the APIC timer against the PM timer and
 * disable TSC-based timekeeping (notsc_setup).
 */
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
1385
1386 static int __init lapic_insert_resource(void)
1387 {
1388 if (!apic_phys)
1389 return -1;
1390
1391 /* Put local APIC into the resource map. */
1392 lapic_resource.start = apic_phys;
1393 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
1394 insert_resource(&iomem_resource, &lapic_resource);
1395
1396 return 0;
1397 }
1398
1399 /*
1400 * need call insert after e820_reserve_resources()
1401 * that is using request_resource
1402 */
1403 late_initcall(lapic_insert_resource);