--- /dev/null
-#ifdef CONFIG_X86_64
+ #include <linux/linkage.h>
+ #include <linux/errno.h>
+ #include <linux/signal.h>
+ #include <linux/sched.h>
+ #include <linux/ioport.h>
+ #include <linux/interrupt.h>
+ #include <linux/timex.h>
+ #include <linux/slab.h>
+ #include <linux/random.h>
+ #include <linux/kprobes.h>
+ #include <linux/init.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/sysdev.h>
+ #include <linux/bitops.h>
+ #include <linux/acpi.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+
+ #include <asm/atomic.h>
+ #include <asm/system.h>
+ #include <asm/timer.h>
+ #include <asm/hw_irq.h>
+ #include <asm/pgtable.h>
+ #include <asm/desc.h>
+ #include <asm/apic.h>
+ #include <asm/setup.h>
+ #include <asm/i8259.h>
+ #include <asm/traps.h>
+
+ /*
+ * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
+ * (these are usually mapped to vectors 0x30-0x3f)
+ */
+
+ /*
+ * The IO-APIC gives us many more interrupt sources. Most of these
+ * are unused but an SMP system is supposed to have enough memory ...
+ * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
+ * across the spectrum, so we really want to be prepared to get all
+ * of these. Plus, more powerful systems might have more than 64
+ * IO-APIC registers.
+ *
+ * (these are usually mapped into the 0x30-0xff vector range)
+ */
+
+ #ifdef CONFIG_X86_32
+ /*
+ * Note that on a 486, we don't want to do a SIGFPE on an irq13
+ * as the irq is unreliable, and exception 16 works correctly
+ * (i.e. as explained in the Intel literature). On a 386, you
+ * can't use exception 16 due to bad IBM design, so we have to
+ * rely on the less exact irq13.
+ *
+ * Careful.. Not only is IRQ13 unreliable, it also
+ * leads to races. IBM designers who came up with it should
+ * be shot.
+ */
+
+ static irqreturn_t math_error_irq(int cpl, void *dev_id)
+ {
+ outb(0, 0xF0);	/* writing port 0xF0 clears the FERR latch */
+ if (ignore_fpu_irq || !boot_cpu_data.hard_math)
+ return IRQ_NONE;
+ math_error((void __user *)get_irq_regs()->ip);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * New motherboards sometimes make IRQ 13 a PCI interrupt,
+ * so allow interrupt sharing.
+ */
+ static struct irqaction fpu_irq = {
+ .handler = math_error_irq,
+ .name = "fpu",
+ };
+ #endif
+
+ /*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+ static struct irqaction irq2 = {
+ .handler = no_action,
+ .name = "cascade",
+ };
+
+ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+ [0 ... IRQ0_VECTOR - 1] = -1,
+ [IRQ0_VECTOR] = 0,
+ [IRQ1_VECTOR] = 1,
+ [IRQ2_VECTOR] = 2,
+ [IRQ3_VECTOR] = 3,
+ [IRQ4_VECTOR] = 4,
+ [IRQ5_VECTOR] = 5,
+ [IRQ6_VECTOR] = 6,
+ [IRQ7_VECTOR] = 7,
+ [IRQ8_VECTOR] = 8,
+ [IRQ9_VECTOR] = 9,
+ [IRQ10_VECTOR] = 10,
+ [IRQ11_VECTOR] = 11,
+ [IRQ12_VECTOR] = 12,
+ [IRQ13_VECTOR] = 13,
+ [IRQ14_VECTOR] = 14,
+ [IRQ15_VECTOR] = 15,
+ [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+ };
+
+ int vector_used_by_percpu_irq(unsigned int vector)
+ {
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (per_cpu(vector_irq, cpu)[vector] != -1)
+ return 1;
+ }
+
+ return 0;
+ }
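
vector_used_by_percpu_irq() scans every online CPU's vector_irq table, so a caller can tell whether a vector is claimed anywhere before handing it out globally. A hypothetical caller, to show what the scan buys you; pick_free_vector() below is illustrative only and not part of this patch:

    /* Illustrative only: find a vector free on every online CPU. */
    static int pick_free_vector(void)
    {
    	int vector;

    	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
    		if (!vector_used_by_percpu_irq(vector))
    			return vector;	/* unclaimed on all online CPUs */
    	}
    	return -ENOSPC;
    }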
+
+ static void __init init_ISA_irqs(void)
+ {
+ int i;
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+ init_bsp_APIC();
+ #endif
+ init_8259A(0);
+
+ /*
+ * 16 old-style INTA-cycle interrupts:
+ */
+ for (i = 0; i < NR_IRQS_LEGACY; i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+
+ desc->status = IRQ_DISABLED;
+ desc->action = NULL;
+ desc->depth = 1;
+
+ set_irq_chip_and_handler_name(i, &i8259A_chip,
+ handle_level_irq, "XT");
+ }
+ }
+
+ /* Overridden in paravirt.c */
+ void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
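
The weak alias means the linker resolves init_IRQ() to native_init_IRQ() unless some other object file (here, paravirt.c) supplies a strong definition of init_IRQ(). The same pattern in a self-contained userspace sketch, with illustrative names:

    #include <stdio.h>

    void native_setup(void)
    {
    	printf("native setup\n");
    }

    /* The default: a weak alias that any strong setup() definition overrides. */
    void setup(void) __attribute__((weak, alias("native_setup")));

    int main(void)
    {
    	/*
    	 * Resolves to native_setup() unless another object linked in
    	 * provides a strong setup(), as paravirt.c does for init_IRQ().
    	 */
    	setup();
    	return 0;
    }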
+
+ static void __init smp_intr_init(void)
+ {
+ #ifdef CONFIG_SMP
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+ /*
+ * The reschedule interrupt is a CPU-to-CPU reschedule-helper
+ * IPI, driven by wakeup.
+ */
+ alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+
+ /* IPIs for invalidation */
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+ alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
+
+ /* IPI for generic function call */
+ alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+ /* IPI for generic single function call */
+ alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+ call_function_single_interrupt);
+
+ /* Low priority IPI to cleanup after moving an irq */
+ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+ set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
+ #endif
+ #endif /* CONFIG_SMP */
+ }
+
+ static void __init apic_intr_init(void)
+ {
+ smp_intr_init();
+
- alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
++#ifdef CONFIG_X86_THERMAL_VECTOR
+ alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
++#endif
++#ifdef CONFIG_X86_THRESHOLD
+ alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
+ #endif
++#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)
++ alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
++#endif
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+ /* self generated IPI for local APIC timer */
+ alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+
+ /* generic IPI for platform specific use */
+ alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
+
+ /* IPI vectors for APIC spurious and error interrupts */
+ alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+ alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+
+ /* Performance monitoring interrupts: */
+ # ifdef CONFIG_PERF_COUNTERS
-#endif
-
-#ifdef CONFIG_X86_32
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
- /* thermal monitor LVT interrupt */
- alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
-#endif
+ alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
+ # endif
+
+ #endif
+ }
+
+ /**
+ * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
+ *
+ * Description:
+ * Perform any necessary interrupt initialisation prior to setting up
+ * the "ordinary" interrupt call gates. For legacy reasons, the ISA
+ * interrupts should be initialised here if the machine emulates a PC
+ * in any way.
+ **/
+ static void __init x86_quirk_pre_intr_init(void)
+ {
+ #ifdef CONFIG_X86_32
+ if (x86_quirks->arch_pre_intr_init) {
+ if (x86_quirks->arch_pre_intr_init())
+ return;
+ }
+ #endif
+ init_ISA_irqs();
+ }
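
Returning nonzero from the quirk tells x86_quirk_pre_intr_init() that the platform has done its own setup and init_ISA_irqs() must be skipped. A sketch of what a 32-bit subarch hook might look like, assuming the x86_quirks mechanism of this kernel era; the myplat_* names are hypothetical:

    /* Hypothetical quirk: the platform takes over interrupt init. */
    static int myplat_pre_intr_init(void)
    {
    	/* ... program a custom interrupt controller instead of the 8259A ... */
    	return 1;	/* nonzero: skip init_ISA_irqs() */
    }

    static struct x86_quirks myplat_quirks = {
    	.arch_pre_intr_init	= myplat_pre_intr_init,
    };

    /* Installed early from the subarch's setup code: x86_quirks = &myplat_quirks; */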
+
+ void __init native_init_IRQ(void)
+ {
+ int i;
+
+ /* Execute any quirks before the call gates are initialised: */
+ x86_quirk_pre_intr_init();
+
+ apic_intr_init();
+
+ /*
+ * Cover the whole vector space, no vector can escape
+ * us. (some of these will be overridden and become
+ * 'special' SMP interrupts)
+ */
+ for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
+ /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
+ if (!test_bit(i, used_vectors))
+ set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
+ }
+
+ if (!acpi_ioapic)
+ setup_irq(2, &irq2);
+
+ #ifdef CONFIG_X86_32
+ /*
+ * Call quirks after call gates are initialised (they usually add
+ * architecture-specific gates):
+ */
+ x86_quirk_intr_init();
+
+ /*
+ * External FPU? Set up irq13 if so, for
+ * original braindamaged IBM FERR coupling.
+ */
+ if (boot_cpu_data.hard_math && !cpu_has_fpu)
+ setup_irq(FPU_IRQ, &fpu_irq);
+
+ irq_ctx_init(smp_processor_id());
+ #endif
+ }
* Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
* Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
*/
- #include <linux/interrupt.h>
- #include <linux/mmiotrace.h>
- #include <linux/bootmem.h>
- #include <linux/compiler.h>
- #include <linux/highmem.h>
- #include <linux/kprobes.h>
- #include <linux/uaccess.h>
- #include <linux/vmalloc.h>
- #include <linux/vt_kern.h>
- #include <linux/signal.h>
- #include <linux/kernel.h>
- #include <linux/ptrace.h>
- #include <linux/string.h>
- #include <linux/module.h>
- #include <linux/kdebug.h>
- #include <linux/errno.h>
- #include <linux/magic.h>
- #include <linux/sched.h>
- #include <linux/types.h>
- #include <linux/init.h>
- #include <linux/mman.h>
- #include <linux/tty.h>
- #include <linux/smp.h>
- #include <linux/mm.h>
- #include <linux/perf_counter.h>
-
- #include <asm-generic/sections.h>
-
- #include <asm/tlbflush.h>
- #include <asm/pgalloc.h>
- #include <asm/segment.h>
- #include <asm/system.h>
- #include <asm/proto.h>
- #include <asm/traps.h>
- #include <asm/desc.h>
+ #include <linux/magic.h> /* STACK_END_MAGIC */
+ #include <linux/sched.h> /* test_thread_flag(), ... */
+ #include <linux/kdebug.h> /* oops_begin/end, ... */
+ #include <linux/module.h> /* search_exception_table */
+ #include <linux/bootmem.h> /* max_low_pfn */
+ #include <linux/kprobes.h> /* __kprobes, ... */
+ #include <linux/mmiotrace.h> /* kmmio_handler, ... */
++#include <linux/perf_counter.h> /* perf_swcounter_event */
+
+ #include <asm/traps.h> /* dotraplinkage, ... */
+ #include <asm/pgalloc.h> /* pgd_*(), ... */
/*
* Page fault error code bits:
return sum;
}
- unsigned long nr_active(void)
+ /* Variables and functions for calc_load */
+ static atomic_long_t calc_load_tasks;
+ static unsigned long calc_load_update;
+ unsigned long avenrun[3];
+ EXPORT_SYMBOL(avenrun);
+
+ /**
+ * get_avenrun - get the load average array
+ * @loads: pointer to dest load array
+ * @offset: offset to add
+ * @shift: shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ {
+ loads[0] = (avenrun[0] + offset) << shift;
+ loads[1] = (avenrun[1] + offset) << shift;
+ loads[2] = (avenrun[2] + offset) << shift;
+ }
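
For reference, this is roughly the shape of the consumer in fs/proc/loadavg.c from the same era, trimmed here to the load fields: the FIXED_1/200 offset pre-rounds so that LOAD_INT()/LOAD_FRAC() (from <linux/sched.h>) truncate to the nearest 1/100.

    static int loadavg_proc_show(struct seq_file *m, void *v)
    {
    	unsigned long avnrun[3];

    	/* FIXED_1/200 pre-rounds before the >> FSHIFT integer/fraction split */
    	get_avenrun(avnrun, FIXED_1/200, 0);

    	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu\n",
    		   LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
    		   LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
    		   LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
    	return 0;
    }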
+
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
- unsigned long i, running = 0, uninterruptible = 0;
+ load *= exp;
+ load += active * (FIXED_1 - exp);
+ return load >> FSHIFT;
+ }
- for_each_online_cpu(i) {
- running += cpu_rq(i)->nr_running;
- uninterruptible += cpu_rq(i)->nr_uninterruptible;
- }
+ /*
+ * calc_global_load - update the avenrun load estimates 10 ticks after
+ * the CPUs have updated calc_load_tasks.
+ */
+ void calc_global_load(void)
+ {
+ unsigned long upd = calc_load_update + 10;
+ long active;
- if (unlikely((long)uninterruptible < 0))
- uninterruptible = 0;
+ if (time_before(jiffies, upd))
+ return;
- return running + uninterruptible;
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;
+
+ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+ calc_load_update += LOAD_FREQ;
+ }
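
calc_load() above is plain fixed-point exponential decay, new = old*exp + active*(FIXED_1 - exp) scaled back by >> FSHIFT, with FIXED_1 = 1 << FSHIFT = 2048 and EXP_1 = 1884, i.e. roughly 2048/e^(5s/60s). A worked LOAD_FREQ step for the 1-minute figure:

    /*
     * One LOAD_FREQ (5 s) step of avenrun[0], FIXED_1 = 2048, EXP_1 = 1884,
     * starting from a stored value of 2048 (a displayed load of 1.00):
     *
     *   idle (active = 0):
     *     load = 2048*1884 + 0*(2048-1884) = 3858432
     *     load >> FSHIFT = 1884            -> 1.00 decays to ~0.92
     *
     *   one runnable task (active = FIXED_1 = 2048):
     *     load = 2048*1884 + 2048*164 = 2048*2048
     *     load >> FSHIFT = 2048            -> the average holds at 1.00
     */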
+
+ /*
+ * Either called from update_cpu_load() or from a cpu going idle
+ */
+ static void calc_load_account_active(struct rq *this_rq)
+ {
+ long nr_active, delta;
+
+ nr_active = this_rq->nr_running;
+ nr_active += (long) this_rq->nr_uninterruptible;
+
+ if (nr_active != this_rq->calc_load_active) {
+ delta = nr_active - this_rq->calc_load_active;
+ this_rq->calc_load_active = nr_active;
+ atomic_long_add(delta, &calc_load_tasks);
+ }
}
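
Since each runqueue only publishes the change since its last sample, calc_load_tasks stays a cheap global sum that no CPU ever has to rebuild by reading the others' counters. A worked trace of two CPUs updating it:

    /*
     * Example: CPU0 goes 3 -> 1 runnable, CPU1 goes 0 -> 4.
     *   CPU0: delta = 1 - 3 = -2   atomic_long_add(-2, &calc_load_tasks)
     *   CPU1: delta = 4 - 0 = +4   atomic_long_add(+4, &calc_load_tasks)
     * Net effect: calc_load_tasks moves by +2, matching the true total.
     */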
+/*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+ return cpu_rq(cpu)->nr_migrations_in;
+}
+
/*
* Update rq->cpu_load[] statistics. This function is usually called every
* scheduler tick (TICK_NSEC).