From: Ingo Molnar
Date: Thu, 11 Jun 2009 15:55:42 +0000 (+0200)
Subject: Merge branch 'linus' into perfcounters/core
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=940010c5a314a7bd9b498593bc6ba1718ac5aec5;p=GitHub%2FLineageOS%2Fandroid_kernel_samsung_universal7580.git

Merge branch 'linus' into perfcounters/core

Conflicts:
	arch/x86/kernel/irqinit.c
	arch/x86/kernel/irqinit_64.c
	arch/x86/kernel/traps.c
	arch/x86/mm/fault.c
	include/linux/sched.h
	kernel/exit.c
---

940010c5a314a7bd9b498593bc6ba1718ac5aec5
diff --cc arch/x86/kernel/irqinit.c
index 00000000000,2e08b10ad51..267c6624c77
mode 000000,100644..100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@@ -1,0 -1,275 +1,272 @@@
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+
+ /*
+  * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
+  * (these are usually mapped to vectors 0x30-0x3f)
+  */
+
+ /*
+  * The IO-APIC gives us many more interrupt sources. Most of these
+  * are unused but an SMP system is supposed to have enough memory ...
+  * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
+  * across the spectrum, so we really want to be prepared to get all
+  * of these. Plus, more powerful systems might have more than 64
+  * IO-APIC registers.
+  *
+  * (these are usually mapped into the 0x30-0xff vector range)
+  */
+
+ #ifdef CONFIG_X86_32
+ /*
+  * Note that on a 486, we don't want to do a SIGFPE on an irq13
+  * as the irq is unreliable, and exception 16 works correctly
+  * (ie as explained in the intel literature). On a 386, you
+  * can't use exception 16 due to bad IBM design, so we have to
+  * rely on the less exact irq13.
+  *
+  * Careful.. Not only is IRQ13 unreliable, but it also
+  * leads to races. IBM designers who came up with it should
+  * be shot.
+  */
+
+ static irqreturn_t math_error_irq(int cpl, void *dev_id)
+ {
+         outb(0, 0xF0);
+         if (ignore_fpu_irq || !boot_cpu_data.hard_math)
+                 return IRQ_NONE;
+         math_error((void __user *)get_irq_regs()->ip);
+         return IRQ_HANDLED;
+ }
+
+ /*
+  * New motherboards sometimes make IRQ 13 be a PCI interrupt,
+  * so allow interrupt sharing.
+  */
+ static struct irqaction fpu_irq = {
+         .handler = math_error_irq,
+         .name = "fpu",
+ };
+ #endif
+
+ /*
+  * IRQ2 is cascade interrupt to second interrupt controller
+  */
+ static struct irqaction irq2 = {
+         .handler = no_action,
+         .name = "cascade",
+ };
+
+ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+         [0 ... IRQ0_VECTOR - 1] = -1,
+         [IRQ0_VECTOR] = 0,
+         [IRQ1_VECTOR] = 1,
+         [IRQ2_VECTOR] = 2,
+         [IRQ3_VECTOR] = 3,
+         [IRQ4_VECTOR] = 4,
+         [IRQ5_VECTOR] = 5,
+         [IRQ6_VECTOR] = 6,
+         [IRQ7_VECTOR] = 7,
+         [IRQ8_VECTOR] = 8,
+         [IRQ9_VECTOR] = 9,
+         [IRQ10_VECTOR] = 10,
+         [IRQ11_VECTOR] = 11,
+         [IRQ12_VECTOR] = 12,
+         [IRQ13_VECTOR] = 13,
+         [IRQ14_VECTOR] = 14,
+         [IRQ15_VECTOR] = 15,
+         [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+ };
+
+ int vector_used_by_percpu_irq(unsigned int vector)
+ {
+         int cpu;
+
+         for_each_online_cpu(cpu) {
+                 if (per_cpu(vector_irq, cpu)[vector] != -1)
+                         return 1;
+         }
+
+         return 0;
+ }
+
+ static void __init init_ISA_irqs(void)
+ {
+         int i;
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+         init_bsp_APIC();
+ #endif
+         init_8259A(0);
+
+         /*
+          * 16 old-style INTA-cycle interrupts:
+          */
+         for (i = 0; i < NR_IRQS_LEGACY; i++) {
+                 struct irq_desc *desc = irq_to_desc(i);
+
+                 desc->status = IRQ_DISABLED;
+                 desc->action = NULL;
+                 desc->depth = 1;
+
+                 set_irq_chip_and_handler_name(i, &i8259A_chip,
+                                               handle_level_irq, "XT");
+         }
+ }
+
+ /* Overridden in paravirt.c */
+ void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+ static void __init smp_intr_init(void)
+ {
+ #ifdef CONFIG_SMP
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+         /*
+          * The reschedule interrupt is a CPU-to-CPU reschedule-helper
+          * IPI, driven by wakeup.
+          */
+         alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+
+         /* IPIs for invalidation */
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+         alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
+
+         /* IPI for generic function call */
+         alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+         /* IPI for generic single function call */
+         alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+                         call_function_single_interrupt);
+
+         /* Low priority IPI to cleanup after moving an irq */
+         set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+         set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
+ #endif
+ #endif /* CONFIG_SMP */
+ }
+
+ static void __init apic_intr_init(void)
+ {
+         smp_intr_init();
+
 -#ifdef CONFIG_X86_64
++#ifdef CONFIG_X86_THERMAL_VECTOR
+         alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
++#endif
++#ifdef CONFIG_X86_THRESHOLD
+         alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
+ #endif
++#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)
++        alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
++#endif
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+         /* self generated IPI for local APIC timer */
+         alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+
+         /* generic IPI for platform specific use */
+         alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
+
+         /* IPI vectors for APIC spurious and error interrupts */
+         alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+         alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+
+         /* Performance monitoring interrupts: */
+ # ifdef CONFIG_PERF_COUNTERS
 -        alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
+         alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
+ # endif
+
 -#endif
 -
 -#ifdef CONFIG_X86_32
 -#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
 -        /* thermal monitor LVT interrupt */
 -        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 -#endif
+ #endif
+ }
+
+ /**
+  * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
+  *
+  * Description:
+  *      Perform any necessary interrupt initialisation prior to setting up
+  *      the "ordinary" interrupt call gates. For legacy reasons, the ISA
+  *      interrupts should be initialised here if the machine emulates a PC
+  *      in any way.
+  **/
+ static void __init x86_quirk_pre_intr_init(void)
+ {
+ #ifdef CONFIG_X86_32
+         if (x86_quirks->arch_pre_intr_init) {
+                 if (x86_quirks->arch_pre_intr_init())
+                         return;
+         }
+ #endif
+         init_ISA_irqs();
+ }
+
+ void __init native_init_IRQ(void)
+ {
+         int i;
+
+         /* Execute any quirks before the call gates are initialised: */
+         x86_quirk_pre_intr_init();
+
+         apic_intr_init();
+
+         /*
+          * Cover the whole vector space, no vector can escape
+          * us. (some of these will be overridden and become
+          * 'special' SMP interrupts)
+          */
+         for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
+                 /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
+                 if (!test_bit(i, used_vectors))
+                         set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
+         }
+
+         if (!acpi_ioapic)
+                 setup_irq(2, &irq2);
+
+ #ifdef CONFIG_X86_32
+         /*
+          * Call quirks after call gates are initialised (usually add in
+          * the architecture specific gates):
+          */
+         x86_quirk_intr_init();
+
+         /*
+          * External FPU? Set up irq13 if so, for
+          * original braindamaged IBM FERR coupling.
+          */
+         if (boot_cpu_data.hard_math && !cpu_has_fpu)
+                 setup_irq(FPU_IRQ, &fpu_irq);
+
+         irq_ctx_init(smp_processor_id());
+ #endif
+ }
diff --cc arch/x86/mm/fault.c
index 5c6d816f30b,5ec7ae36661..c6acc632637
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@ -3,41 -3,16 +3,17 @@@
   * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   */
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
-
- #include
-
- #include
- #include
- #include
- #include
- #include
- #include
- #include
+ #include <linux/magic.h>          /* STACK_END_MAGIC */
+ #include <linux/sched.h>          /* test_thread_flag(), ... */
+ #include <linux/kdebug.h>         /* oops_begin/end, ... */
+ #include <linux/module.h>         /* search_exception_table */
+ #include <linux/bootmem.h>        /* max_low_pfn */
+ #include <linux/kprobes.h>        /* __kprobes, ... */
+ #include <linux/mmiotrace.h>      /* kmmio_handler, ... */
++#include <linux/perf_counter.h>   /* perf_swcounter_event */
+
+ #include <asm/traps.h>            /* dotraplinkage, ... */
+ #include <asm/pgalloc.h>          /* pgd_*(), ... */

  /*
   * Page fault error code bits:
diff --cc include/linux/sched.h
index bc9326dcdde,d1399660b77..28c774ff3cc
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@@ -96,9 -97,8 +97,9 @@@ struct exec_domain
  struct futex_pi_state;
  struct robust_list_head;
  struct bio;
- struct bts_tracer;
  struct fs_struct;
+ struct bts_context;
 +struct perf_counter_context;

  /*
   * List of flags we want to share for kernel threads,
@@@ -136,9 -137,8 +138,9 @@@ DECLARE_PER_CPU(unsigned long, process_
  extern int nr_processes(void);
  extern unsigned long nr_running(void);
  extern unsigned long nr_uninterruptible(void);
- extern unsigned long nr_active(void);
  extern unsigned long nr_iowait(void);
+ extern void calc_global_load(void);
 +extern u64 cpu_nr_migrations(int cpu);

  extern unsigned long get_parent_ip(unsigned long addr);
diff --cc kernel/Makefile
index e914ca992d7,a35eee3436d..90b53f6dc22
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@@ -93,9 -93,9 +93,10 @@@ obj-$(CONFIG_LATENCYTOP) += latencytop.o
  obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
  obj-$(CONFIG_FUNCTION_TRACER) += trace/
  obj-$(CONFIG_TRACING) += trace/
+ obj-$(CONFIG_X86_DS) += trace/
  obj-$(CONFIG_SMP) += sched_cpupri.o
  obj-$(CONFIG_SLOW_WORK) += slow-work.o
 +obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o

  ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
  # According to Alan Modra, the -fno-omit-frame-pointer is
diff --cc kernel/exit.c
index 99ad4063ee4,cab535c427b..49cdf6946f3
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@@ -48,8 -48,7 +48,8 @@@
  #include
  #include
  #include
 +#include <linux/perf_counter.h>
- #include
+ #include

  #include
  #include
diff --cc kernel/fork.c
index f4466ca37ec,bb762b4dd21..4430eb1376f
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@@ -61,9 -61,7 +61,8 @@@
  #include
  #include
  #include
- #include
  #include
 +#include <linux/perf_counter.h>

  #include
  #include
diff --cc kernel/sched.c
index 8d43347a0c0,14c447ae5d5..5b3f6ec1b0b
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@@ -2885,30 -2912,74 +2941,83 @@@ unsigned long nr_iowait(void
          return sum;
  }

- unsigned long nr_active(void)
+ /* Variables and functions for calc_load */
+ static atomic_long_t calc_load_tasks;
+ static unsigned long calc_load_update;
+ unsigned long avenrun[3];
+ EXPORT_SYMBOL(avenrun);
+
+ /**
+  * get_avenrun - get the load average array
+  * @loads:  pointer to dest load array
+  * @offset: offset to add
+  * @shift:  shift count to shift the result left
+  *
+  * These values are estimates at best, so no need for locking.
+  */
+ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ {
+         loads[0] = (avenrun[0] + offset) << shift;
+         loads[1] = (avenrun[1] + offset) << shift;
+         loads[2] = (avenrun[2] + offset) << shift;
+ }
+
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
  {
-         unsigned long i, running = 0, uninterruptible = 0;
+         load *= exp;
+         load += active * (FIXED_1 - exp);
+         return load >> FSHIFT;
+ }

-         for_each_online_cpu(i) {
-                 running += cpu_rq(i)->nr_running;
-                 uninterruptible += cpu_rq(i)->nr_uninterruptible;
-         }
+ /*
+  * calc_load - update the avenrun load estimates 10 ticks after the
+  * CPUs have updated calc_load_tasks.
+  */
+ void calc_global_load(void)
+ {
+         unsigned long upd = calc_load_update + 10;
+         long active;

-         if (unlikely((long)uninterruptible < 0))
-                 uninterruptible = 0;
+         if (time_before(jiffies, upd))
+                 return;

-         return running + uninterruptible;
+         active = atomic_long_read(&calc_load_tasks);
+         active = active > 0 ? active * FIXED_1 : 0;
+
+         avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+         avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+         avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+         calc_load_update += LOAD_FREQ;
+ }
+
+ /*
+  * Either called from update_cpu_load() or from a cpu going idle
+  */
+ static void calc_load_account_active(struct rq *this_rq)
+ {
+         long nr_active, delta;
+
+         nr_active = this_rq->nr_running;
+         nr_active += (long) this_rq->nr_uninterruptible;
+
+         if (nr_active != this_rq->calc_load_active) {
+                 delta = nr_active - this_rq->calc_load_active;
+                 this_rq->calc_load_active = nr_active;
+                 atomic_long_add(delta, &calc_load_tasks);
+         }
  }
 +
 +/*
 + * Externally visible per-cpu scheduler statistics:
 + * cpu_nr_migrations(cpu) - number of migrations into that cpu
 + */
 +u64 cpu_nr_migrations(int cpu)
 +{
 +        return cpu_rq(cpu)->nr_migrations_in;
 +}
 +
  /*
   * Update rq->cpu_load[] statistics. This function is usually called every
   * scheduler tick (TICK_NSEC).
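
The vector_irq table in the irqinit.c hunk above uses GCC's designated range
initializers ("[first ... last] = value") to mark every slot free (-1) except
the 16 legacy ISA vectors, which map 1:1 onto IRQ numbers. Below is a minimal
standalone sketch of the same idiom; FIRST_LEGACY and NR_TOY are invented
stand-ins for IRQ0_VECTOR and NR_VECTORS, not kernel symbols:

#include <stdio.h>

#define FIRST_LEGACY 16   /* toy stand-in for IRQ0_VECTOR */
#define NR_TOY       32   /* toy stand-in for NR_VECTORS  */

/* Unassigned slots hold -1; legacy vectors map 1:1 to IRQ numbers. */
static int toy_vector_irq[NR_TOY] = {
        [0 ... FIRST_LEGACY - 1]          = -1,
        [FIRST_LEGACY + 0]                = 0,   /* timer    */
        [FIRST_LEGACY + 1]                = 1,   /* keyboard */
        [FIRST_LEGACY + 2]                = 2,   /* cascade  */
        [FIRST_LEGACY + 3 ... NR_TOY - 1] = -1,
};

int main(void)
{
        for (int v = 0; v < NR_TOY; v++)
                if (toy_vector_irq[v] != -1)
                        printf("vector %d -> irq %d\n", v, toy_vector_irq[v]);
        return 0;
}

Range designators are a GNU extension (build with gcc -std=gnu99), which is
fine in this context since the kernel is built with gcc anyway.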
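
calc_load() in the kernel/sched.c hunk is an exponential moving average in
11-bit fixed point: every sample the old average decays by a factor exp and
the current task count is blended in with weight (FIXED_1 - exp). The sketch
below runs the same arithmetic in userspace; the FSHIFT/FIXED_1/EXP_* values
are quoted from include/linux/sched.h of this era and should be treated as an
assumption to verify against the tree:

#include <stdio.h>

#define FSHIFT   11                 /* bits of fixed-point precision */
#define FIXED_1  (1 << FSHIFT)      /* 1.0 in fixed point */
#define EXP_1    1884               /* 1/exp(5sec/1min)  in fixed point */
#define EXP_5    2014               /* 1/exp(5sec/5min)  */
#define EXP_15   2037               /* 1/exp(5sec/15min) */

/* Same formula as the kernel's calc_load() above. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avenrun[3] = { 0, 0, 0 };
        unsigned long active = 3 * FIXED_1;  /* 3 runnable tasks each sample */

        for (int i = 0; i < 36; i++) {       /* 36 samples ~= 3 minutes */
                avenrun[0] = calc_load(avenrun[0], EXP_1, active);
                avenrun[1] = calc_load(avenrun[1], EXP_5, active);
                avenrun[2] = calc_load(avenrun[2], EXP_15, active);
        }

        /* Decode fixed point the way fs/proc/loadavg.c does: */
        for (int i = 0; i < 3; i++)
                printf("%lu.%02lu ", avenrun[i] >> FSHIFT,
                       ((avenrun[i] & (FIXED_1 - 1)) * 100) >> FSHIFT);
        printf("\n");
        return 0;
}

The decode at the end mirrors how /proc/loadavg turns the fixed-point values
into the familiar "2.83 1.31 0.52" style output.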
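
calc_load_account_active() avoids summing over all CPUs on every sample: each
runqueue remembers the last value it contributed (calc_load_active) and folds
only the difference into the shared calc_load_tasks counter, which
calc_global_load() then reads once. A C11 sketch of that delta-folding
pattern, with a toy struct standing in for the kernel's struct rq:

#include <stdatomic.h>
#include <stdio.h>

struct toy_rq {
        long nr_running;
        long nr_uninterruptible;
        long calc_load_active;   /* last value folded into the global sum */
};

static atomic_long calc_load_tasks;  /* shared counter, zero-initialized */

/* Push only the delta since this runqueue's last contribution. */
static void calc_load_account_active(struct toy_rq *rq)
{
        long nr_active = rq->nr_running + rq->nr_uninterruptible;

        if (nr_active != rq->calc_load_active) {
                long delta = nr_active - rq->calc_load_active;
                rq->calc_load_active = nr_active;
                atomic_fetch_add(&calc_load_tasks, delta);
        }
}

int main(void)
{
        struct toy_rq cpu0 = { .nr_running = 2 }, cpu1 = { .nr_running = 1 };

        calc_load_account_active(&cpu0);
        calc_load_account_active(&cpu1);
        cpu0.nr_running = 0;                 /* cpu0's tasks went to sleep */
        calc_load_account_active(&cpu0);

        /* 2 + 1 - 2 = 1 task still active */
        printf("global active tasks: %ld\n", atomic_load(&calc_load_tasks));
        return 0;
}

The payoff of the pattern is that the shared atomic is touched at most once
per runqueue per sample window instead of once per CPU per reader.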