From: Ingo Molnar
Date: Wed, 11 Feb 2009 08:22:04 +0000 (+0100)
Subject: Merge commit 'v2.6.29-rc4' into perfcounters/core
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=95fd4845ed0ffcab305b4f30ce1c12dc34f1b56c;p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git

Merge commit 'v2.6.29-rc4' into perfcounters/core

Conflicts:
	arch/x86/kernel/setup_percpu.c
	arch/x86/mm/fault.c
	drivers/acpi/processor_idle.c
	kernel/irq/handle.c
---

95fd4845ed0ffcab305b4f30ce1c12dc34f1b56c
diff --cc arch/arm/kernel/irq.c
index 4bb723eadad1,363db186cb93..45eacb5a2ecd
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@@ -101,14 -101,9 +101,14 @@@ unlock
  /* Handle bad interrupts */
  static struct irq_desc bad_irq_desc = {
  	.handle_irq = handle_bad_irq,
- 	.lock = SPIN_LOCK_UNLOCKED
+ 	.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
  };
  
 +#ifdef CONFIG_CPUMASK_OFFSTACK
 +/* We are not allocating bad_irq_desc.affinity or .pending_mask */
 +#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
 +#endif
 +
  /*
   * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
   * come via this function.  Instead, they should provide their
diff --cc arch/x86/kernel/entry_64.S
index eb0a0703f4c9,a1346217e43c..8f8f61a1fce8
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@@ -342,11 -341,12 +342,12 @@@ ENTRY(save_args
  	 * a little cheaper to use a separate counter in the PDA (short of
  	 * moving irq_enter into assembly, which would be too much work)
  	 */
 -1:	incl %gs:pda_irqcount
 +1:	incl PER_CPU_VAR(irq_count)
  	jne 2f
  	popq_cfi %rax			/* move return address... */
 -	mov %gs:pda_irqstackptr,%rsp
 +	mov PER_CPU_VAR(irq_stack_ptr),%rsp
  	EMPTY_FRAME 0
+ 	pushq_cfi %rbp			/* backlink for unwinder */
  	pushq_cfi %rax			/* ... to the new stack */
  	/*
  	 * We entered an interrupt context - irqs are off:
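The entry_64.S hunk above is part of the PDA-to-percpu conversion this merge carries: the interrupt nesting counter moves from %gs:pda_irqcount to PER_CPU_VAR(irq_count), and the incl/jne pair switches to the dedicated IRQ stack only when the increment takes the counter from -1 to 0, i.e. on the outermost, non-nested interrupt. The following is a minimal userspace C sketch of that nesting guard; it only illustrates the pattern, it is not kernel code, and every name in it (irq_enter_sketch, task_stack, irq_stack_top, ...) is invented for the example:

#include <stdio.h>

/* Invented stand-ins for the per-CPU variables named in the hunk above. */
static int irq_count = -1;		/* -1: not in interrupt context */
static char irq_stack[8192];
static char *irq_stack_top = irq_stack + sizeof(irq_stack);
static char task_stack[8192];
static char *sp = task_stack + sizeof(task_stack);	/* simulated %rsp */
static char *saved_sp;

static void irq_enter_sketch(void)
{
	/* "incl PER_CPU_VAR(irq_count); jne 2f": switch stacks only when
	 * the increment takes the counter from -1 to 0 (outermost entry). */
	if (++irq_count == 0) {
		saved_sp = sp;
		sp = irq_stack_top;	/* "mov PER_CPU_VAR(irq_stack_ptr),%rsp" */
	}
	printf("enter: depth %d, on %s stack\n", irq_count,
	       sp == irq_stack_top ? "irq" : "task");
}

static void irq_exit_sketch(void)
{
	/* Undo the stack switch only when leaving the outermost handler. */
	if (irq_count-- == 0)
		sp = saved_sp;
}

int main(void)
{
	irq_enter_sketch();	/* outermost IRQ: moves to the irq stack */
	irq_enter_sketch();	/* nested IRQ: stays where it is */
	irq_exit_sketch();
	irq_exit_sketch();
	return 0;
}

Starting the counter at -1 is the trick: the increment-to-zero test distinguishes the outermost entry without needing a separate in-interrupt flag.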
diff --cc arch/x86/kernel/irqinit_32.c
index 520e6c1c5d22,10a09c2f1828..f6ff71cdaba8
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@@ -163,44 -162,13 +154,41 @@@ static void __init apic_intr_init(void
  	/* IPI vectors for APIC spurious and error interrupts */
  	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
  	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 -#endif
 +# ifdef CONFIG_PERF_COUNTERS
 +	alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
 +# endif
  
 -#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
 +# ifdef CONFIG_X86_MCE_P4THERMAL
  	/* thermal monitor LVT interrupt */
  	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 +# endif
  #endif
 +}
 +
 +/* Overridden in paravirt.c */
 +void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
 +
 +void __init native_init_IRQ(void)
 +{
 +	int i;
 +
 +	/* all the set up before the call gates are initialised */
 +	pre_intr_init_hook();
 +
 +	apic_intr_init();
 +
 +	/*
 +	 * Cover the whole vector space, no vector can escape
 +	 * us. (some of these will be overridden and become
 +	 * 'special' SMP interrupts)
 +	 */
 +	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
 +		int vector = FIRST_EXTERNAL_VECTOR + i;
 +		/* SYSCALL_VECTOR was reserved in trap_init. */
 +		if (!test_bit(vector, used_vectors))
 +			set_intr_gate(vector, interrupt[i]);
 +	}
  
- 	if (!acpi_ioapic)
- 		setup_irq(2, &irq2);
- 
  	/* setup after call gates are initialised (usually add in
  	 * the architecture specific gates)
  	 */
diff --cc arch/x86/kernel/setup_percpu.c
index 90b8e154bb53,01161077a49c..e553803cd2db
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@@ -77,26 -50,7 +77,26 @@@ static void __init setup_node_to_cpumas
  static inline void setup_node_to_cpumask_map(void) { }
  #endif
  
 -#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
 +#ifdef CONFIG_X86_64
 +
 +/* correctly size the local cpu masks */
- static void setup_cpu_local_masks(void)
++static void __init setup_cpu_local_masks(void)
 +{
 +	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
 +	alloc_bootmem_cpumask_var(&cpu_callin_mask);
 +	alloc_bootmem_cpumask_var(&cpu_callout_mask);
 +	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 +}
 +
 +#else /* CONFIG_X86_32 */
 +
 +static inline void setup_cpu_local_masks(void)
 +{
 +}
 +
 +#endif /* CONFIG_X86_32 */
 +
 +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
  /*
   * Copy data used in early init routines from the initial arrays to the
   * per cpu data areas.  These arrays then become expendable and the
diff --cc arch/x86/mach-voyager/voyager_smp.c
index 96f15b09a4c5,7ffcdeec4631..2c74aec4efc1
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@@ -1749,13 -1746,13 +1747,14 @@@ static void __init voyager_smp_prepare_
  static void __cpuinit voyager_smp_prepare_boot_cpu(void)
  {
  	init_gdt(smp_processor_id());
 +	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
  	switch_to_new_gdt();
  
- 	cpu_set(smp_processor_id(), cpu_online_map);
- 	cpu_set(smp_processor_id(), cpu_callout_map);
- 	cpu_set(smp_processor_id(), cpu_possible_map);
- 	cpu_set(smp_processor_id(), cpu_present_map);
+ 	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+ 	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+ 	cpu_callin_map = CPU_MASK_NONE;
+ 	cpu_present_map = cpumask_of_cpu(smp_processor_id());
+ 
  }
  
  static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@@ -1782,12 -1779,12 +1781,12 @@@ static void __init voyager_smp_cpus_don
  void __init smp_setup_processor_id(void)
  {
  	current_thread_info()->cpu = hard_smp_processor_id();
 -	x86_write_percpu(cpu_number, hard_smp_processor_id());
 +	percpu_write(cpu_number, hard_smp_processor_id());
  }
  
- static void voyager_send_call_func(cpumask_t callmask)
+ static void voyager_send_call_func(const struct cpumask *callmask)
  {
- 	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+ 	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
  	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
  }
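The voyager_smp.c hunk changes voyager_send_call_func() to take const struct cpumask * instead of a cpumask_t by value, in line with the cpumask rework this merge brings in (the same rework behind the CONFIG_CPUMASK_OFFSTACK guard in the ARM hunk and the alloc_bootmem_cpumask_var() calls in setup_percpu.c above). With a large NR_CPUS a cpumask_t is a sizable fixed bitmap, so passing it by value copies the whole thing on every call. Below is a hedged userspace sketch of the difference; the names (cpumask_sketch, first_cpu_byval, NCPUS) are invented, and __builtin_ctzl is a GCC/Clang builtin:

#include <stdio.h>
#include <string.h>

/* Invented stand-in for cpumask_t: at 4096 CPUs the bitmap is 512 bytes. */
#define NCPUS 4096
struct cpumask_sketch {
	unsigned long bits[NCPUS / (8 * sizeof(unsigned long))];
};

/* Old style: the caller copies the entire bitmap onto the stack. */
static int first_cpu_byval(struct cpumask_sketch mask)
{
	size_t i;

	for (i = 0; i < sizeof(mask.bits) / sizeof(mask.bits[0]); i++)
		if (mask.bits[i])
			return (int)(i * 8 * sizeof(unsigned long))
			       + __builtin_ctzl(mask.bits[i]);
	return -1;
}

/* New style, as in the voyager hunk: only a pointer travels. */
static int first_cpu_byptr(const struct cpumask_sketch *mask)
{
	size_t i;

	for (i = 0; i < sizeof(mask->bits) / sizeof(mask->bits[0]); i++)
		if (mask->bits[i])
			return (int)(i * 8 * sizeof(unsigned long))
			       + __builtin_ctzl(mask->bits[i]);
	return -1;
}

int main(void)
{
	struct cpumask_sketch m;

	memset(&m, 0, sizeof(m));
	m.bits[1] = 1UL << 3;	/* CPU 67 on a 64-bit host */

	printf("by value:   cpu %d (arg size %zu bytes)\n",
	       first_cpu_byval(m), sizeof(m));
	printf("by pointer: cpu %d (arg size %zu bytes)\n",
	       first_cpu_byptr(&m), sizeof(&m));
	return 0;
}

Passing a pointer also allows masks to live off-stack, allocated once at boot, instead of being copied onto small kernel stacks per call.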
diff --cc arch/x86/mm/fault.c
index 65709a6aa6ee,c76ef1d701c9..8c3f3113a6ec
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@ -807,8 -601,8 +807,6 @@@ void __kprobes do_page_fault(struct pt_
  	/* get the address */
  	address = read_cr2();
  
- 	if (unlikely(notify_page_fault(regs)))
- 		return;
- 	si_code = SEGV_MAPERR;
- 
  	if (unlikely(kmmio_fault(regs, address)))
  		return;
@@@ -835,17 -629,23 +833,22 @@@
  			return;
  
  		/* Can handle a stale RO->RW TLB */
 -		if (spurious_fault(address, error_code))
 +		if (spurious_fault(error_code, address))
  			return;
  
+ 		/* kprobes don't want to hook the spurious faults. */
+ 		if (notify_page_fault(regs))
+ 			return;
  		/*
  		 * Don't take the mm semaphore here. If we fixup a prefetch
  		 * fault we could otherwise deadlock.
  		 */
 -		goto bad_area_nosemaphore;
 +		bad_area_nosemaphore(regs, error_code, address);
 +		return;
  	}
  
- 	/* kprobes don't want to hook the spurious faults. */
- 	if (notify_page_fault(regs))
++	if (unlikely(notify_page_fault(regs)))
+ 		return;
- 
  	/*
  	 * It's safe to allow irq's after cr2 has been saved and the
  	 * vmalloc fault has been handled.
diff --cc drivers/acpi/processor_idle.c
index 7acb23f830ce,7bc22a471fe3..259f6e806314
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@@ -1430,11 -824,8 +824,14 @@@ static int acpi_idle_bm_check(void
   */
  static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
  {
++	u64 perf_flags;
++
 +	u64 pctrl;
 +
  	/* Don't trace irqs off for idle */
  	stop_critical_timings();
++	perf_flags = hw_perf_save_disable();
 +	pctrl = hw_perf_save_disable();
  	if (cx->entry_method == ACPI_CSTATE_FFH) {
  		/* Call into architectural FFH based C-state */
  		acpi_processor_ffh_cstate_enter(cx);
@@@ -1449,7 -840,6 +846,8 @@@
  		   gets asserted in time to freeze execution properly. */
  		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
  	}
++	hw_perf_restore(perf_flags);
 +	hw_perf_restore(pctrl);
  	start_critical_timings();
  }
diff --cc kernel/irq/handle.c
index 375d68cd5bf0,3aba8d12f328..f51eaee921b6
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@@ -133,10 -146,8 +145,12 @@@ int __init early_irq_init(void
  	int legacy_count;
  	int i;
  
+ 	init_irq_default_affinity();
+ 
 +	/* initialize nr_irqs based on nr_cpu_ids */
 +	arch_probe_nr_irqs();
 +	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
 +
  	desc = irq_desc_legacy;
  	legacy_count = ARRAY_SIZE(irq_desc_legacy);
@@@ -229,8 -233,8 +243,10 @@@ int __init early_irq_init(void
  	int count;
  	int i;
  
+ 	init_irq_default_affinity();
+ 
 +	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
 +
  	desc = irq_desc;
  	count = ARRAY_SIZE(irq_desc);
diff --cc kernel/irq/numa_migrate.c
index 666260e4c065,acd88356ac76..7f9b80434e32
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@@ -95,8 -81,10 +95,9 @@@ static struct irq_desc *__real_move_irq
  		desc = old_desc;
  		goto out_unlock;
  	}
  
- 	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
  	irq_desc_ptrs[irq] = desc;
+ 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
  
  	/* free the old one */
  	free_one_irq_desc(old_desc, desc);
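The numa_migrate.c hunk that closes the section narrows the lock scope: the new descriptor is published in irq_desc_ptrs[] under sparse_irq_lock, the lock is dropped, and only then does free_one_irq_desc() tear down the old one, keeping the free out of the critical section. A minimal pthread sketch of that swap-then-free shape follows; all names (desc_sketch, move_desc_sketch, desc_table) are invented stand-ins, and the sketch shows only the lock-scope ordering, not the kernel's real descriptor lifetime rules (actual readers need more protection than this):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-ins: desc_table plays irq_desc_ptrs[], table_lock plays
 * sparse_irq_lock. */
struct desc_sketch {
	int	irq;
	char	payload[32];
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc_sketch *desc_table[16];

static void move_desc_sketch(int irq, struct desc_sketch *new_desc)
{
	struct desc_sketch *old_desc;

	pthread_mutex_lock(&table_lock);
	old_desc = desc_table[irq];
	desc_table[irq] = new_desc;	/* publish under the lock */
	pthread_mutex_unlock(&table_lock);

	free(old_desc);			/* "free the old one", outside the lock */
}

int main(void)
{
	struct desc_sketch *a = calloc(1, sizeof(*a));
	struct desc_sketch *b = calloc(1, sizeof(*b));

	a->irq = b->irq = 5;
	strcpy(b->payload, "on the new node");

	desc_table[5] = a;
	move_desc_sketch(5, b);

	printf("irq %d desc: %s\n", 5, desc_table[5]->payload);
	free(desc_table[5]);
	return 0;
}

Only the pointer swap needs the lock; once the old descriptor is unreachable through the table, its teardown no longer has to hold up other CPUs contending for the same lock.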