x86-64: Move irqcount from PDA to per-cpu.

author     Brian Gerst <brgerst@gmail.com>
           Sun, 18 Jan 2009 15:38:58 +0000 (00:38 +0900)
committer  Tejun Heo <tj@kernel.org>
           Sun, 18 Jan 2009 15:38:58 +0000 (00:38 +0900)

tj: s/irqcount/irq_count/

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
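
For context, a minimal sketch (not part of the patch; the helper name is hypothetical) of what the move buys on the C side: the counter becomes an ordinary per-cpu variable, reachable through the generic per-cpu accessors instead of a struct x8664_pda field and a hand-maintained asm-offsets constant, while the assembly side switches from %gs:pda_irqcount to PER_CPU_VAR(irq_count), as the entry_64.S hunks below show.

	#include <linux/percpu.h>

	/* Defined in arch/x86/kernel/cpu/common.c by this patch. */
	DECLARE_PER_CPU(unsigned int, irq_count);

	/* Hypothetical helper, for illustration only. */
	static inline int cpu_on_irq_stack(int cpu)
	{
		/* irq_count starts at -1; it is >= 0 while that CPU runs on its IRQ stack. */
		return (int)per_cpu(irq_count, cpu) >= 0;
	}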
arch/x86/include/asm/pda.h
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/entry_64.S

diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index ae23deb995595077f8fb29066c85762a9cb81aab..4527d70314d47e0cbe7a90100718ee8a91bb2c6f 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -15,7 +15,7 @@ struct x8664_pda {
        unsigned long unused2;
        unsigned long unused3;
        unsigned long unused4;
-       int irqcount;                   /* 32 Irq nesting counter. Starts -1 */
+       int unused5;
        unsigned int unused6;           /* 36 was cpunumber */
 #ifdef CONFIG_CC_STACKPROTECTOR
        unsigned long stack_canary;     /* 40 stack canary value */
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index afda6deb85155cc31f8289752ec31274b981622c..64c834a39aa84ffb04df079ab5d8c762780b8327 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -49,7 +49,6 @@ int main(void)
        BLANK();
 #undef ENTRY
 #define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-       ENTRY(irqcount);
        DEFINE(pda_size, sizeof(struct x8664_pda));
        BLANK();
 #undef ENTRY
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 06b6290088f40dc4618033f788da173688ca4265..e2323ecce1d32eea64e2db0a691bec3ae05af669 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -893,6 +893,8 @@ DEFINE_PER_CPU(unsigned long, kernel_stack) =
        (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
 EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+
 void __cpuinit pda_init(int cpu)
 {
        struct x8664_pda *pda = cpu_pda(cpu);
@@ -903,8 +905,6 @@ void __cpuinit pda_init(int cpu)
 
        load_pda_offset(cpu);
 
-       pda->irqcount = -1;
-
        if (cpu != 0) {
                if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
                        pda->nodenumber = cpu_to_node(cpu);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7c27da407da78b64b1966c9dcca53a498732e4ce..c52b60919163af41af58a7068fcba266fb0c657e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -337,12 +337,12 @@ ENTRY(save_args)
        je 1f
        SWAPGS
        /*
-        * irqcount is used to check if a CPU is already on an interrupt stack
+        * irq_count is used to check if a CPU is already on an interrupt stack
         * or not. While this is essentially redundant with preempt_count it is
         * a little cheaper to use a separate counter in the PDA (short of
         * moving irq_enter into assembly, which would be too much work)
         */
-1:     incl %gs:pda_irqcount
+1:     incl PER_CPU_VAR(irq_count)
        jne 2f
        popq_cfi %rax                   /* move return address... */
        mov PER_CPU_VAR(irq_stack_ptr),%rsp
@@ -837,7 +837,7 @@ common_interrupt:
 ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
@@ -1260,14 +1260,14 @@ ENTRY(call_softirq)
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
-       incl %gs:pda_irqcount
+       incl PER_CPU_VAR(irq_count)
        cmove PER_CPU_VAR(irq_stack_ptr),%rsp
        push  %rbp                      # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        ret
        CFI_ENDPROC
 END(call_softirq)
@@ -1297,7 +1297,7 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        movq %rdi, %rsp            # we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
-11:    incl %gs:pda_irqcount
+11:    incl PER_CPU_VAR(irq_count)
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
@@ -1305,7 +1305,7 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        call xen_evtchn_do_upcall
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        jmp  error_exit
        CFI_ENDPROC
 END(do_hypervisor_callback)
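
The comment in save_args above notes that irq_count is essentially redundant with preempt_count but cheaper to test from assembly. As a rough C sketch of the stack-switch logic those entry_64.S hunks implement (illustration only; the real code stays in assembly, and switch_stack_to()/handle_interrupt() are hypothetical stand-ins):

	#include <linux/percpu.h>

	DECLARE_PER_CPU(unsigned int, irq_count);	/* added by this patch */
	DECLARE_PER_CPU(char *, irq_stack_ptr);		/* already per-cpu; type assumed here */

	/* Hypothetical stand-ins for the surrounding assembly. */
	extern void switch_stack_to(char *stack_top);
	extern void handle_interrupt(void);

	static void irq_entry_sketch(void)
	{
		/*
		 * irq_count starts at -1.  The first increment (to 0) means this
		 * CPU is taking its outermost interrupt, so it moves onto the
		 * per-cpu IRQ stack; nested interrupts see a value > 0 and stay
		 * on the stack they are already using.
		 */
		if (++__get_cpu_var(irq_count) == 0)
			switch_stack_to(__get_cpu_var(irq_stack_ptr));

		handle_interrupt();

		--__get_cpu_var(irq_count);	/* back toward -1 on the way out */
	}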