[POWERPC] Lazy interrupt disabling for 64-bit machines
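
The hunks below are the entry_64.S side of the change: interrupt state is now tracked with two per-CPU bytes in the PACA, PACASOFTIRQEN (whether interrupts are logically enabled) and PACAHARDIRQEN (whether MSR_EE is actually on), plus a saved copy in the SOFTE slot of each exception frame. Disabling interrupts becomes cheap: clear the soft flag and leave MSR_EE alone; an interrupt that arrives while soft-disabled is deferred (elsewhere in the commit) and replayed when interrupts are soft-enabled again. As a rough standalone illustration of that idea, not the kernel's code, with all names (cpu_state, irq_arrives, ...) made up for the sketch:

/*
 * Standalone model of lazy (soft) interrupt disabling.  The two flags
 * mirror PACASOFTIRQEN / PACAHARDIRQEN; everything else is illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

struct cpu_state {
	bool soft_enabled;	/* what local_irq_enable/disable track */
	bool hard_enabled;	/* whether MSR_EE is actually set */
	bool irq_pending;	/* an interrupt arrived while soft-disabled */
};

static void handle_irq(struct cpu_state *cpu)
{
	printf("interrupt handled (soft=%d hard=%d)\n",
	       cpu->soft_enabled, cpu->hard_enabled);
}

/* local_irq_disable(): cheap - clear the soft flag, leave MSR_EE alone */
static void soft_disable(struct cpu_state *cpu)
{
	cpu->soft_enabled = false;
}

/* what the exception entry path does when a hardware interrupt arrives */
static void irq_arrives(struct cpu_state *cpu)
{
	if (!cpu->soft_enabled) {
		/* masked: hard-disable and remember it for later replay */
		cpu->hard_enabled = false;
		cpu->irq_pending = true;
		return;
	}
	handle_irq(cpu);
}

/* local_irq_enable(): re-enable, replaying anything deferred meanwhile */
static void soft_enable(struct cpu_state *cpu)
{
	cpu->soft_enabled = true;
	if (!cpu->hard_enabled) {
		cpu->hard_enabled = true;	/* really set MSR_EE again */
		if (cpu->irq_pending) {
			cpu->irq_pending = false;
			handle_irq(cpu);	/* deferred interrupt fires now */
		}
	}
}

int main(void)
{
	struct cpu_state cpu = { true, true, false };

	soft_disable(&cpu);
	irq_arrives(&cpu);	/* deferred, not handled yet */
	soft_enable(&cpu);	/* replayed here */
	return 0;
}
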
arch/powerpc/kernel/entry_64.S
index 748e74fcf541f4f1cb4eb6dcdb4694db218f4ec0..efda48741b29c82b1e35689002c9aab612977985 100644
@@ -87,6 +87,10 @@ system_call_common:
        addi    r9,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r9)             /* "regshere" marker */
+       li      r10,1
+       stb     r10,PACASOFTIRQEN(r13)
+       stb     r10,PACAHARDIRQEN(r13)
+       std     r10,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
        /* Hack for handling interrupts when soft-enabling on iSeries */
@@ -94,8 +98,6 @@ BEGIN_FW_FTR_SECTION
        andi.   r10,r12,MSR_PR          /* from kernel */
        crand   4*cr0+eq,4*cr1+eq,4*cr0+eq
        beq     hardware_interrupt_entry
-       lbz     r10,PACAPROCENABLED(r13)
-       std     r10,SOFTE(r1)
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
        mfmsr   r11
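
The two hunks above change system-call entry: syscalls are only made with interrupts enabled, so both PACA flags are set to 1 and the same value is stored into the SOFTE slot of the stack frame for the return path to pick up; the old iSeries-only load of PACAPROCENABLED into SOFTE is then unnecessary. In terms of the sketch above (names still illustrative):

/* Continuation of the sketch above: the new system_call_common prologue.
 * frame_softe stands for the SOFTE(r1) slot in the stack frame. */
static void syscall_entry_mark_enabled(struct cpu_state *cpu,
				       unsigned long *frame_softe)
{
	cpu->soft_enabled = true;	/* stb r10,PACASOFTIRQEN(r13) */
	cpu->hard_enabled = true;	/* stb r10,PACAHARDIRQEN(r13) */
	*frame_softe = 1;		/* std r10,SOFTE(r1) */
}
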
@@ -460,9 +462,9 @@ _GLOBAL(ret_from_except_lite)
 #endif
 
 restore:
+       ld      r5,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
-       ld      r5,SOFTE(r1)
        cmpdi   0,r5,0
        beq     4f
        /* Check for pending interrupts (iSeries) */
@@ -472,16 +474,16 @@ BEGIN_FW_FTR_SECTION
        beq+    4f                      /* skip do_IRQ if no interrupts */
 
        li      r3,0
-       stb     r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
+       stb     r3,PACASOFTIRQEN(r13)   /* ensure we are soft-disabled */
        ori     r10,r10,MSR_EE
        mtmsrd  r10                     /* hard-enable again */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_IRQ
        b       .ret_from_except_lite           /* loop back and handle more */
-
-4:     stb     r5,PACAPROCENABLED(r13)
+4:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+       stb     r5,PACASOFTIRQEN(r13)
 
        ld      r3,_MSR(r1)
        andi.   r0,r3,MSR_RI
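
In the return path, restore now loads the saved SOFTE value unconditionally and, after the iSeries block, writes it back to PACASOFTIRQEN on every platform rather than only on iSeries. The iSeries-specific code keeps its existing job: if we are returning with interrupts soft-enabled and an interrupt is pending, soft-disable, hard-enable MSR_EE, and call do_IRQ before looping back through ret_from_except_lite. Continuing the sketch (simplified, without the loop back):

/* Continuation of the sketch above: what `restore` now does with the
 * SOFTE value that was saved in the exception frame at entry. */
static void exception_return(struct cpu_state *cpu, unsigned long frame_softe,
			     bool iseries_irq_pending)
{
	if (frame_softe && iseries_irq_pending) {
		/* iSeries only: run the pending interrupt before returning */
		cpu->soft_enabled = false;	/* stb r3,PACASOFTIRQEN(r13) */
		cpu->hard_enabled = true;	/* ori r10,r10,MSR_EE; mtmsrd r10 */
		handle_irq(cpu);		/* bl .do_IRQ (real code loops back) */
	}
	/* on every platform: the frame's state becomes the PACA state again */
	cpu->soft_enabled = frame_softe;	/* stb r5,PACASOFTIRQEN(r13) */
}
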
@@ -538,25 +540,15 @@ do_work:
        /* Check that preempt_count() == 0 and interrupts are enabled */
        lwz     r8,TI_PREEMPT(r9)
        cmpwi   cr1,r8,0
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
        ld      r0,SOFTE(r1)
        cmpdi   r0,0
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
-       andi.   r0,r3,MSR_EE
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
        crandc  eq,cr1*4+eq,eq
        bne     restore
        /* here we are preempting the current task */
 1:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
        li      r0,1
-       stb     r0,PACAPROCENABLED(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+       stb     r0,PACASOFTIRQEN(r13)
+       stb     r0,PACAHARDIRQEN(r13)
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1           /* reenable interrupts */
        bl      .preempt_schedule
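
The kernel-preemption path in do_work gets the same treatment: whether we may preempt is now decided from the saved SOFTE value on all platforms, instead of from SOFTE on iSeries and MSR_EE everywhere else, and before calling preempt_schedule the code marks both PACA flags enabled and hard-enables MSR_EE with mtmsrd. Continuing the sketch:

/* Continuation of the sketch above: the resched-from-kernel path in do_work.
 * preempt_count and frame_softe stand for TI_PREEMPT(r9) and SOFTE(r1). */
static void kernel_preempt_check(struct cpu_state *cpu, int preempt_count,
				 unsigned long frame_softe)
{
	/* crandc/bne restore: only preempt if count is 0 and we were enabled */
	if (preempt_count != 0 || frame_softe == 0)
		return;

	cpu->soft_enabled = true;	/* stb r0,PACASOFTIRQEN(r13) */
	cpu->hard_enabled = true;	/* stb r0,PACAHARDIRQEN(r13) */
	/* ori r10,r10,MSR_EE; mtmsrd r10,1; then bl .preempt_schedule */
	printf("preempt_schedule()\n");
}
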
@@ -639,8 +631,7 @@ _GLOBAL(enter_rtas)
        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
-       mfmsr   r6
-       andi.   r0,r6,MSR_EE
+       lbz     r0,PACASOFTIRQEN(r13)
 1:     tdnei   r0,0
 .section __bug_table,"a"
        .llong  1b,__LINE__ + 0x1000000, 1f, 2f
@@ -649,7 +640,13 @@ _GLOBAL(enter_rtas)
 1:     .asciz  __FILE__
 2:     .asciz "enter_rtas"
 .previous
-       
+
+       /* Hard-disable interrupts */
+       mfmsr   r6
+       rldicl  r7,r6,48,1
+       rotldi  r7,r7,16
+       mtmsrd  r7,1
+
        /* Unfortunately, the stack pointer and the MSR are also clobbered,
         * so they are saved in the PACA which allows us to restore
         * our original state after RTAS returns.
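
Finally, enter_rtas: with lazy disabling, MSR_EE can still be set while interrupts are logically off, so the WARN_ON-style trap now tests PACASOFTIRQEN instead of MSR_EE. The new sequence after the bug table then really hard-disables before calling RTAS: rldicl r7,r6,48,1 rotates the MSR right by 16 bits and clears what is then the top bit, which is MSR_EE (bit 48, value 0x8000), and rotldi r7,r7,16 rotates it back, so the pair computes "MSR with EE cleared" without needing a 64-bit immediate mask. In C terms, continuing the sketch:

/* Continuation of the sketch above.  msr_clear_ee() mirrors the
 * rldicl/rotldi pair used to hard-disable before calling RTAS. */
#include <stdint.h>

static uint64_t msr_clear_ee(uint64_t msr)
{
	uint64_t t = (msr >> 16) | (msr << 48);	/* rldicl r7,r6,48,1: rotate right 16... */
	t &= ~(1ULL << 63);			/* ...and clear the rotated MSR_EE bit */
	return (t << 16) | (t >> 48);		/* rotldi r7,r7,16: rotate back */
}

static void enter_rtas_check(struct cpu_state *cpu, uint64_t *msr)
{
	/* lbz r0,PACASOFTIRQEN(r13); tdnei r0,0: trap if still soft-enabled */
	if (cpu->soft_enabled)
		printf("WARN: entering RTAS with interrupts soft-enabled\n");

	/* hard-disable: continue with the MSR value with EE (0x8000) cleared */
	*msr = msr_clear_ee(*msr);
}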