powerpc/64s/idle: Move soft interrupt mask logic into C code
author Nicholas Piggin <npiggin@gmail.com>
Tue, 13 Jun 2017 13:05:45 +0000 (23:05 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Mon, 19 Jun 2017 09:46:26 +0000 (19:46 +1000)
This simplifies the asm and fixes irq-off tracing over sleep
instructions.
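
The new flow, sketched in C, mirrors the __power7_idle_type() wrapper
added to platforms/powernv/idle.c below: prep_irq_for_idle_irqsoff()
hard-disables and bails out if a masked interrupt is already pending,
and fini_irq_for_idle_irqsoff() restores the irq-off trace state after
wakeup:

	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return 0;

	ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);	/* asm idle entry, returns SRR1 on wake */
	ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();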

Also move the powersave_nap check for POWER8 into C code, and move the
PSSCR register value calculation for POWER9 into C.
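
For POWER9 this means the stop PSSCR value is composed in C before
entering the asm stub, roughly (as in the __power9_idle_type() helper
added below):

	unsigned long psscr, srr1;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	srr1 = power9_idle_stop(psscr);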

Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/processor.h
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/irq.c
arch/powerpc/platforms/powernv/idle.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/powernv/subcore.c
drivers/cpuidle/cpuidle-powernv.c

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index eba60416536ec0955ff4a4131b4d788db2f25f9c..f06112cf8734708d751a43f9cc910de640f0fb83 100644
@@ -129,6 +129,9 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 }
 
 extern bool prep_irq_for_idle(void);
+extern bool prep_irq_for_idle_irqsoff(void);
+
+#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
 
 extern void force_external_irq_replay(void);
 
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index f90b22c722e1861663a90ee88f23ec6fad49852e..cd2fc1cc1cc7c056255b6f49effefb1e6d0cc1e6 100644
@@ -226,6 +226,7 @@ struct machdep_calls {
 extern void e500_idle(void);
 extern void power4_idle(void);
 extern void power7_idle(void);
+extern void power9_idle(void);
 extern void ppc6xx_idle(void);
 extern void book3e_idle(void);
 
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a2123f291ab0c5c8dc13cc9364c3a12848a4bb2c..c49165a7439c592e921d8b091e2d11bbfca9d3d4 100644
@@ -481,11 +481,11 @@ extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;      /* set if nap mode can be used in idle loop */
-extern unsigned long power7_nap(int check_irq);
-extern unsigned long power7_sleep(void);
-extern unsigned long power7_winkle(void);
-extern unsigned long power9_idle_stop(unsigned long stop_psscr_val,
-                                     unsigned long stop_psscr_mask);
+extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
+extern void power7_idle_type(unsigned long type);
+extern unsigned long power9_idle_stop(unsigned long psscr_val);
+extern void power9_idle_type(unsigned long stop_psscr_val,
+                             unsigned long stop_psscr_mask);
 
 extern void flush_instruction_cache(void);
 extern void hard_reset_now(void);
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 98a6d07ecb5ca09f5d941735a62b3a4f40b8f936..35cf5bb7daed673deb698076a984b90e1ce114ec 100644
@@ -109,13 +109,9 @@ core_idle_lock_held:
 /*
  * Pass requested state in r3:
  *     r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
- *        - Requested STOP state in POWER9
+ *        - Requested PSSCR value in POWER9
  *
- * To check IRQ_HAPPENED in r4
- *     0 - don't check
- *     1 - check
- *
- * Address to 'rfid' to in r5
+ * Address of idle handler to 'rfid' to in r4
  */
 pnv_powersave_common:
        /* Use r3 to pass state nap/sleep/winkle */
@@ -131,30 +127,7 @@ pnv_powersave_common:
        std     r0,_LINK(r1)
        std     r0,_NIP(r1)
 
-       /* Hard disable interrupts */
-       mfmsr   r9
-       rldicl  r9,r9,48,1
-       rotldi  r9,r9,16
-       mtmsrd  r9,1                    /* hard-disable interrupts */
-
-       /* Check if something happened while soft-disabled */
-       lbz     r0,PACAIRQHAPPENED(r13)
-       andi.   r0,r0,~PACA_IRQ_HARD_DIS@l
-       beq     1f
-       cmpwi   cr0,r4,0
-       beq     1f
-       addi    r1,r1,INT_FRAME_SIZE
-       ld      r0,16(r1)
-       li      r3,0                    /* Return 0 (no nap) */
-       mtlr    r0
-       blr
-
-1:     /* We mark irqs hard disabled as this is the state we'll
-        * be in when returning and we need to tell arch_local_irq_restore()
-        * about it
-        */
-       li      r0,PACA_IRQ_HARD_DIS
-       stb     r0,PACAIRQHAPPENED(r13)
+       mfmsr   r9
 
        /* We haven't lost state ... yet */
        li      r0,0
@@ -163,8 +136,8 @@ pnv_powersave_common:
        /* Continue saving state */
        SAVE_GPR(2, r1)
        SAVE_NVGPRS(r1)
-       mfcr    r4
-       std     r4,_CCR(r1)
+       mfcr    r5
+       std     r5,_CCR(r1)
        std     r9,_MSR(r1)
        std     r1,PACAR1(r13)
 
@@ -178,7 +151,7 @@ pnv_powersave_common:
        li      r6, MSR_RI
        andc    r6, r9, r6
        mtmsrd  r6, 1           /* clear RI before setting SRR0/1 */
-       mtspr   SPRN_SRR0, r5
+       mtspr   SPRN_SRR0, r4
        mtspr   SPRN_SRR1, r7
        rfid
 
@@ -322,35 +295,14 @@ lwarx_loop_stop:
 
        IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
 
-_GLOBAL(power7_idle)
+/*
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
+ */
+_GLOBAL(power7_idle_insn)
        /* Now check if user or arch enabled NAP mode */
-       LOAD_REG_ADDRBASE(r3,powersave_nap)
-       lwz     r4,ADDROFF(powersave_nap)(r3)
-       cmpwi   0,r4,0
-       beqlr
-       li      r3, 1
-       /* fall through */
-
-_GLOBAL(power7_nap)
-       mr      r4,r3
-       li      r3,PNV_THREAD_NAP
-       LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
-       b       pnv_powersave_common
-       /* No return */
-
-_GLOBAL(power7_sleep)
-       li      r3,PNV_THREAD_SLEEP
-       li      r4,1
-       LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+       LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
        b       pnv_powersave_common
-       /* No return */
-
-_GLOBAL(power7_winkle)
-       li      r3,PNV_THREAD_WINKLE
-       li      r4,1
-       LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
-       b       pnv_powersave_common
-       /* No return */
 
 #define CHECK_HMI_INTERRUPT                                            \
        mfspr   r0,SPRN_SRR1;                                           \
@@ -372,17 +324,13 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);          \
 20:    nop;
 
 /*
- * r3 - The PSSCR value corresponding to the stop state.
- * r4 - The PSSCR mask corrresonding to the stop state.
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired PSSCR register value.
  */
 _GLOBAL(power9_idle_stop)
-       mfspr   r5,SPRN_PSSCR
-       andc    r5,r5,r4
-       or      r3,r3,r5
        std     r3, PACA_REQ_PSSCR(r13)
        mtspr   SPRN_PSSCR,r3
-       LOAD_REG_ADDR(r5,power_enter_stop)
-       li      r4,1
+       LOAD_REG_ADDR(r4,power_enter_stop)
        b       pnv_powersave_common
        /* No return */
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5c291df30fe34328d64b7bc0985843a7d9dfe73a..58dcac88bc7943f2939cafb79f5c8201e5afe4fb 100644
@@ -322,7 +322,8 @@ bool prep_irq_for_idle(void)
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
-       hard_irq_disable();
+       __hard_irq_disable();
+       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
        /*
         * If anything happened while we were soft-disabled,
@@ -347,6 +348,36 @@ bool prep_irq_for_idle(void)
        return true;
 }
 
+/*
+ * This is for idle sequences that return with IRQs off, but the
+ * idle state itself wakes on interrupt. Tell the irq tracer that
+ * IRQs are enabled for the duration of idle so it does not get long
+ * off times. Must be paired with fini_irq_for_idle_irqsoff.
+ */
+bool prep_irq_for_idle_irqsoff(void)
+{
+       WARN_ON(!irqs_disabled());
+
+       /*
+        * First we need to hard disable to ensure no interrupt
+        * occurs before we effectively enter the low power state
+        */
+       __hard_irq_disable();
+       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+       /*
+        * If anything happened while we were soft-disabled,
+        * we return now and do not enter the low power state.
+        */
+       if (lazy_irq_pending())
+               return false;
+
+       /* Tell lockdep we are about to re-enable */
+       trace_hardirqs_on();
+
+       return true;
+}
+
 /*
  * Force a replay of the external interrupt handler on this CPU.
  */
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 46946a58700413600131d396adc59f26893256a8..f875879ff1eb32b882d1a5c755a4a54803d766c6 100644
@@ -23,6 +23,7 @@
 #include <asm/cpuidle.h>
 #include <asm/code-patching.h>
 #include <asm/smp.h>
+#include <asm/runlatch.h>
 
 #include "powernv.h"
 #include "subcore.h"
@@ -283,12 +284,68 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
                        show_fastsleep_workaround_applyonce,
                        store_fastsleep_workaround_applyonce);
 
+static unsigned long __power7_idle_type(unsigned long type)
+{
+       unsigned long srr1;
+
+       if (!prep_irq_for_idle_irqsoff())
+               return 0;
+
+       ppc64_runlatch_off();
+       srr1 = power7_idle_insn(type);
+       ppc64_runlatch_on();
+
+       fini_irq_for_idle_irqsoff();
+
+       return srr1;
+}
+
+void power7_idle_type(unsigned long type)
+{
+       __power7_idle_type(type);
+}
+
+void power7_idle(void)
+{
+       if (!powersave_nap)
+               return;
+
+       power7_idle_type(PNV_THREAD_NAP);
+}
+
+static unsigned long __power9_idle_type(unsigned long stop_psscr_val,
+                                     unsigned long stop_psscr_mask)
+{
+       unsigned long psscr;
+       unsigned long srr1;
+
+       if (!prep_irq_for_idle_irqsoff())
+               return 0;
+
+       psscr = mfspr(SPRN_PSSCR);
+       psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
+
+       ppc64_runlatch_off();
+       srr1 = power9_idle_stop(psscr);
+       ppc64_runlatch_on();
+
+       fini_irq_for_idle_irqsoff();
+
+       return srr1;
+}
+
+void power9_idle_type(unsigned long stop_psscr_val,
+                                     unsigned long stop_psscr_mask)
+{
+       __power9_idle_type(stop_psscr_val, stop_psscr_mask);
+}
+
 /*
  * Used for ppc_md.power_save which needs a function with no parameters
  */
-static void power9_idle(void)
+void power9_idle(void)
 {
-       power9_idle_stop(pnv_default_stop_val, pnv_default_stop_mask);
+       power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -303,16 +360,17 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
        u32 idle_states = pnv_get_supported_cpuidle_states();
 
        if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
-               srr1 = power9_idle_stop(pnv_deepest_stop_psscr_val,
+               srr1 = __power9_idle_type(pnv_deepest_stop_psscr_val,
                                        pnv_deepest_stop_psscr_mask);
        } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
-               srr1 = power7_winkle();
+               srr1 = __power7_idle_type(PNV_THREAD_WINKLE);
        } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
                   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
-               srr1 = power7_sleep();
+               srr1 = __power7_idle_type(PNV_THREAD_SLEEP);
        } else if (idle_states & OPAL_PM_NAP_ENABLED) {
-               srr1 = power7_nap(1);
+               srr1 = __power7_idle_type(PNV_THREAD_NAP);
        } else {
+               ppc64_runlatch_off();
                /* This is the fallback method. We emulate snooze */
                while (!generic_check_cpu_restart(cpu)) {
                        HMT_low();
@@ -320,6 +378,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
                }
                srr1 = 0;
                HMT_medium();
+               ppc64_runlatch_on();
        }
 
        return srr1;
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 4aff754b6f2c8da6e94ae7dd5eedced19f95245e..f8752795decf3eaf564de02bfcef666bbaad1316 100644
@@ -182,9 +182,7 @@ static void pnv_smp_cpu_kill_self(void)
                 */
                kvmppc_set_host_ipi(cpu, 0);
 
-               ppc64_runlatch_off();
                srr1 = pnv_cpu_offline(cpu);
-               ppc64_runlatch_on();
 
                /*
                 * If the SRR1 value indicates that we woke up due to
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef11136fc8daba7f2666fed3f3b649cc2bd8..d975d78188a98e4af4a8eee9a75b726e3058000d 100644
@@ -18,6 +18,7 @@
 #include <linux/stop_machine.h>
 
 #include <asm/cputhreads.h>
+#include <asm/cpuidle.h>
 #include <asm/kvm_ppc.h>
 #include <asm/machdep.h>
 #include <asm/opal.h>
@@ -182,7 +183,7 @@ static void unsplit_core(void)
        cpu = smp_processor_id();
        if (cpu_thread_in_core(cpu) != 0) {
                while (mfspr(SPRN_HID0) & mask)
-                       power7_nap(0);
+                       power7_idle_insn(PNV_THREAD_NAP);
 
                per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
                return;
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 45eaf06462aeddc611961bb1ab648390f277a076..79152676f62b2f6d913b723dfc2eb2fe785b0e4f 100644
@@ -73,9 +73,8 @@ static int nap_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
 {
-       ppc64_runlatch_off();
-       power7_idle();
-       ppc64_runlatch_on();
+       power7_idle_type(PNV_THREAD_NAP);
+
        return index;
 }
 
@@ -98,7 +97,8 @@ static int fastsleep_loop(struct cpuidle_device *dev,
        new_lpcr &= ~LPCR_PECE1;
 
        mtspr(SPRN_LPCR, new_lpcr);
-       power7_sleep();
+
+       power7_idle_type(PNV_THREAD_SLEEP);
 
        mtspr(SPRN_LPCR, old_lpcr);
 
@@ -110,10 +110,8 @@ static int stop_loop(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv,
                     int index)
 {
-       ppc64_runlatch_off();
-       power9_idle_stop(stop_psscr_table[index].val,
+       power9_idle_type(stop_psscr_table[index].val,
                         stop_psscr_table[index].mask);
-       ppc64_runlatch_on();
        return index;
 }