From 66feed61cdf6ee65fd551d3460b1efba6bee55b8 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Sat, 28 Mar 2015 14:21:12 +1100
Subject: [PATCH] KVM: PPC: Book3S HV: Use msgsnd for signalling threads on
 POWER8

This uses msgsnd where possible for signalling other threads within
the same core on POWER8 systems, rather than IPIs through the XICS
interrupt controller.  This includes waking secondary threads to run
the guest, the interrupts generated by the virtual XICS, and the
interrupts to bring the other threads out of the guest when exiting.

Aggregated statistics from debugfs across vcpus for a guest with 32
vcpus, 8 threads/vcore, running on a POWER8, show this before the
change:

 rm_entry:  3387.6ns (228 - 86600, 1008969 samples)
  rm_exit:  4561.5ns (12 - 3477452, 1009402 samples)
  rm_intr:  1660.0ns (12 - 553050, 3600051 samples)

and this after the change:

 rm_entry:  3060.1ns (212 - 65138, 953873 samples)
  rm_exit:  4244.1ns (12 - 9693408, 954331 samples)
  rm_intr:  1342.3ns (12 - 1104718, 3405326 samples)

for a test of booting Fedora 20 big-endian to the login prompt.

The time taken for an H_PROD hcall (which is handled in the host
kernel) went down from about 35 microseconds to about 16 microseconds
with this change.

The noinline added to kvmppc_run_core turned out to be necessary for
good performance, at least with gcc 4.9.2 as packaged with Fedora 21
and a little-endian POWER8 host.

Signed-off-by: Paul Mackerras
Signed-off-by: Alexander Graf
---
(An illustrative sketch of the doorbell message encoding used here is
appended after the patch.)

 arch/powerpc/kernel/asm-offsets.c       |  3 ++
 arch/powerpc/kvm/book3s_hv.c            | 51 ++++++++++++++++---------
 arch/powerpc/kvm/book3s_hv_builtin.c    | 16 +++++++-
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 22 +++++++++--
 4 files changed, 70 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0d07efbe3fa7..0034b6b3556a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -37,6 +37,7 @@
 #include 
 #include 
 #include 
+#include <asm/dbell.h>
 #ifdef CONFIG_PPC64
 #include 
 #include 
@@ -759,5 +760,7 @@ int main(void)
 	       offsetof(struct paca_struct, subcore_sibling_mask));
 #endif
 
+	DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ea1600ff52b2..48d3c5d2ecc9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -51,6 +51,7 @@
 #include 
 #include 
 #include 
+#include <asm/dbell.h>
 #include 
 #include 
 #include 
@@ -84,9 +85,35 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+static bool kvmppc_ipi_thread(int cpu)
+{
+	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		preempt_disable();
+		if (cpu_first_thread_sibling(cpu) ==
+		    cpu_first_thread_sibling(smp_processor_id())) {
+			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+			msg |= cpu_thread_in_core(cpu);
+			smp_mb();
+			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+			preempt_enable();
+			return true;
+		}
+		preempt_enable();
+	}
+
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+	if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
+		xics_wake_cpu(cpu);
+		return true;
+	}
+#endif
+
+	return false;
+}
+
 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
-	int me;
 	int cpu = vcpu->cpu;
 	wait_queue_head_t *wqp;
@@ -96,20 +123,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 		++vcpu->stat.halt_wakeup;
 	}
 
-	me = get_cpu();
+	if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
+		return;
+
 	/* CPU points to the first thread of the core */
-	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
-#ifdef CONFIG_PPC_ICP_NATIVE
-		int real_cpu = cpu + vcpu->arch.ptid;
-		if (paca[real_cpu].kvm_hstate.xics_phys)
-			xics_wake_cpu(real_cpu);
-		else
-#endif
-		if (cpu_online(cpu))
-			smp_send_reschedule(cpu);
-	}
-	put_cpu();
+	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
+		smp_send_reschedule(cpu);
 }
 
 /*
@@ -1781,10 +1800,8 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
 	smp_wmb();
 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
-#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 	if (cpu != smp_processor_id())
-		xics_wake_cpu(cpu);
-#endif
+		kvmppc_ipi_thread(cpu);
 }
 
 static void kvmppc_wait_for_nap(void)
@@ -1933,7 +1950,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
-static void kvmppc_run_core(struct kvmppc_vcore *vc)
+static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
 	struct kvm_vcpu *vcpu;
 	int i;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index c42aa55b885f..ed2589d4593f 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -23,6 +23,8 @@
 #include 
 #include 
 #include 
+#include <asm/dbell.h>
+#include <asm/cputhreads.h>
 
 #define KVM_CMA_CHUNK_ORDER	18
 
@@ -193,7 +195,7 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
 }
 
 /*
- * Send an interrupt to another CPU.
+ * Send an interrupt or message to another CPU.
  * This can only be called in real mode.
  * The caller needs to include any barrier needed to order writes
  * to memory vs. the IPI/message.
@@ -202,7 +204,17 @@ void kvmhv_rm_send_ipi(int cpu)
 {
 	unsigned long xics_phys;
 
-	/* Poke the target */
+	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+	    cpu_first_thread_sibling(cpu) ==
+	    cpu_first_thread_sibling(raw_smp_processor_id())) {
+		unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+		msg |= cpu_thread_in_core(cpu);
+		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+		return;
+	}
+
+	/* Else poke the target with an IPI */
 	xics_phys = paca[cpu].kvm_hstate.xics_phys;
 	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index fcf3a617cc8a..4d70df26c402 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1123,6 +1123,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
 	beq	hcall_try_real_mode
 
+	/* Hypervisor doorbell - exit only if host IPI flag set */
+	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
+	bne	3f
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	beq	4f
+	b	guest_exit_cont
+3:
 	/* External interrupt ? */
 	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
 	bne+	guest_exit_cont
@@ -1135,7 +1142,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	bgt	guest_exit_cont
 
 	/* Check if any CPU is heading out to the host, if so head out too */
-	ld	r5, HSTATE_KVM_VCORE(r13)
+4:	ld	r5, HSTATE_KVM_VCORE(r13)
 	lwz	r0, VCORE_ENTRY_EXIT(r5)
 	cmpwi	r0, 0x100
 	mr	r4, r9
@@ -2148,7 +2155,7 @@ _GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
 /*
  * Take a nap until a decrementer or external or doorbell interrupt
  * occurs, with PECE1 and PECE0 set in LPCR.
- * On POWER8, if we are ceding, also set PECEDP.
+ * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
  * Also clear the runlatch bit before napping.
  */
 kvm_do_nap:
@@ -2161,6 +2168,7 @@ kvm_do_nap:
 	mfspr	r5,SPRN_LPCR
 	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
 BEGIN_FTR_SECTION
+	ori	r5, r5, LPCR_PECEDH
 	rlwimi	r5, r3, 0, LPCR_PECEDP
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_LPCR,r5
@@ -2299,7 +2307,7 @@ machine_check_realmode:
  * Returns (in r3):
  *	0 if nothing needs to be done
  *	1 if something happened that needs to be handled by the host
- *	-1 if there was a guest wakeup (IPI)
+ *	-1 if there was a guest wakeup (IPI or msgsnd)
  *
  * Also sets r12 to the interrupt vector for any interrupt that needs
  * to be handled now by the host (0x500 for external interrupt), or zero.
@@ -2330,7 +2338,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* hypervisor doorbell */
 3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
+	/* see if it's a host IPI */
 	li	r3, 1
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bnelr
+	/* if not, clear it and return -1 */
+	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
+	PPC_MSGCLR(6)
+	li	r3, -1
 	blr
 
 /*
-- 
2.20.1
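
Appendix (not part of the patch): for reviewers who have not used the
POWER8 doorbell facility, here is a rough standalone sketch of how the
message value passed to msgsnd in kvmppc_ipi_thread() and
kvmhv_rm_send_ipi() is composed.  It is illustrative only: the DBELL_*
constants below are assumptions mirroring asm/dbell.h (a server
doorbell type of 5, placed with the same 63-36 shift the patch uses
for msgclr), so check the real headers rather than trusting this.

/* Illustrative userspace sketch only; constants assumed, not taken
 * verbatim from asm/dbell.h. */
#include <stdio.h>

#define DBELL_TYPE_SHIFT	(63 - 36)	/* same shift as the msgclr in the patch */
#define DBELL_SERVER		5		/* assumed value of PPC_DBELL_SERVER */
#define DBELL_TYPE(t)		((unsigned long)(t) << DBELL_TYPE_SHIFT)

/* Directed hypervisor doorbell aimed at one thread of the sending core */
static unsigned long dbell_server_msg(int thread_in_core)
{
	return DBELL_TYPE(DBELL_SERVER) | (unsigned long)thread_in_core;
}

int main(void)
{
	int t;

	/* msgsnd only reaches threads of the sending core, which is why
	 * the patch checks cpu_first_thread_sibling() before using it
	 * and falls back to an XICS IPI otherwise. */
	for (t = 0; t < 8; t++)
		printf("thread %d: msgsnd payload = 0x%016lx\n",
		       t, dbell_server_msg(t));
	return 0;
}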