This uses msgsnd where possible for signalling other threads within
the same core on POWER8 systems, rather than IPIs through the XICS
interrupt controller. This includes waking secondary threads to run
the guest, the interrupts generated by the virtual XICS, and the
interrupts to bring the other threads out of the guest when exiting.
Aggregated statistics from debugfs across vcpus for a guest with 32
vcpus, 8 threads/vcore, running on a POWER8, show this before the
change:
rm_entry:  3387.6ns (228 - 86600, 1008969 samples)
rm_exit:   4561.5ns (12 - 3477452, 1009402 samples)
rm_intr:   1660.0ns (12 - 553050, 3600051 samples)
and this after the change:
rm_entry:  3060.1ns (212 - 65138, 953873 samples)
rm_exit:   4244.1ns (12 - 9693408, 954331 samples)
rm_intr:   1342.3ns (12 - 1104718, 3405326 samples)
for a test of booting Fedora 20 big-endian to the login prompt.
The time taken for an H_PROD hcall (which is handled in the host
kernel) went down from about 35 microseconds to about 16 microseconds
with this change.
The noinline added to kvmppc_run_core turned out to be necessary for
good performance, at least with gcc 4.9.2 as packaged with Fedora 21
and a little-endian POWER8 host.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
#include <asm/thread_info.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
+#include <asm/dbell.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
offsetof(struct paca_struct, subcore_sibling_mask));
#endif
+ DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+
return 0;
}
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
+#include <asm/dbell.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+static bool kvmppc_ipi_thread(int cpu)
+{
+ /* On POWER8 for IPIs to threads in the same core, use msgsnd */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ preempt_disable();
+ if (cpu_first_thread_sibling(cpu) ==
+ cpu_first_thread_sibling(smp_processor_id())) {
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ msg |= cpu_thread_in_core(cpu);
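+ /* make prior stores to vcpu/vcore state visible before the target wakes */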
+ smp_mb();
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ preempt_enable();
+ return true;
+ }
+ preempt_enable();
+ }
+
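+ /* otherwise fall back to an IPI through the XICS, if real-mode access is set up */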
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+ if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
+ xics_wake_cpu(cpu);
+ return true;
+ }
+#endif
+
+ return false;
+}
+
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
- int me;
int cpu = vcpu->cpu;
wait_queue_head_t *wqp;
++vcpu->stat.halt_wakeup;
}
- me = get_cpu();
+ if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
+ return;
/* CPU points to the first thread of the core */
- if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
-#ifdef CONFIG_PPC_ICP_NATIVE
- int real_cpu = cpu + vcpu->arch.ptid;
- if (paca[real_cpu].kvm_hstate.xics_phys)
- xics_wake_cpu(real_cpu);
- else
-#endif
- if (cpu_online(cpu))
- smp_send_reschedule(cpu);
- }
- put_cpu();
+ if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
+ smp_send_reschedule(cpu);
}
/*
/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
smp_wmb();
tpaca->kvm_hstate.kvm_vcpu = vcpu;
-#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (cpu != smp_processor_id())
- xics_wake_cpu(cpu);
-#endif
+ kvmppc_ipi_thread(cpu);
}
static void kvmppc_wait_for_nap(void)
* Run a set of guest threads on a physical core.
* Called with vc->lock held.
*/
-static void kvmppc_run_core(struct kvmppc_vcore *vc)
+static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
{
struct kvm_vcpu *vcpu;
int i;
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
+#include <asm/dbell.h>
+#include <asm/cputhreads.h>
#define KVM_CMA_CHUNK_ORDER 18
}
/*
- * Send an interrupt to another CPU.
+ * Send an interrupt or message to another CPU.
* This can only be called in real mode.
* The caller needs to include any barrier needed to order writes
* to memory vs. the IPI/message.
{
unsigned long xics_phys;
- /* Poke the target */
+ /* On POWER8 for IPIs to threads in the same core, use msgsnd */
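+ /* raw_smp_processor_id() is safe: real mode runs with interrupts hard-disabled, so we cannot migrate */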
+ if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ cpu_first_thread_sibling(cpu) ==
+ cpu_first_thread_sibling(raw_smp_processor_id())) {
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ msg |= cpu_thread_in_core(cpu);
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ return;
+ }
+
+ /* Else poke the target with an IPI */
xics_phys = paca[cpu].kvm_hstate.xics_phys;
rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
+ /* Hypervisor doorbell - exit only if host IPI flag set */
+ cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
+ bne 3f
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ beq 4f
+ b guest_exit_cont
+3:
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
bne+ guest_exit_cont
bgt guest_exit_cont
/* Check if any CPU is heading out to the host, if so head out too */
- ld r5, HSTATE_KVM_VCORE(r13)
+4: ld r5, HSTATE_KVM_VCORE(r13)
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
mr r4, r9
/*
* Take a nap until a decrementer or external or doorbell interrupt
* occurs, with PECE1 and PECE0 set in LPCR.
- * On POWER8, if we are ceding, also set PECEDP.
+ * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
* Also clear the runlatch bit before napping.
*/
kvm_do_nap:
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
+ ori r5, r5, LPCR_PECEDH
rlwimi r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_LPCR,r5
* Returns (in r3):
* 0 if nothing needs to be done
* 1 if something happened that needs to be handled by the host
- * -1 if there was a guest wakeup (IPI)
+ * -1 if there was a guest wakeup (IPI or msgsnd)
*
* Also sets r12 to the interrupt vector for any interrupt that needs
* to be handled now by the host (0x500 for external interrupt), or zero.
/* hypervisor doorbell */
3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
+ /* see if it's a host IPI */
li r3, 1
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bnelr
+ /* if not, clear it and return -1 */
+ lis r6, (PPC_DBELL_SERVER << (63-36))@h
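+ /* encode the server doorbell type so msgclr clears the right message */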
+ PPC_MSGCLR(6)
+ li r3, -1
blr
/*