KVM: arm/arm64: Count guest exit due to various reasons
authorAmit Tomar <amittomer25@gmail.com>
Thu, 26 Nov 2015 10:09:43 +0000 (10:09 +0000)
committerMarc Zyngier <marc.zyngier@arm.com>
Mon, 14 Dec 2015 11:30:00 +0000 (11:30 +0000)
This adds guest exit statistics to debugfs, which can be helpful
when measuring KVM performance.

  [ Renamed some of the field names - Christoffer ]

Signed-off-by: Amit Singh Tomar <amittomer25@gmail.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
arch/arm/include/asm/kvm_host.h
arch/arm/kvm/arm.c
arch/arm/kvm/guest.c
arch/arm/kvm/handle_exit.c
arch/arm/kvm/mmio.c
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/guest.c
arch/arm64/kvm/handle_exit.c

index 6692982c9b575db476bc12a37e2d69b27c00d9fb..f9f27792d8edc3d7a5f77b03ba72b0c051376f55 100644 (file)
@@ -150,6 +150,12 @@ struct kvm_vcpu_stat {
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_wakeup;
+       u32 hvc_exit_stat;
+       u64 wfe_exit_stat;
+       u64 wfi_exit_stat;
+       u64 mmio_exit_user;
+       u64 mmio_exit_kernel;
+       u64 exits;
 };
 
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
index e06fd299de0846b44b72cd037eacd05b0b2cb051..8a79a572948729be3c0d15867cc6f534914c9b2f 100644 (file)
@@ -603,6 +603,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
                vcpu->mode = OUTSIDE_GUEST_MODE;
+               vcpu->stat.exits++;
                /*
                 * Back from guest
                 *************************************************************/
index 96e935bbc38c8b4fd906aeacdc27ca28696b596a..5fa69d7bae58a06ef19f7ca72fd04cf6603d6963 100644 (file)
 #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
+       VCPU_STAT(hvc_exit_stat),
+       VCPU_STAT(wfe_exit_stat),
+       VCPU_STAT(wfi_exit_stat),
+       VCPU_STAT(mmio_exit_user),
+       VCPU_STAT(mmio_exit_kernel),
+       VCPU_STAT(exits),
        { NULL }
 };
 
index 95f12b2ccdcb8172e4faa4f7148438ec27f08f1a..3ede90d8b20bae91c4b8f20173b307cc1b0659fd 100644 (file)
@@ -42,6 +42,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
                      kvm_vcpu_hvc_get_imm(vcpu));
+       vcpu->stat.hvc_exit_stat++;
 
        ret = kvm_psci_call(vcpu);
        if (ret < 0) {
@@ -89,9 +90,11 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
                trace_kvm_wfx(*vcpu_pc(vcpu), true);
+               vcpu->stat.wfe_exit_stat++;
                kvm_vcpu_on_spin(vcpu);
        } else {
                trace_kvm_wfx(*vcpu_pc(vcpu), false);
+               vcpu->stat.wfi_exit_stat++;
                kvm_vcpu_block(vcpu);
        }
 
index 3a10c9f1d0a46b68b42d2e7c311a7dbd6ecd44a0..7f33b2056ae6d92f568ad71fbf28cd52044b11fb 100644 (file)
@@ -210,8 +210,11 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
        if (!ret) {
                /* We handled the access successfully in the kernel. */
+               vcpu->stat.mmio_exit_kernel++;
                kvm_handle_mmio_return(vcpu, run);
                return 1;
+       } else {
+               vcpu->stat.mmio_exit_user++;
        }
 
        run->exit_reason        = KVM_EXIT_MMIO;
index a35ce7266aac3688fa6460bace61f90477448aa6..19504aa12459e4a6f83e07f8aa22ab17d94074d5 100644 (file)
@@ -197,6 +197,12 @@ struct kvm_vcpu_stat {
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_wakeup;
+       u32 hvc_exit_stat;
+       u64 wfe_exit_stat;
+       u64 wfi_exit_stat;
+       u64 mmio_exit_user;
+       u64 mmio_exit_kernel;
+       u64 exits;
 };
 
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
index d250160d32bc68ae636c804b9cdfe2499bdddcb9..115522ba24610c44c39ccbbcb116d42e546018f7 100644 (file)
 
 #include "trace.h"
 
+#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
+#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
+       VCPU_STAT(hvc_exit_stat),
+       VCPU_STAT(wfe_exit_stat),
+       VCPU_STAT(wfi_exit_stat),
+       VCPU_STAT(mmio_exit_user),
+       VCPU_STAT(mmio_exit_kernel),
+       VCPU_STAT(exits),
        { NULL }
 };
 
index 15f0477b0d2adc53d86573b1733d2fa7f368bbd9..8bddae1404619ff703dac5ce297bf107dafc10e4 100644 (file)
@@ -39,6 +39,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));
+       vcpu->stat.hvc_exit_stat++;
 
        ret = kvm_psci_call(vcpu);
        if (ret < 0) {
@@ -71,9 +72,11 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
+               vcpu->stat.wfe_exit_stat++;
                kvm_vcpu_on_spin(vcpu);
        } else {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
+               vcpu->stat.wfi_exit_stat++;
                kvm_vcpu_block(vcpu);
        }