arm64: KVM: Add access handler for PMCNTENSET and PMCNTENCLR registers
author Shannon Zhao <shannon.zhao@linaro.org>
Tue, 8 Sep 2015 04:26:13 +0000 (12:26 +0800)
committer Marc Zyngier <marc.zyngier@arm.com>
Mon, 29 Feb 2016 18:34:20 +0000 (18:34 +0000)
Since the reset value of PMCNTENSET and PMCNTENCLR is UNKNOWN, use
reset_unknown as their reset handler. Add a handler to emulate writes
to the PMCNTENSET and PMCNTENCLR registers.

When the guest writes to PMCNTENSET, call perf_event_enable() to start
the perf events backing the selected counters. When it writes to
PMCNTENCLR, call perf_event_disable() to stop them.
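
For context, the guest-visible behaviour being emulated looks like this
from inside the guest (a minimal sketch, not part of the patch; the msr
accesses are standard AArch64 system-register writes, the macro names
are illustrative). Counting only actually starts once the global enable
bit PMCR_EL0.E is also set, which the enable path below checks:

    #define PMU_CYCLE_COUNTER       (1UL << 31)
    #define PMU_EVENT_COUNTER(n)    (1UL << (n))

    /* Enable event counter 0 and the cycle counter; writing zero bits
     * leaves the other counters untouched.
     */
    static inline void pmu_start_counters(void)
    {
            unsigned long mask = PMU_EVENT_COUNTER(0) | PMU_CYCLE_COUNTER;

            asm volatile("msr pmcntenset_el0, %0" : : "r" (mask));
    }

    /* Disable only event counter 0 by writing its bit to the CLR view. */
    static inline void pmu_stop_counter0(void)
    {
            asm volatile("msr pmcntenclr_el0, %0"
                         : : "r" (PMU_EVENT_COUNTER(0)));
    }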

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/sys_regs.c
include/kvm/arm_pmu.h
virt/kvm/arm/pmu.c

arch/arm64/include/asm/kvm_host.h
index 4ae27fe34240822b4feab81395b7bda32df27a23..993793b422aa947ad750dbb64d326bbdd56b6b30 100644
@@ -123,6 +123,7 @@ enum vcpu_sysreg {
        PMEVCNTR0_EL0,  /* Event Counter Register (0-30) */
        PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
        PMCCNTR_EL0,    /* Cycle Counter Register */
+       PMCNTENSET_EL0, /* Count Enable Set Register */
 
        /* 32bit specific registers. Keep them at the end of the range */
        DACR32_EL2,     /* Domain Access Control Register */
arch/arm64/kvm/sys_regs.c
index ff3214b6fbc87d757133c4d5b1201dabfc5e3dec..d4b6ae3c09b5607212e6751f642079d6a5bf191e 100644
@@ -563,6 +563,33 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
        return true;
 }
 
+static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                          const struct sys_reg_desc *r)
+{
+       u64 val, mask;
+
+       if (!kvm_arm_pmu_v3_ready(vcpu))
+               return trap_raz_wi(vcpu, p, r);
+
+       mask = kvm_pmu_valid_counter_mask(vcpu);
+       if (p->is_write) {
+               val = p->regval & mask;
+               if (r->Op2 & 0x1) {
+                       /* accessing PMCNTENSET_EL0 */
+                       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+                       kvm_pmu_enable_counter(vcpu, val);
+               } else {
+                       /* accessing PMCNTENCLR_EL0 */
+                       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+                       kvm_pmu_disable_counter(vcpu, val);
+               }
+       } else {
+               p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+       }
+
+       return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
        /* DBGBVRn_EL1 */                                               \
@@ -757,10 +784,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
          access_pmcr, reset_pmcr, },
        /* PMCNTENSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
-         trap_raz_wi },
+         access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
        /* PMCNTENCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
-         trap_raz_wi },
+         access_pmcnten, NULL, PMCNTENSET_EL0 },
        /* PMOVSCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
          trap_raz_wi },
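
Note how both entries share their backing state: PMCNTENCLR_EL0 has a
NULL reset handler and names PMCNTENSET_EL0 as its shadow register,
because the two encodings (Op2 0b001 for SET, 0b010 for CLR, hence the
"r->Op2 & 0x1" test in access_pmcnten) are simply set/clear views of
the single enable bitmap added to enum vcpu_sysreg above; reads of
either encoding return that same bitmap.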
@@ -1057,8 +1084,8 @@ static const struct sys_reg_desc cp15_regs[] = {
 
        /* PMU */
        { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
-       { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
+       { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
        { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
        { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
include/kvm/arm_pmu.h
index bcb7698058399a452db11fb6946f6c3440c94e65..b70058ef1dd636b216f6e25dc26d78f304daa89f 100644
@@ -40,6 +40,9 @@ struct kvm_pmu {
 #define kvm_arm_pmu_v3_ready(v)                ((v)->arch.pmu.ready)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
 #else
 struct kvm_pmu {
 };
@@ -52,6 +55,12 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 }
 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                             u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
 #endif
 
 #endif
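
The !CONFIG_KVM_ARM_PMU stubs above keep sys_regs.c building when PMU
support is compiled out; in that configuration kvm_arm_pmu_v3_ready()
never reports ready, so access_pmcnten() falls back to trap_raz_wi
before any of these helpers are reached.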
virt/kvm/arm/pmu.c
index cd74e6367cd61ee560ead0cbee54c60244b973f0..f8dc174308135a61f6a717bc4dad54fec2a2a741 100644
@@ -61,3 +61,69 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
        vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 }
+
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+       u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+
+       val &= ARMV8_PMU_PMCR_N_MASK;
+       if (val == 0)
+               return BIT(ARMV8_PMU_CYCLE_IDX);
+       else
+               return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
+}
+
+/**
+ * kvm_pmu_enable_counter - enable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENSET register
+ *
+ * Call perf_event_enable to start counting the perf event
+ */
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+       int i;
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+               return;
+
+       for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+               if (!(val & BIT(i)))
+                       continue;
+
+               pmc = &pmu->pmc[i];
+               if (pmc->perf_event) {
+                       perf_event_enable(pmc->perf_event);
+                       if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+                               kvm_debug("failed to enable perf event\n");
+               }
+       }
+}
+
+/**
+ * kvm_pmu_disable_counter - disable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENCLR register
+ *
+ * Call perf_event_disable to stop counting the perf event
+ */
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+       int i;
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       if (!val)
+               return;
+
+       for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+               if (!(val & BIT(i)))
+                       continue;
+
+               pmc = &pmu->pmc[i];
+               if (pmc->perf_event)
+                       perf_event_disable(pmc->perf_event);
+       }
+}
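
A worked example ties the pieces together (illustrative only, not part
of the patch; it assumes ARMV8_PMU_CYCLE_IDX is 31, i.e. the cycle
counter occupies bit 31 as in the ARMv8 PMU architecture):

    /*
     * With PMCR_EL0.N == 6 the guest has event counters 0-5 plus the
     * cycle counter, so kvm_pmu_valid_counter_mask() returns
     *
     *         GENMASK(5, 0) | BIT(31) == 0x8000003f
     *
     * A guest "msr pmcntenset_el0, x0" with x0 == BIT(0) | BIT(31)
     * traps to access_pmcnten(), which masks the value, ORs it into
     * the shadow PMCNTENSET_EL0 and calls kvm_pmu_enable_counter(),
     * starting the perf events backing counter 0 and the cycle
     * counter.
     */

Also note the asymmetry between the two helpers: kvm_pmu_enable_counter()
returns early when the global PMCR_EL0.E bit is clear, whereas
kvm_pmu_disable_counter() always runs, so a guest can clear enable bits
even while the PMU as a whole is disabled.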