#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
+/* KVM specific control registers */
+
+/*
+ * CP0_Count control
+ * DC:    Set 0: Master re-enable CP0_Count with unchanged bias, handling
+ *               timer interrupts since COUNT_RESUME
+ *        Set 1: Master disable CP0_Count and set COUNT_RESUME to now
+ *        This can be used to freeze the timer to get a consistent snapshot of
+ *        the CP0_Count and timer interrupt pending state, while also resuming
+ *        safely without losing time or guest timer interrupts.
+ * Other: Reserved, do not change.
+ */
+#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+				 0x20000 | 0)
+#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
+
+/*
+ * CP0_Count resume monotonic nanoseconds
+ * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
+ * disable). Any reads and writes of Count related registers while
+ * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
+ * cleared again (master enable) any timer interrupts since this time will be
+ * emulated.
+ * Modifications to times in the future are rejected.
+ */
+#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+				 0x20000 | 1)
+
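As an illustration of how a VMM might drive these two registers from userspace, here is a minimal sketch using the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. It assumes an already-open vcpu file descriptor and MIPS uapi headers that provide the definitions above; the helper names are hypothetical and error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Write one 64-bit KVM register on an already-open vcpu fd. */
static int set_reg_u64(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)&val };

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Read one 64-bit KVM register. */
static int get_reg_u64(int vcpu_fd, uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Freeze the guest timer, snapshot its state, then re-enable it. */
static void snapshot_timer(int vcpu_fd)
{
	uint64_t resume_ns;

	/* DC=1: master disable; COUNT_RESUME latches the current time */
	set_reg_u64(vcpu_fd, KVM_REG_MIPS_COUNT_CTL, KVM_REG_MIPS_COUNT_CTL_DC);

	/* Count-related reads now appear to happen at resume_ns */
	get_reg_u64(vcpu_fd, KVM_REG_MIPS_COUNT_RESUME, &resume_ns);

	/* ... save CP0_Count, CP0_Compare and CP0_Cause here ... */

	/* DC=0: master re-enable; interrupts since resume_ns are emulated */
	set_reg_u64(vcpu_fd, KVM_REG_MIPS_COUNT_CTL, 0);
}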
/*
* KVM MIPS specific structures and definitions
*
KVM_REG_MIPS_CP0_CONFIG2,
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG7,
- KVM_REG_MIPS_CP0_ERROREPC
+ KVM_REG_MIPS_CP0_ERROREPC,
+
+ KVM_REG_MIPS_COUNT_CTL,
+ KVM_REG_MIPS_COUNT_RESUME,
};
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
break;
/* registers to be handled specially */
case KVM_REG_MIPS_CP0_COUNT:
+ case KVM_REG_MIPS_COUNT_CTL:
+ case KVM_REG_MIPS_COUNT_RESUME:
ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
if (ret)
return ret;
case KVM_REG_MIPS_CP0_COUNT:
case KVM_REG_MIPS_CP0_COMPARE:
case KVM_REG_MIPS_CP0_CAUSE:
+ case KVM_REG_MIPS_COUNT_CTL:
+ case KVM_REG_MIPS_COUNT_RESUME:
return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
default:
return -EINVAL;
* kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
* @vcpu: Virtual CPU.
*
- * Returns: 1 if the CP0_Count timer is disabled by the guest CP0_Cause.DC
- * bit.
+ * Returns: 1 if the CP0_Count timer is disabled by either the guest
+ * CP0_Cause.DC bit or the count_ctl.DC bit.
* 0 otherwise (in which case CP0_Count timer is running).
*/
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	return kvm_read_c0_guest_cause(cop0) & CAUSEF_DC;
+	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+	       (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}
/**
return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
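For a concrete feel of the nanoseconds-to-Count scaling in the return statement above, a tiny standalone sketch with assumed example numbers (the 100 MHz rate is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t count_hz = 100000000;	/* assumed 100 MHz timer */
	uint64_t delta_ns = 2000000;	/* 2 ms of monotonic time */

	/* Same scaling as above: ticks = ns * count_hz / NSEC_PER_SEC */
	uint64_t ticks = delta_ns * count_hz / 1000000000ull;

	printf("%llu ns -> %llu CP0_Count ticks\n",
	       (unsigned long long)delta_ns, (unsigned long long)ticks);
	return 0;
}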
+/**
+ * kvm_mips_count_time() - Get effective current time.
+ * @vcpu: Virtual CPU.
+ *
+ * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
+ * except when the master disable bit is set in count_ctl, in which case it is
+ * count_resume, i.e. the time that the count was disabled.
+ *
+ * Returns: Effective monotonic ktime for CP0_Count.
+ */
+static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+		return vcpu->arch.count_resume;
+
+	return ktime_get();
+}
+
/**
* kvm_mips_read_count_running() - Read the current count value as if running.
* @vcpu: Virtual CPU.
ktime_t now;
/* Calculate bias */
- now = ktime_get();
+ now = kvm_mips_count_time(vcpu);
vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
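/*
 * count_bias is chosen so that the guest value can later be recovered as
 * count_bias + kvm_mips_ktime_to_count(vcpu, now) (modulo 2^32). For
 * example, a guest write of 0x12345678 while kvm_mips_ktime_to_count()
 * evaluates to 0x1000 gives a bias of 0x12344678.
 */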
if (kvm_mips_count_disabled(vcpu))
* Disable the CP0_Count timer. A timer interrupt on or before the final stop
* time will be handled but not after.
*
- * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC has been
- * set (count disabled).
+ * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
+ * count_ctl.DC has been set (count disabled).
*
* Returns: The time that the timer was stopped.
*/
* @vcpu: Virtual CPU.
*
* Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
- * before the final stop time will be handled, but not after.
+ * before the final stop time will be handled if the timer isn't disabled by
+ * count_ctl.DC, but not after.
*
* Assumes CP0_Cause.DC is clear (count enabled).
*/
struct mips_coproc *cop0 = vcpu->arch.cop0;
kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
- kvm_mips_count_disable(vcpu);
+ if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+ kvm_mips_count_disable(vcpu);
}
/**
* @vcpu: Virtual CPU.
*
* Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
- * the start time will be handled, potentially before even returning, so the
- * caller should be careful with ordering of CP0_Cause modifications so as not
- * to lose it.
+ * the start time will be handled if the timer isn't disabled by count_ctl.DC,
+ * potentially before even returning, so the caller should be careful with
+ * ordering of CP0_Cause modifications so as not to lose it.
*
* Assumes CP0_Cause.DC is set (count disabled).
*/
/*
* Set the dynamic count to match the static count.
- * This starts the hrtimer.
+ * This starts the hrtimer if count_ctl.DC allows it.
+ * Otherwise it conveniently updates the biases.
*/
count = kvm_read_c0_guest_count(cop0);
kvm_mips_write_count(vcpu, count);
}
+/**
+ * kvm_mips_set_count_ctl() - Update the count control KVM register.
+ * @vcpu: Virtual CPU.
+ * @count_ctl: Count control register new value.
+ *
+ * Set the count control KVM register. The timer is updated accordingly.
+ *
+ * Returns: -EINVAL if reserved bits are set.
+ * 0 on success.
+ */
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+	s64 delta;
+	ktime_t expire, now;
+	uint32_t count, compare;
+
+	/* Only allow defined bits to be changed */
+	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
+		return -EINVAL;
+
+	/* Apply new value */
+	vcpu->arch.count_ctl = count_ctl;
+
+	/* Master CP0_Count disable */
+	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
+		/* Is CP0_Cause.DC already disabling CP0_Count? */
+		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
+			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
+				/* Just record the current time */
+				vcpu->arch.count_resume = ktime_get();
+		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
+			/* disable timer and record current time */
+			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
+		} else {
+			/*
+			 * Calculate timeout relative to static count at resume
+			 * time (wrap 0 to 2^32).
+			 */
+			count = kvm_read_c0_guest_count(cop0);
+			compare = kvm_read_c0_guest_compare(cop0);
+			delta = (u64)(uint32_t)(compare - count - 1) + 1;
+			delta = div_u64(delta * NSEC_PER_SEC,
+					vcpu->arch.count_hz);
+			expire = ktime_add_ns(vcpu->arch.count_resume, delta);
+
+			/* Handle pending interrupt */
+			now = ktime_get();
+			if (ktime_compare(now, expire) >= 0)
+				/* Nothing should be waiting on the timeout */
+				kvm_mips_callbacks->queue_timer_int(vcpu);
+
+			/* Resume hrtimer without changing bias */
+			count = kvm_mips_read_count_running(vcpu, now);
+			kvm_mips_resume_hrtimer(vcpu, now, count);
+		}
+	}
+
+	return 0;
+}
+
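A rough standalone sketch of the expiry arithmetic in the resume path above, with made-up numbers (the 100 MHz count_hz is an assumption, and plain C stands in for the kernel helpers); it also shows why the "(wrap 0 to 2^32)" step treats Compare == Count as a full period rather than an immediate expiry:

#include <stdint.h>
#include <stdio.h>

/* Ticks until the next CP0_Compare match, wrapping 0 up to 2^32. */
static uint64_t ticks_to_match(uint32_t count, uint32_t compare)
{
	return (uint64_t)(uint32_t)(compare - count - 1) + 1;
}

int main(void)
{
	uint64_t count_hz = 100000000;	/* assumed 100 MHz */

	/* 1000 ticks apart: expires 10000 ns after the resume time */
	uint64_t t1 = ticks_to_match(0x1000, 0x13e8);
	/* Compare == Count: a full 2^32-tick period, not "immediately" */
	uint64_t t2 = ticks_to_match(0x1000, 0x1000);

	printf("%llu ticks = %llu ns\n", (unsigned long long)t1,
	       (unsigned long long)(t1 * 1000000000ull / count_hz));
	printf("%llu ticks = %llu ns\n", (unsigned long long)t2,
	       (unsigned long long)(t2 * 1000000000ull / count_hz));
	return 0;
}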
+/**
+ * kvm_mips_set_count_resume() - Update the count resume KVM register.
+ * @vcpu: Virtual CPU.
+ * @count_resume: Count resume register new value.
+ *
+ * Set the count resume KVM register.
+ *
+ * Returns: -EINVAL if out of valid range (0..now).
+ * 0 on success.
+ */
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
+{
+	/*
+	 * It doesn't make sense for the resume time to be in the future, as it
+	 * would be possible for the next interrupt to be more than a full
+	 * period in the future.
+	 */
+	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
+		return -EINVAL;
+
+	vcpu->arch.count_resume = ns_to_ktime(count_resume);
+	return 0;
+}
+
/**
* kvm_mips_count_timeout() - Push timer forward on timeout.
* @vcpu: Virtual CPU.