KVM: x86: Extend KVM_SET_VCPU_EVENTS with selective updates
authorJan Kiszka <jan.kiszka@web.de>
Sun, 6 Dec 2009 17:24:15 +0000 (18:24 +0100)
committerMarcelo Tosatti <mtosatti@redhat.com>
Sun, 27 Dec 2009 15:36:33 +0000 (13:36 -0200)
User space may not want to overwrite asynchronously changing VCPU event
states on write-back. So allow it to skip nmi.pending and sipi_vector by
keeping the corresponding bits cleared in the flags field of kvm_vcpu_events.

[avi: advertise the bits in KVM_GET_VCPU_EVENTS]

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Documentation/kvm/api.txt
arch/x86/include/asm/kvm.h
arch/x86/kvm/x86.c

index e1a114161027d6598234959e2605af08018f6fc6..2811e452f7566f5f5fe4fa6242f8724a9970ba11 100644 (file)
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
                __u8 pad;
        } nmi;
        __u32 sipi_vector;
-       __u32 flags;   /* must be zero */
+       __u32 flags;
 };
 
 4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.
 
 See KVM_GET_VCPU_EVENTS for the data structure.
 
+Fields that may be modified asynchronously by running VCPUs can be excluded
+from the update. These fields are nmi.pending and sipi_vector. Keep the
+corresponding bits in the flags field cleared to suppress overwriting the
+current in-kernel state. The bits are:
+
+KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
+KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+
 
 5. The kvm_run structure
 
index 950df434763f5b2ea3fa407d3511f0020e9cb67b..f46b79f6c16c873a337013fa8a18e18f85c4ee22 100644 (file)
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
        __u8 reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING        0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR        0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
        struct {
index 9d068966fb2aa491f00152e22ea047e60984f683..6651dbf58675ee7ec8d9e7f3e145dce09953e054 100644 (file)
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
        events->sipi_vector = vcpu->arch.sipi_vector;
 
-       events->flags = 0;
+       events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
        vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
 {
-       if (events->flags)
+       if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
                return -EINVAL;
 
        vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                kvm_pic_clear_isr_ack(vcpu->kvm);
 
        vcpu->arch.nmi_injected = events->nmi.injected;
-       vcpu->arch.nmi_pending = events->nmi.pending;
+       if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+               vcpu->arch.nmi_pending = events->nmi.pending;
        kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-       vcpu->arch.sipi_vector = events->sipi_vector;
+       if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+               vcpu->arch.sipi_vector = events->sipi_vector;
 
        vcpu_put(vcpu);