kvm: x86: Add a hook for arch specific dirty logging emulation
author     Bandan Das <bsd@redhat.com>
           Fri, 5 May 2017 19:25:13 +0000 (15:25 -0400)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 9 May 2017 09:54:16 +0000 (11:54 +0200)
When KVM updates accessed/dirty bits during a guest page table walk,
this hook can be used to invoke an arch-specific function that
implements or emulates dirty logging, such as PML.
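
As a sketch of how a backend can use the hook (following the companion
nVMX PML emulation patch; the function below is illustrative here, not
part of this commit), VMX can supply a write_log_dirty callback that
appends the faulting GPA to the vmcs12 PML buffer and signals the
walker when that buffer is full:

static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	struct page *page;
	u64 *pml_address;
	gpa_t gpa;

	/* Only relevant while running a nested (L2) guest. */
	if (!is_guest_mode(vcpu))
		return 0;

	/* Nothing to emulate unless L1 enabled PML for this guest. */
	vmcs12 = get_vmcs12(vcpu);
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	/* A full buffer must be surfaced to L1 as a PML-full event. */
	if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
		vmx->nested.pml_full = true;
		return 1;
	}

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;

	page = nested_get_page(vcpu, vmcs12->pml_address);
	if (!page)
		return 0;

	/* Append the dirtied GPA and consume one PML slot. */
	pml_address = kmap(page);
	pml_address[vmcs12->guest_pml_index--] = gpa;
	kunmap(page);
	nested_release_page_clean(page);

	return 0;
}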

Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h

index f5bddf92faba81675594db3c6ead4d1baf94f68f..9c761fea0c982e6dfd6acb949d7c8f738a50fd67 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1020,6 +1020,8 @@ struct kvm_x86_ops {
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
+       int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
 
index 558676538fca3c213d9e360f39e03ef15e2b1d47..5d3376f677949067f39c5e29fca2f3aceb7880bc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1498,6 +1498,21 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
+/**
+ * kvm_arch_write_log_dirty - emulate dirty page logging
+ * @vcpu: Guest mode vcpu
+ *
+ * Emulate arch specific page modification logging for the
+ * nested hypervisor
+ */
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+{
+       if (kvm_x86_ops->write_log_dirty)
+               return kvm_x86_ops->write_log_dirty(vcpu);
+
+       return 0;
+}
+
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn)
 {
index d8ccb32f7308ab3e2d1955b6dbd11203716f03a2..27975807cc64fcae7bccbb53eecab35c3f0c64e2 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -202,4 +202,5 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
 #endif
index 314d2071b3376e697163dc5a20ce2bbb11953ed0..56241746abbd71cae280621ec4b84d13b48bc81f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -226,6 +226,10 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                if (level == walker->level && write_fault &&
                                !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
+#if PTTYPE == PTTYPE_EPT
+                       if (kvm_arch_write_log_dirty(vcpu))
+                               return -EINVAL;
+#endif
                        pte |= PT_GUEST_DIRTY_MASK;
                }
                if (pte == orig_pte)
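
The #if PTTYPE == PTTYPE_EPT guard above limits the call to the EPT
instantiation of the walker, so the legacy and shadow paging variants
are unaffected; a nonzero return from the hook (nested PML buffer full)
aborts the A/D update with -EINVAL so the condition can be handled,
e.g. reflected to L1 as a PML-full exit. Registering the callback in
the VMX backend is then a one-line addition to kvm_x86_ops (again
following the companion patch; sketch only, other members elided):

static struct kvm_x86_ops vmx_x86_ops = {
	/* ... existing callbacks ... */
	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
	.write_log_dirty = vmx_write_pml_buffer,
	/* ... */
};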