KVM: PPC: Improve split mode
author Alexander Graf <agraf@suse.de>
Tue, 20 Apr 2010 00:49:48 +0000 (02:49 +0200)
committer Avi Kivity <avi@redhat.com>
Mon, 17 May 2010 09:18:58 +0000 (12:18 +0300)
When in split mode, instruction relocation and data relocation are not equal.

So far we have implemented this mode by reserving special pseudo-VSIDs for the
two cases and flushing all PTEs when going into split mode, which is slow.

Unfortunately, 32-bit Linux and Mac OS X use split mode extensively. So, to
avoid slowing things down too much, I came up with a different idea: mark
split mode with a bit in the VSID and then treat it like any other segment.

This means we can just flush the shadow segment cache, but keep the PTEs
intact. I verified that this works with ppc32 Linux and Mac OS X 10.4
guests and does speed them up.
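
To illustrate the idea, here is a minimal standalone sketch of how a shadow
VSID gets tagged with the current translation mode. It is not the kernel code
itself: the helper name mode_tagged_vsid is hypothetical, the MSR_IR/MSR_DR
values are assumed to be the usual PowerPC relocation bits, and the VSID_*
constants mirror the new defines in the patch below.

    #include <stdint.h>

    #define VSID_REAL      0x1fffffffffc00000ULL
    #define VSID_REAL_DR   0x2000000000000000ULL
    #define VSID_REAL_IR   0x4000000000000000ULL

    #define MSR_IR         0x20ULL    /* instruction relocation */
    #define MSR_DR         0x10ULL    /* data relocation */

    /*
     * Compose the shadow VSID for one effective segment. In full real
     * mode we key on the ESID; in split mode we keep the VSID the guest
     * programmed (gvsid) and tag it with a dedicated high bit, so each
     * mode gets its own shadow segment and no PTE flush is needed when
     * the guest toggles IR/DR.
     */
    static uint64_t mode_tagged_vsid(uint64_t msr, uint64_t esid,
                                     uint64_t gvsid)
    {
        switch (msr & (MSR_DR | MSR_IR)) {
        case 0:                 /* neither IR nor DR: real mode */
            return VSID_REAL | esid;
        case MSR_DR:            /* split mode, data side */
            return VSID_REAL_DR | gvsid;
        case MSR_IR:            /* split mode, instruction side */
            return VSID_REAL_IR | gvsid;
        default:                /* IR and DR: normal translation */
            return gvsid;
        }
    }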

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_32_mmu.c
arch/powerpc/kvm/book3s_64_mmu.c

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 5d3bd0cc411691edb819529c68a3ec7487446155..6f74d93725a0f3327e4d3c6088828113b62079f4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -100,11 +100,10 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST          1
 #define CONTEXT_GUEST_END      2
 
-#define VSID_REAL_DR   0x7ffffffffff00000ULL
-#define VSID_REAL_IR   0x7fffffffffe00000ULL
-#define VSID_SPLIT_MASK        0x7fffffffffe00000ULL
-#define VSID_REAL      0x7fffffffffc00000ULL
-#define VSID_BAT       0x7fffffffffb00000ULL
+#define VSID_REAL      0x1fffffffffc00000ULL
+#define VSID_BAT       0x1fffffffffb00000ULL
+#define VSID_REAL_DR   0x2000000000000000ULL
+#define VSID_REAL_IR   0x4000000000000000ULL
 #define VSID_PR                0x8000000000000000ULL
 
 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index f66de7e518f7b8ec53b1b040f39a51b2fc835cf8..397701d39ae7571ab04b903191aa800bcd26fbd6 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -148,16 +148,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
                }
        }
 
-       if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
-           (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
-               bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-               bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
-
-               /* Flush split mode PTEs */
-               if (dr != ir)
-                       kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
-                                             VSID_SPLIT_MASK);
-
+       if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+                  (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
@@ -535,6 +527,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        bool is_mmio = false;
        bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+       u64 vsid;
 
        relocated = data ? dr : ir;
 
@@ -552,13 +545,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
-               pte.vpage |= VSID_REAL;
+               pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
-               pte.vpage |= VSID_REAL_DR;
-               break;
        case MSR_IR:
-               pte.vpage |= VSID_REAL_IR;
+               vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+               if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+                       pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+               else
+                       pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+               pte.vpage |= vsid;
+
+               if (vsid == -1)
+                       page_found = -EINVAL;
                break;
        }
 
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 33186b745c90647b5c82716803c15cd70ca28e79..0b10503c8a4aac4dfce46c7bf50c4fa017aa0db3 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -330,30 +330,35 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
 {
+       ulong ea = esid << SID_SHIFT;
+       struct kvmppc_sr *sr;
+       u64 gvsid = esid;
+
+       if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+               sr = find_sr(to_book3s(vcpu), ea);
+               if (sr->valid)
+                       gvsid = sr->vsid;
+       }
+
        /* In case we only have one of MSR_IR or MSR_DR set, let's put
           that in the real-mode context (and hope RM doesn't access
           high memory) */
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
-               *vsid = (VSID_REAL >> 16) | esid;
+               *vsid = VSID_REAL | esid;
                break;
        case MSR_IR:
-               *vsid = (VSID_REAL_IR >> 16) | esid;
+               *vsid = VSID_REAL_IR | gvsid;
                break;
        case MSR_DR:
-               *vsid = (VSID_REAL_DR >> 16) | esid;
+               *vsid = VSID_REAL_DR | gvsid;
                break;
        case MSR_DR|MSR_IR:
-       {
-               ulong ea = esid << SID_SHIFT;
-               struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
-
                if (!sr->valid)
                        return -1;
 
                *vsid = sr->vsid;
                break;
-       }
        default:
                BUG();
        }
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a9241e90a68b6d2e89257d8045ff86b7826ab803..612de6e4d74b5083c530d8506af1f9377c87848d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -442,29 +442,32 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
 {
+       ulong ea = esid << SID_SHIFT;
+       struct kvmppc_slb *slb;
+       u64 gvsid = esid;
+
+       if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+               slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+               if (slb)
+                       gvsid = slb->vsid;
+       }
+
        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
        case 0:
-               *vsid = (VSID_REAL >> 16) | esid;
+               *vsid = VSID_REAL | esid;
                break;
        case MSR_IR:
-               *vsid = (VSID_REAL_IR >> 16) | esid;
+               *vsid = VSID_REAL_IR | gvsid;
                break;
        case MSR_DR:
-               *vsid = (VSID_REAL_DR >> 16) | esid;
+               *vsid = VSID_REAL_DR | gvsid;
                break;
        case MSR_DR|MSR_IR:
-       {
-               ulong ea;
-               struct kvmppc_slb *slb;
-               ea = esid << SID_SHIFT;
-               slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
-               if (slb)
-                       *vsid = slb->vsid;
-               else
+               if (!slb)
                        return -ENOENT;
 
+               *vsid = gvsid;
                break;
-       }
        default:
                BUG();
                break;