KVM: PPC: booke: Paravirtualize wrtee
author	Scott Wood <scottwood@freescale.com>
Wed, 9 Nov 2011 00:23:28 +0000 (18:23 -0600)
committer	Avi Kivity <avi@redhat.com>
Mon, 5 Mar 2012 12:52:26 +0000 (14:52 +0200)
Paravirtualize the wrtee instruction on booke. Also fix the wrteei 1 paravirtualization to check for a pending interrupt after enabling MSR[EE].

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/kvm_emul.S

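Before the diffs, a quick illustration of the instruction-encoding side of the patch: the guest scans its own text for the privileged wrtee/wrteei encodings, and for the wrteei 1 case it builds a "li r30, MSR_EE" out of the masks defined in kvm.c. Below is a minimal, hedged C sketch using the constant values from the diff; the helper names are made up for illustration and are not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Constant values copied from arch/powerpc/kernel/kvm.c (see diff below) */
    #define KVM_INST_WRTEE  0x7c000106  /* wrtee, with the rS field left zero */
    #define KVM_INST_LI     0x38000000  /* addi rD,0,imm  ==  li rD,imm       */
    #define KVM_MASK_RT     0x03e00000  /* bits 6-10: the rS/rT register field */
    #define KVM_RT_30       0x03c00000  /* rT == 30, i.e. 30 << 21            */
    #define MSR_EE          0x00008000  /* external interrupt enable bit      */

    /* Hypothetical helpers mirroring kvm_check_ins()/kvm_patch_ins_wrtee():
     * mask off the register field to match the opcode, and build the
     * "li r30, MSR_EE" that replaces the register load for wrteei 1.
     */
    static int is_wrtee(uint32_t inst)
    {
            return (inst & ~KVM_MASK_RT) == KVM_INST_WRTEE;
    }

    static uint32_t li_r30_msr_ee(void)
    {
            /* __PPC_RT(30) in the patch expands to the same 30 << 21 */
            return KVM_INST_LI | KVM_RT_30 | MSR_EE;
    }

    int main(void)
    {
            uint32_t wrtee_r3 = KVM_INST_WRTEE | (3u << 21);  /* wrtee r3 */

            printf("wrtee r3 matches: %d\n", is_wrtee(wrtee_r3));
            printf("li r30,MSR_EE encodes as 0x%08x\n", li_r30_msr_ee());
            return 0;
    }

Running this prints a match for wrtee r3 and 0x3bc08000 for the load, the same word the patch assembles as KVM_INST_LI | __PPC_RT(30) | MSR_EE.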
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 2985338d0e10164e3b77ab12fcc3bb755a1a6fb3..06b15ee997f7409b9fabf1b85fa028c4f5b3574d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors:
  *     Alexander Graf <agraf@suse.de>
@@ -29,6 +30,7 @@
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
 
 #define KVM_MAGIC_PAGE         (-4096L)
 #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
@@ -41,6 +43,7 @@
 #define KVM_INST_B             0x48000000
 #define KVM_INST_B_MASK                0x03ffffff
 #define KVM_INST_B_MAX         0x01ffffff
+#define KVM_INST_LI            0x38000000
 
 #define KVM_MASK_RT            0x03e00000
 #define KVM_RT_30              0x03c00000
@@ -69,6 +72,7 @@
 #define KVM_INST_MTMSRD_L1     0x7c010164
 #define KVM_INST_MTMSR         0x7c000124
 
+#define KVM_INST_WRTEE         0x7c000106
 #define KVM_INST_WRTEEI_0      0x7c000146
 #define KVM_INST_WRTEEI_1      0x7c008146
 
@@ -270,26 +274,27 @@ static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
 
 #ifdef CONFIG_BOOKE
 
-extern u32 kvm_emulate_wrteei_branch_offs;
-extern u32 kvm_emulate_wrteei_ee_offs;
-extern u32 kvm_emulate_wrteei_len;
-extern u32 kvm_emulate_wrteei[];
+extern u32 kvm_emulate_wrtee_branch_offs;
+extern u32 kvm_emulate_wrtee_reg_offs;
+extern u32 kvm_emulate_wrtee_orig_ins_offs;
+extern u32 kvm_emulate_wrtee_len;
+extern u32 kvm_emulate_wrtee[];
 
-static void kvm_patch_ins_wrteei(u32 *inst)
+static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
 {
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;
 
-       p = kvm_alloc(kvm_emulate_wrteei_len * 4);
+       p = kvm_alloc(kvm_emulate_wrtee_len * 4);
        if (!p)
                return;
 
        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
-       distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];
+       distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
 
        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
@@ -298,10 +303,65 @@ static void kvm_patch_ins_wrteei(u32 *inst)
        }
 
        /* Modify the chunk to fit the invocation */
-       memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
-       p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
-       p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
-       flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);
+       memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
+       p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
+
+       if (imm_one) {
+               p[kvm_emulate_wrtee_reg_offs] =
+                       KVM_INST_LI | __PPC_RT(30) | MSR_EE;
+       } else {
+               /* Make clobbered registers work too */
+               switch (get_rt(rt)) {
+               case 30:
+                       kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
+                                        magic_var(scratch2), KVM_RT_30);
+                       break;
+               case 31:
+                       kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
+                                        magic_var(scratch1), KVM_RT_30);
+                       break;
+               default:
+                       p[kvm_emulate_wrtee_reg_offs] |= rt;
+                       break;
+               }
+       }
+
+       p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
+       flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
+
+       /* Patch the invocation */
+       kvm_patch_ins_b(inst, distance_start);
+}
+
+extern u32 kvm_emulate_wrteei_0_branch_offs;
+extern u32 kvm_emulate_wrteei_0_len;
+extern u32 kvm_emulate_wrteei_0[];
+
+static void kvm_patch_ins_wrteei_0(u32 *inst)
+{
+       u32 *p;
+       int distance_start;
+       int distance_end;
+       ulong next_inst;
+
+       p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
+       if (!p)
+               return;
+
+       /* Find out where we are and put everything there */
+       distance_start = (ulong)p - (ulong)inst;
+       next_inst = ((ulong)inst + 4);
+       distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
+
+       /* Make sure we only write valid b instructions */
+       if (distance_start > KVM_INST_B_MAX) {
+               kvm_patching_worked = false;
+               return;
+       }
+
+       memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
+       p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
+       flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
 
        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
@@ -444,6 +504,11 @@ static void kvm_check_ins(u32 *inst, u32 features)
        case KVM_INST_MTMSRD_L0:
                kvm_patch_ins_mtmsr(inst, inst_rt);
                break;
+#ifdef CONFIG_BOOKE
+       case KVM_INST_WRTEE:
+               kvm_patch_ins_wrtee(inst, inst_rt, 0);
+               break;
+#endif
        }
 
        switch (inst_no_rt & ~KVM_MASK_RB) {
@@ -461,8 +526,11 @@ static void kvm_check_ins(u32 *inst, u32 features)
        switch (_inst) {
 #ifdef CONFIG_BOOKE
        case KVM_INST_WRTEEI_0:
+               kvm_patch_ins_wrteei_0(inst);
+               break;
+
        case KVM_INST_WRTEEI_1:
-               kvm_patch_ins_wrteei(inst);
+               kvm_patch_ins_wrtee(inst, 0, 1);
                break;
 #endif
        }
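The kvm_emul.S changes below add the stub that the patched branch jumps to. As a rough C-style sketch of the control flow the new kvm_emulate_wrtee stub encodes (the struct layout and helper names here are illustrative stand-ins, not the real kvm_para definitions):

    #include <stdio.h>

    #define MSR_EE 0x00008000UL   /* external interrupt enable bit */

    /* Minimal stand-in for the shared magic page; the real layout is
     * struct kvm_vcpu_arch_shared in the kvm_para headers.
     */
    struct magic_page {
            unsigned long msr;          /* guest's cached view of MSR         */
            unsigned long int_pending;  /* nonzero if an interrupt is queued  */
    };

    /* Stand-in for re-executing the original wrtee, which traps to the host */
    static void trap_to_host(void)
    {
            printf("real wrtee executed -> trap to KVM, deliver interrupt\n");
    }

    /* Illustrative C mirror of the kvm_emulate_wrtee stub below */
    static void emulate_wrtee(struct magic_page *magic, unsigned long ee_source)
    {
            unsigned long msr = magic->msr;

            /* rlwimi r31, r30, 0, MSR_EE: splice the new EE bit into old MSR */
            msr = (msr & ~MSR_EE) | (ee_source & MSR_EE);

            /* Store MSR into the magic page instead of running wrtee */
            magic->msr = msr;

            /* If EE was just turned on, check whether an interrupt is pending */
            if ((ee_source & MSR_EE) && magic->int_pending)
                    trap_to_host();
    }

    int main(void)
    {
            struct magic_page magic = { .msr = 0, .int_pending = 1 };

            emulate_wrtee(&magic, MSR_EE);   /* like wrteei 1 with work pending */
            printf("magic MSR = 0x%lx\n", magic.msr);
            return 0;
    }

The design point visible in both the sketch and the assembly is that the common path never executes a real wrtee; only when EE is being enabled and the magic page reports a pending interrupt does the stub fall back to the original, trapping instruction.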
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index 3d64c5704fd5325c58653e9b3325f329d5e7cb97..801058dd74dbc8ab887fbe5a66b56ccc2008ed3a 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
  * Copyright SUSE Linux Products GmbH 2010
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors: Alexander Graf <agraf@suse.de>
  */
@@ -208,24 +209,80 @@ kvm_emulate_mtmsr_orig_ins_offs:
 kvm_emulate_mtmsr_len:
        .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
 
+/* also used for wrteei 1 */
+.global kvm_emulate_wrtee
+kvm_emulate_wrtee:
 
+       SCRATCH_SAVE
+
+       /* Fetch old MSR in r31 */
+       LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
 
-.global kvm_emulate_wrteei
-kvm_emulate_wrteei:
+       /* Insert new MSR[EE] */
+kvm_emulate_wrtee_reg:
+       ori     r30, r0, 0
+       rlwimi  r31, r30, 0, MSR_EE
+
+       /*
+        * If MSR[EE] is now set, check for a pending interrupt.
+        * We could skip this if MSR[EE] was already on, but that
+        * should be rare, so don't bother.
+        */
+       andi.   r30, r30, MSR_EE
 
+       /* Put MSR into magic page because we don't call wrtee */
+       STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+       beq     no_wrtee
+
+       /* Check if we have to fetch an interrupt */
+       lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
+       cmpwi   r30, 0
+       bne     do_wrtee
+
+no_wrtee:
+       SCRATCH_RESTORE
+
+       /* Go back to caller */
+kvm_emulate_wrtee_branch:
+       b       .
+
+do_wrtee:
+       SCRATCH_RESTORE
+
+       /* Just fire off the wrtee if it's critical */
+kvm_emulate_wrtee_orig_ins:
+       wrtee   r0
+
+       b       kvm_emulate_wrtee_branch
+
+kvm_emulate_wrtee_end:
+
+.global kvm_emulate_wrtee_branch_offs
+kvm_emulate_wrtee_branch_offs:
+       .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrtee_reg_offs
+kvm_emulate_wrtee_reg_offs:
+       .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrtee_orig_ins_offs
+kvm_emulate_wrtee_orig_ins_offs:
+       .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrtee_len
+kvm_emulate_wrtee_len:
+       .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrteei_0
+kvm_emulate_wrteei_0:
        SCRATCH_SAVE
 
        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
 
        /* Remove MSR_EE from old MSR */
-       li      r30, 0
-       ori     r30, r30, MSR_EE
-       andc    r31, r31, r30
-
-       /* OR new MSR_EE onto the old MSR */
-kvm_emulate_wrteei_ee:
-       ori     r31, r31, 0
+       rlwinm  r31, r31, 0, ~MSR_EE
 
        /* Write new MSR value back */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
@@ -233,22 +290,17 @@ kvm_emulate_wrteei_ee:
        SCRATCH_RESTORE
 
        /* Go back to caller */
-kvm_emulate_wrteei_branch:
+kvm_emulate_wrteei_0_branch:
        b       .
-kvm_emulate_wrteei_end:
-
-.global kvm_emulate_wrteei_branch_offs
-kvm_emulate_wrteei_branch_offs:
-       .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4
-
-.global kvm_emulate_wrteei_ee_offs
-kvm_emulate_wrteei_ee_offs:
-       .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4
+kvm_emulate_wrteei_0_end:
 
-.global kvm_emulate_wrteei_len
-kvm_emulate_wrteei_len:
-       .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4
+.global kvm_emulate_wrteei_0_branch_offs
+kvm_emulate_wrteei_0_branch_offs:
+       .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4
 
+.global kvm_emulate_wrteei_0_len
+kvm_emulate_wrteei_0_len:
+       .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
 
 .global kvm_emulate_mtsrin
 kvm_emulate_mtsrin: