MIPS: KVM: Rename files to remove the prefix "kvm_" and "kvm_mips_"
authorDeng-Cheng Zhu <dengcheng.zhu@imgtec.com>
Thu, 26 Jun 2014 19:11:38 +0000 (12:11 -0700)
committerPaolo Bonzini <pbonzini@redhat.com>
Mon, 30 Jun 2014 14:52:03 +0000 (16:52 +0200)
Since all the files are in arch/mips/kvm/, there's no need for the prefixes
"kvm_" and "kvm_mips_".

Reviewed-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
27 files changed:
arch/mips/kvm/Makefile
arch/mips/kvm/callback.c [new file with mode: 0644]
arch/mips/kvm/commpage.c [new file with mode: 0644]
arch/mips/kvm/commpage.h [new file with mode: 0644]
arch/mips/kvm/dyntrans.c [new file with mode: 0644]
arch/mips/kvm/emulate.c [new file with mode: 0644]
arch/mips/kvm/interrupt.c [new file with mode: 0644]
arch/mips/kvm/interrupt.h [new file with mode: 0644]
arch/mips/kvm/kvm_cb.c [deleted file]
arch/mips/kvm/kvm_locore.S [deleted file]
arch/mips/kvm/kvm_mips.c [deleted file]
arch/mips/kvm/kvm_mips_comm.h [deleted file]
arch/mips/kvm/kvm_mips_commpage.c [deleted file]
arch/mips/kvm/kvm_mips_dyntrans.c [deleted file]
arch/mips/kvm/kvm_mips_emul.c [deleted file]
arch/mips/kvm/kvm_mips_int.c [deleted file]
arch/mips/kvm/kvm_mips_int.h [deleted file]
arch/mips/kvm/kvm_mips_opcode.h [deleted file]
arch/mips/kvm/kvm_mips_stats.c [deleted file]
arch/mips/kvm/kvm_tlb.c [deleted file]
arch/mips/kvm/kvm_trap_emul.c [deleted file]
arch/mips/kvm/locore.S [new file with mode: 0644]
arch/mips/kvm/mips.c [new file with mode: 0644]
arch/mips/kvm/opcode.h [new file with mode: 0644]
arch/mips/kvm/stats.c [new file with mode: 0644]
arch/mips/kvm/tlb.c [new file with mode: 0644]
arch/mips/kvm/trap_emul.c [new file with mode: 0644]

index 78d87bbc99db747d4369f57c042e5b743443d934..401fe027c2612cf774fa53173420f93c072ea3c7 100644 (file)
@@ -5,9 +5,9 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
 
-kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
-           kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
-           kvm_mips_dyntrans.o kvm_trap_emul.o
+kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+           interrupt.o stats.o commpage.o \
+           dyntrans.o trap_emul.o
 
 obj-$(CONFIG_KVM)      += kvm.o
-obj-y                  += kvm_cb.o kvm_tlb.o
+obj-y                  += callback.o tlb.o
diff --git a/arch/mips/kvm/callback.c b/arch/mips/kvm/callback.c
new file mode 100644 (file)
index 0000000..313c2e3
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
new file mode 100644 (file)
index 0000000..61b9c04
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ 0x0.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "commpage.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+       memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+       /* Specific init values for fields */
+       vcpu->arch.cop0 = &page->cop0;
+       memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
new file mode 100644 (file)
index 0000000..08c5fa2
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into guest kernel space
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+       /* COP0 state is mapped into Guest kernel via commpage */
+       struct mips_coproc cop0;
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
new file mode 100644 (file)
index 0000000..521121b
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/cacheflush.h>
+
+#include "commpage.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
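+/*
+ * The templates above are base MIPS instruction encodings into which the
+ * register and offset fields are OR'd below: SYNCI_TEMPLATE is a REGIMM
+ * synci, LW_TEMPLATE and SW_TEMPLATE are lw/sw with all fields zero, and
+ * CLEAR_TEMPLATE is a SPECIAL add with all register fields zero.
+ */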
+
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = 0x0;
+
+       /* Replace the CACHE instruction with a NOP (sll zero, zero, 0) */
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+
+       return result;
+}
+
+/*
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
+ */
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                           struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       synci_inst |= (base << 21);
+       synci_inst |= offset;
+
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+
+       return result;
+}
+
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mfc0_inst;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+               mfc0_inst = CLEAR_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+       } else {
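+               /*
+                * Translate to a direct load: the commpage (see commpage.c) is
+                * mapped into the guest kernel at VA 0x0, so the lw can reach
+                * the saved cop0 register via a 16-bit offset from $zero.
+                */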
+               mfc0_inst = LW_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+               mfc0_inst |=
+                   offsetof(struct mips_coproc,
+                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+                                                     cop0);
+       }
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               local_flush_icache_range((unsigned long)opc,
+                                        (unsigned long)opc + 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mtc0_inst = SW_TEMPLATE;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       mtc0_inst |= ((rt & 0x1f) << 16);
+       mtc0_inst |=
+           offsetof(struct mips_coproc,
+                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               local_flush_icache_range((unsigned long)opc,
+                                        (unsigned long)opc + 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
new file mode 100644 (file)
index 0000000..1a60688
--- /dev/null
@@ -0,0 +1,2324 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "opcode.h"
+#include "interrupt.h"
+#include "commpage.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address, emulating the branch where required. This
+ * function should only be called for an instruction in a branch delay slot
+ * (i.e. when CP0_Cause.BD is set).
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+       unsigned long instpc)
+{
+       unsigned int dspcontrol;
+       union mips_instruction insn;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       long epc = instpc;
+       long nextpc = KVM_INVALID_INST;
+
+       if (epc & 3)
+               goto unaligned;
+
+       /* Read the instruction */
+       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+       if (insn.word == KVM_INVALID_INST)
+               return KVM_INVALID_INST;
+
+       switch (insn.i_format.opcode) {
+               /* jr and jalr are r_format instructions. */
+       case spec_op:
+               switch (insn.r_format.func) {
+               case jalr_op:
+                       arch->gprs[insn.r_format.rd] = epc + 8;
+                       /* Fall through */
+               case jr_op:
+                       nextpc = arch->gprs[insn.r_format.rs];
+                       break;
+               }
+               break;
+
+               /*
+                * This group contains:
+                * bltz_op, bgez_op, bltzl_op, bgezl_op,
+                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+                */
+       case bcond_op:
+               switch (insn.i_format.rt) {
+               case bltz_op:
+               case bltzl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bltzal_op:
+               case bltzall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgezal_op:
+               case bgezall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               case bposge32_op:
+                       if (!cpu_has_dsp)
+                               goto sigill;
+
+                       dspcontrol = rddsp(0x01);
+
+                       if (dspcontrol >= 32)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               }
+               break;
+
+               /* These are unconditional and in j_format. */
+       case jal_op:
+               arch->gprs[31] = instpc + 8;
+               /* fall through */
+       case j_op:
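+               /*
+                * PC-region jump: keep the top 4 bits of the delay-slot PC and
+                * replace the rest with the 26-bit target shifted left 2.
+                */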
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               epc |= (insn.j_format.target << 2);
+               nextpc = epc;
+               break;
+
+               /* These are conditional and in i_format. */
+       case beq_op:
+       case beql_op:
+               if (arch->gprs[insn.i_format.rs] ==
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bne_op:
+       case bnel_op:
+               if (arch->gprs[insn.i_format.rs] !=
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case blez_op:           /* not really i_format */
+       case blezl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] <= 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bgtz_op:
+       case bgtzl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] > 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+               /* And now the FPA/cp1 branch instructions. */
+       case cop1_op:
+               kvm_err("%s: unsupported cop1_op\n", __func__);
+               break;
+       }
+
+       return nextpc;
+
+unaligned:
+       kvm_err("%s: unaligned epc\n", __func__);
+       return nextpc;
+
+sigill:
+       kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
+       return nextpc;
+}
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long branch_pc;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (cause & CAUSEF_BD) {
+               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+               if (branch_pc == KVM_INVALID_INST) {
+                       er = EMULATE_FAIL;
+               } else {
+                       vcpu->arch.pc = branch_pc;
+                       kvm_debug("BD update_pc(): New PC: %#lx\n",
+                                 vcpu->arch.pc);
+               }
+       } else
+               vcpu->arch.pc += 4;
+
+       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+       return er;
+}
+
+/**
+ * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
+ * @vcpu:      Virtual CPU.
+ *
+ * Returns:    1 if the CP0_Count timer is disabled by either the guest
+ *             CP0_Cause.DC bit or the count_ctl.DC bit.
+ *             0 otherwise (in which case CP0_Count timer is running).
+ */
+static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+               (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
+}
+
+/**
+ * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
+ *
+ * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+{
+       s64 now_ns, periods;
+       u64 delta;
+
+       now_ns = ktime_to_ns(now);
+       delta = now_ns + vcpu->arch.count_dyn_bias;
+
+       if (delta >= vcpu->arch.count_period) {
+               /* If delta is out of safe range the bias needs adjusting */
+               periods = div64_s64(now_ns, vcpu->arch.count_period);
+               vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
+               /* Recalculate delta with new bias */
+               delta = now_ns + vcpu->arch.count_dyn_bias;
+       }
+
+       /*
+        * We've ensured that:
+        *   delta < count_period
+        *
+        * Therefore the intermediate delta*count_hz will never overflow since
+        * at the boundary condition:
+        *   delta = count_period
+        *   delta = NSEC_PER_SEC * 2^32 / count_hz
+        *   delta * count_hz = NSEC_PER_SEC * 2^32
+        */
+       return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
+}
+
+/**
+ * kvm_mips_count_time() - Get effective current time.
+ * @vcpu:      Virtual CPU.
+ *
+ * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
+ * except when the master disable bit is set in count_ctl, in which case it is
+ * count_resume, i.e. the time that the count was disabled.
+ *
+ * Returns:    Effective monotonic ktime for CP0_Count.
+ */
+static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+{
+       if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+               return vcpu->arch.count_resume;
+
+       return ktime_get();
+}
+
+/**
+ * kvm_mips_read_count_running() - Read the current count value as if running.
+ * @vcpu:      Virtual CPU.
+ * @now:       Kernel time to read CP0_Count at.
+ *
+ * Returns the current guest CP0_Count register at time @now, handling any
+ * timer interrupt that is pending but hasn't been handled yet.
+ *
+ * Returns:    The current value of the guest CP0_Count register.
+ */
+static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+{
+       ktime_t expires;
+       int running;
+
+       /* Is the hrtimer pending? */
+       expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+       if (ktime_compare(now, expires) >= 0) {
+               /*
+                * Cancel it while we handle it so there's no chance of
+                * interference with the timeout handler.
+                */
+               running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+               /* Nothing should be waiting on the timeout */
+               kvm_mips_callbacks->queue_timer_int(vcpu);
+
+               /*
+                * Restart the timer if it was running based on the expiry time
+                * we read, so that we don't push it back 2 periods.
+                */
+               if (running) {
+                       expires = ktime_add_ns(expires,
+                                              vcpu->arch.count_period);
+                       hrtimer_start(&vcpu->arch.comparecount_timer, expires,
+                                     HRTIMER_MODE_ABS);
+               }
+       }
+
+       /* Return the biased and scaled guest CP0_Count */
+       return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+}
+
+/**
+ * kvm_mips_read_count() - Read the current count value.
+ * @vcpu:      Virtual CPU.
+ *
+ * Read the current guest CP0_Count value, taking into account whether the timer
+ * is stopped.
+ *
+ * Returns:    The current guest CP0_Count value.
+ */
+uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       /* If count disabled just read static copy of count */
+       if (kvm_mips_count_disabled(vcpu))
+               return kvm_read_c0_guest_count(cop0);
+
+       return kvm_mips_read_count_running(vcpu, ktime_get());
+}
+
+/**
+ * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
+ * @vcpu:      Virtual CPU.
+ * @count:     Output pointer for CP0_Count value at point of freeze.
+ *
+ * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
+ * at the point it was frozen. It is guaranteed that any pending interrupts at
+ * the point it was frozen are handled, and none after that point.
+ *
+ * This is useful where the time/CP0_Count is needed in the calculation of the
+ * new parameters.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ *
+ * Returns:    The ktime at the point of freeze.
+ */
+static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
+                                      uint32_t *count)
+{
+       ktime_t now;
+
+       /* stop hrtimer before finding time */
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+       now = ktime_get();
+
+       /* find count at this point and handle pending hrtimer */
+       *count = kvm_mips_read_count_running(vcpu, now);
+
+       return now;
+}
+
+/**
+ * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
+ * @vcpu:      Virtual CPU.
+ * @now:       ktime at point of resume.
+ * @count:     CP0_Count at point of resume.
+ *
+ * Resumes the timer and updates the timer expiry based on @now and @count.
+ * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
+ * parameters need to be changed.
+ *
+ * It is guaranteed that a timer interrupt immediately after resume will be
+ * handled, but not if CP0_Compare is exactly at @count. That case is already
+ * handled by kvm_mips_freeze_hrtimer().
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+                                   ktime_t now, uint32_t count)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t compare;
+       u64 delta;
+       ktime_t expire;
+
+       /* Calculate timeout (wrap 0 to 2^32) */
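+       /*
+        * delta = ((compare - count - 1) mod 2^32) + 1, i.e. in [1, 2^32]
+        * ticks, so writing compare == count yields a full period rather
+        * than an immediate expiry.
+        */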
+       compare = kvm_read_c0_guest_compare(cop0);
+       delta = (u64)(uint32_t)(compare - count - 1) + 1;
+       delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+       expire = ktime_add_ns(now, delta);
+
+       /* Update hrtimer to use new timeout */
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+       hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
+}
+
+/**
+ * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
+ * @vcpu:      Virtual CPU.
+ *
+ * Recalculates and updates the expiry time of the hrtimer. This can be used
+ * after timer parameters have been altered in ways that do not depend on the
+ * time at which the change occurs (in those cases kvm_mips_freeze_hrtimer() and
+ * kvm_mips_resume_hrtimer() are used directly).
+ *
+ * It is guaranteed that no timer interrupts will be lost in the process.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
+{
+       ktime_t now;
+       uint32_t count;
+
+       /*
+        * freeze_hrtimer takes care of timer interrupts <= count, and
+        * resume_hrtimer takes care of timer interrupts > count.
+        */
+       now = kvm_mips_freeze_hrtimer(vcpu, &count);
+       kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_write_count() - Modify the count and update timer.
+ * @vcpu:      Virtual CPU.
+ * @count:     Guest CP0_Count value to set.
+ *
+ * Sets the CP0_Count value and updates the timer accordingly.
+ */
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       ktime_t now;
+
+       /* Calculate bias */
+       now = kvm_mips_count_time(vcpu);
+       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+       if (kvm_mips_count_disabled(vcpu))
+               /* The timer's disabled, adjust the static count */
+               kvm_write_c0_guest_count(cop0, count);
+       else
+               /* Update timeout */
+               kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_init_count() - Initialise timer.
+ * @vcpu:      Virtual CPU.
+ *
+ * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
+ * it going if it's enabled.
+ */
+void kvm_mips_init_count(struct kvm_vcpu *vcpu)
+{
+       /* 100 MHz */
+       vcpu->arch.count_hz = 100*1000*1000;
+       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
+                                         vcpu->arch.count_hz);
+       vcpu->arch.count_dyn_bias = 0;
+
+       /* Starting at 0 */
+       kvm_mips_write_count(vcpu, 0);
+}
+
+/**
+ * kvm_mips_set_count_hz() - Update the frequency of the timer.
+ * @vcpu:      Virtual CPU.
+ * @count_hz:  Frequency of CP0_Count timer in Hz.
+ *
+ * Change the frequency of the CP0_Count timer. This is done atomically so that
+ * CP0_Count is continuous and no timer interrupt is lost.
+ *
+ * Returns:    -EINVAL if @count_hz is out of range.
+ *             0 on success.
+ */
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int dc;
+       ktime_t now;
+       u32 count;
+
+       /* ensure the frequency is in a sensible range... */
+       if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
+               return -EINVAL;
+       /* ... and has actually changed */
+       if (vcpu->arch.count_hz == count_hz)
+               return 0;
+
+       /* Safely freeze timer so we can keep it continuous */
+       dc = kvm_mips_count_disabled(vcpu);
+       if (dc) {
+               now = kvm_mips_count_time(vcpu);
+               count = kvm_read_c0_guest_count(cop0);
+       } else {
+               now = kvm_mips_freeze_hrtimer(vcpu, &count);
+       }
+
+       /* Update the frequency */
+       vcpu->arch.count_hz = count_hz;
+       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
+       vcpu->arch.count_dyn_bias = 0;
+
+       /* Calculate adjusted bias so dynamic count is unchanged */
+       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+       /* Update and resume hrtimer */
+       if (!dc)
+               kvm_mips_resume_hrtimer(vcpu, now, count);
+       return 0;
+}
+
+/**
+ * kvm_mips_write_compare() - Modify compare and update timer.
+ * @vcpu:      Virtual CPU.
+ * @compare:   New CP0_Compare value.
+ *
+ * Update CP0_Compare to a new value and update the timeout.
+ */
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       /* if unchanged, must just be an ack */
+       if (kvm_read_c0_guest_compare(cop0) == compare)
+               return;
+
+       /* Update compare */
+       kvm_write_c0_guest_compare(cop0, compare);
+
+       /* Update timeout if count enabled */
+       if (!kvm_mips_count_disabled(vcpu))
+               kvm_mips_update_hrtimer(vcpu);
+}
+
+/**
+ * kvm_mips_count_disable() - Disable count.
+ * @vcpu:      Virtual CPU.
+ *
+ * Disable the CP0_Count timer. A timer interrupt on or before the final stop
+ * time will be handled but not after.
+ *
+ * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
+ * count_ctl.DC has been set (count disabled).
+ *
+ * Returns:    The time that the timer was stopped.
+ */
+static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t count;
+       ktime_t now;
+
+       /* Stop hrtimer */
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       /* Set the static count from the dynamic count, handling pending TI */
+       now = ktime_get();
+       count = kvm_mips_read_count_running(vcpu, now);
+       kvm_write_c0_guest_count(cop0, count);
+
+       return now;
+}
+
+/**
+ * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
+ * @vcpu:      Virtual CPU.
+ *
+ * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
+ * before the final stop time will be handled if the timer isn't disabled by
+ * count_ctl.DC, but not after.
+ *
+ * Assumes CP0_Cause.DC is clear (count enabled).
+ */
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
+       if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+               kvm_mips_count_disable(vcpu);
+}
+
+/**
+ * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
+ * @vcpu:      Virtual CPU.
+ *
+ * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
+ * the start time will be handled if the timer isn't disabled by count_ctl.DC,
+ * potentially before even returning, so the caller should be careful with
+ * ordering of CP0_Cause modifications so as not to lose it.
+ *
+ * Assumes CP0_Cause.DC is set (count disabled).
+ */
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t count;
+
+       kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
+
+       /*
+        * Set the dynamic count to match the static count.
+        * This starts the hrtimer if count_ctl.DC allows it.
+        * Otherwise it conveniently updates the biases.
+        */
+       count = kvm_read_c0_guest_count(cop0);
+       kvm_mips_write_count(vcpu, count);
+}
+
+/**
+ * kvm_mips_set_count_ctl() - Update the count control KVM register.
+ * @vcpu:      Virtual CPU.
+ * @count_ctl: Count control register new value.
+ *
+ * Set the count control KVM register. The timer is updated accordingly.
+ *
+ * Returns:    -EINVAL if reserved bits are set.
+ *             0 on success.
+ */
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+       s64 delta;
+       ktime_t expire, now;
+       uint32_t count, compare;
+
+       /* Only allow defined bits to be changed */
+       if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
+               return -EINVAL;
+
+       /* Apply new value */
+       vcpu->arch.count_ctl = count_ctl;
+
+       /* Master CP0_Count disable */
+       if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
+               /* Is CP0_Cause.DC already disabling CP0_Count? */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
+                       if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
+                               /* Just record the current time */
+                               vcpu->arch.count_resume = ktime_get();
+               } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
+                       /* disable timer and record current time */
+                       vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
+               } else {
+                       /*
+                        * Calculate timeout relative to static count at resume
+                        * time (wrap 0 to 2^32).
+                        */
+                       count = kvm_read_c0_guest_count(cop0);
+                       compare = kvm_read_c0_guest_compare(cop0);
+                       delta = (u64)(uint32_t)(compare - count - 1) + 1;
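+                       /* As in kvm_mips_resume_hrtimer(): delta in [1, 2^32] */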
+                       delta = div_u64(delta * NSEC_PER_SEC,
+                                       vcpu->arch.count_hz);
+                       expire = ktime_add_ns(vcpu->arch.count_resume, delta);
+
+                       /* Handle pending interrupt */
+                       now = ktime_get();
+                       if (ktime_compare(now, expire) >= 0)
+                               /* Nothing should be waiting on the timeout */
+                               kvm_mips_callbacks->queue_timer_int(vcpu);
+
+                       /* Resume hrtimer without changing bias */
+                       count = kvm_mips_read_count_running(vcpu, now);
+                       kvm_mips_resume_hrtimer(vcpu, now, count);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * kvm_mips_set_count_resume() - Update the count resume KVM register.
+ * @vcpu:              Virtual CPU.
+ * @count_resume:      Count resume register new value.
+ *
+ * Set the count resume KVM register.
+ *
+ * Returns:    -EINVAL if out of valid range (0..now).
+ *             0 on success.
+ */
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
+{
+       /*
+        * It doesn't make sense for the resume time to be in the future, as it
+        * would be possible for the next interrupt to be more than a full
+        * period in the future.
+        */
+       if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
+               return -EINVAL;
+
+       vcpu->arch.count_resume = ns_to_ktime(count_resume);
+       return 0;
+}
+
+/**
+ * kvm_mips_count_timeout() - Push timer forward on timeout.
+ * @vcpu:      Virtual CPU.
+ *
+ * Handle an hrtimer event by pushing the hrtimer forward a period.
+ *
+ * Returns:    The hrtimer_restart value to return to the hrtimer subsystem.
+ */
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
+{
+       /* Add the Count period to the current expiry time */
+       hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
+                              vcpu->arch.count_period);
+       return HRTIMER_RESTART;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+                         kvm_read_c0_guest_epc(cop0));
+               kvm_clear_c0_guest_status(cop0, ST0_EXL);
+               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else {
+               kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+                       vcpu->arch.pc);
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+                 vcpu->arch.pending_exceptions);
+
+       ++vcpu->stat.wait_exits;
+       trace_kvm_exit(vcpu, WAIT_EXITS);
+       if (!vcpu->arch.pending_exceptions) {
+               vcpu->arch.wait = 1;
+               kvm_vcpu_block(vcpu);
+
+               /*
+                * If we are runnable, then definitely go off to user space to
+                * check if any I/O interrupts are pending.
+                */
+               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               }
+       }
+
+       return EMULATE_DONE;
+}
+
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
+ * we can catch this, if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t pc = vcpu->arch.pc;
+
+       kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+       return EMULATE_FAIL;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int index = kvm_read_c0_guest_index(cop0);
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               kvm_debug("%s: illegal index: %d\n", __func__, index);
+               kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                         pc, index, kvm_read_c0_guest_entryhi(cop0),
+                         kvm_read_c0_guest_entrylo0(cop0),
+                         kvm_read_c0_guest_entrylo1(cop0),
+                         kvm_read_c0_guest_pagemask(cop0));
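+               /*
+                * Bit 31 of CP0_Index is the P bit, set by a failed TLBP, so
+                * clear it before reducing the index into the guest TLB range.
+                */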
+               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+       /*
+        * Probe the shadow host TLB for the entry being overwritten, if one
+        * matches, invalidate it
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                 pc, index, kvm_read_c0_guest_entryhi(cop0),
+                 kvm_read_c0_guest_entrylo0(cop0),
+                 kvm_read_c0_guest_entrylo1(cop0),
+                 kvm_read_c0_guest_pagemask(cop0));
+
+       return EMULATE_DONE;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+       int index;
+
+       get_random_bytes(&index, sizeof(index));
+       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
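+       /*
+        * The mask assumes KVM_MIPS_GUEST_TLB_SIZE is a power of two, which
+        * also makes the range check below purely defensive.
+        */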
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               kvm_err("%s: illegal index: %d\n", __func__, index);
+               return EMULATE_FAIL;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+
+       /*
+        * Probe the shadow host TLB for the entry being overwritten, if one
+        * matches, invalidate it
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+                 pc, index, kvm_read_c0_guest_entryhi(cop0),
+                 kvm_read_c0_guest_entrylo0(cop0),
+                 kvm_read_c0_guest_entrylo1(cop0));
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       long entryhi = kvm_read_c0_guest_entryhi(cop0);
+       uint32_t pc = vcpu->arch.pc;
+       int index = -1;
+
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+       kvm_write_c0_guest_index(cop0, index);
+
+       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+                 index);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+                                          uint32_t cause, struct kvm_run *run,
+                                          struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t rt, rd, copz, sel, co_bit, op;
+       uint32_t pc = vcpu->arch.pc;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       copz = (inst >> 21) & 0x1f;
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+       co_bit = (inst >> 25) & 1;
+
+       if (co_bit) {
+               op = (inst) & 0xff;
+
+               switch (op) {
+               case tlbr_op:   /*  Read indexed TLB entry  */
+                       er = kvm_mips_emul_tlbr(vcpu);
+                       break;
+               case tlbwi_op:  /*  Write indexed  */
+                       er = kvm_mips_emul_tlbwi(vcpu);
+                       break;
+               case tlbwr_op:  /*  Write random  */
+                       er = kvm_mips_emul_tlbwr(vcpu);
+                       break;
+               case tlbp_op:   /* TLB Probe */
+                       er = kvm_mips_emul_tlbp(vcpu);
+                       break;
+               case rfe_op:
+                       kvm_err("!!!COP0_RFE!!!\n");
+                       break;
+               case eret_op:
+                       er = kvm_mips_emul_eret(vcpu);
+                       goto dont_update_pc;
+                       break;
+               case wait_op:
+                       er = kvm_mips_emul_wait(vcpu);
+                       break;
+               }
+       } else {
+               switch (copz) {
+               case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       /* Get reg */
+                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
+                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug
+                           ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+                            pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+                       break;
+
+               case dmfc_op:
+                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+                       break;
+
+               case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       if ((rd == MIPS_CP0_TLB_INDEX)
+                           && (vcpu->arch.gprs[rt] >=
+                               KVM_MIPS_GUEST_TLB_SIZE)) {
+                               kvm_err("Invalid TLB Index: %ld",
+                                       vcpu->arch.gprs[rt]);
+                               er = EMULATE_FAIL;
+                               break;
+                       }
+#define C0_EBASE_CORE_MASK 0xff
+                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+                               /* Preserve CORE number */
+                               kvm_change_c0_guest_ebase(cop0,
+                                                         ~(C0_EBASE_CORE_MASK),
+                                                         vcpu->arch.gprs[rt]);
+                               kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
+                                       kvm_read_c0_guest_ebase(cop0));
+                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+                               uint32_t nasid =
+                                       vcpu->arch.gprs[rt] & ASID_MASK;
+                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
+                                   ((kvm_read_c0_guest_entryhi(cop0) &
+                                     ASID_MASK) != nasid)) {
+                                       kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+                                               kvm_read_c0_guest_entryhi(cop0)
+                                               & ASID_MASK,
+                                               vcpu->arch.gprs[rt]
+                                               & ASID_MASK);
+
+                                       /* Blow away the shadow host TLBs */
+                                       kvm_mips_flush_host_tlb(1);
+                               }
+                               kvm_write_c0_guest_entryhi(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       }
+                       /* Are we writing to COUNT */
+                       else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
+                               goto done;
+                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+                                         pc, kvm_read_c0_guest_compare(cop0),
+                                         vcpu->arch.gprs[rt]);
+
+                               /* If we are writing to COMPARE */
+                               /* Clear pending timer interrupt, if any */
+                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
+                               kvm_mips_write_compare(vcpu,
+                                                      vcpu->arch.gprs[rt]);
+                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+                               kvm_write_c0_guest_status(cop0,
+                                                         vcpu->arch.gprs[rt]);
+                               /*
+                                * Make sure that CU1 and NMI bits are
+                                * never set
+                                */
+                               kvm_clear_c0_guest_status(cop0,
+                                                         (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
+                               uint32_t old_cause, new_cause;
+
+                               old_cause = kvm_read_c0_guest_cause(cop0);
+                               new_cause = vcpu->arch.gprs[rt];
+                               /* Update R/W bits */
+                               kvm_change_c0_guest_cause(cop0, 0x08800300,
+                                                         new_cause);
+                               /* DC bit enabling/disabling timer? */
+                               if ((old_cause ^ new_cause) & CAUSEF_DC) {
+                                       if (new_cause & CAUSEF_DC)
+                                               kvm_mips_count_disable_cause(vcpu);
+                                       else
+                                               kvm_mips_count_enable_cause(vcpu);
+                               }
+                       } else {
+                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+                                 rd, sel, cop0->reg[rd][sel]);
+                       break;
+
+               case dmtc_op:
+                       kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+                               vcpu->arch.pc, rt, rd, sel);
+                       er = EMULATE_FAIL;
+                       break;
+
+               case mfmcz_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+                       if (rt != 0) {
+                               vcpu->arch.gprs[rt] =
+                                   kvm_read_c0_guest_status(cop0);
+                       }
+                       /* EI */
+                       if (inst & 0x20) {
+                               kvm_debug("[%#lx] mfmcz_op: EI\n",
+                                         vcpu->arch.pc);
+                               kvm_set_c0_guest_status(cop0, ST0_IE);
+                       } else {
+                               kvm_debug("[%#lx] mfmcz_op: DI\n",
+                                         vcpu->arch.pc);
+                               kvm_clear_c0_guest_status(cop0, ST0_IE);
+                       }
+
+                       break;
+
+               case wrpgpr_op:
+                       {
+                               uint32_t css =
+                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+                               uint32_t pss =
+                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+                               /*
+                                * We don't support any shadow register sets, so
+                                * SRSCtl[PSS] == SRSCtl[CSS] = 0
+                                */
+                               if (css || pss) {
+                                       er = EMULATE_FAIL;
+                                       break;
+                               }
+                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+                                         vcpu->arch.gprs[rt]);
+                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+                       }
+                       break;
+               default:
+                       kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+                               vcpu->arch.pc, copz);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       }
+
+done:
+       /* Roll back the PC only if emulation was unsuccessful */
+       if (er == EMULATE_FAIL)
+               vcpu->arch.pc = curr_pc;
+
+dont_update_pc:
+       /*
+        * This is for special instructions whose emulation
+        * updates the PC, so do not overwrite the PC under
+        * any circumstances
+        */
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+       void *data = run->mmio.data;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
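+       /*
+        * MIPS I-format: op | base | rt | offset. Only rt is used here; the
+        * effective address comes from the host CP0_BadVAddr snapshot, so
+        * base and offset are decoded only for completeness.
+        */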
+
+       switch (op) {
+       case sb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(u8 *) data = vcpu->arch.gprs[rt];
+               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+                         *(uint8_t *) data);
+
+               break;
+
+       case sw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       case sh_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint16_t *) data);
+               break;
+
+       default:
+               kvm_err("Store not yet supported");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       /* Roll back the PC if emulation was unsuccessful */
+       if (er == EMULATE_FAIL)
+               vcpu->arch.pc = curr_pc;
+
+       return er;
+}
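
The field extraction at the top of kvm_mips_emulate_store() (and again in
kvm_mips_emulate_load() below) follows the fixed MIPS I-type layout: primary
opcode in bits 31:26, base register in 25:21, rt in 20:16, and a signed 16-bit
displacement in 15:0. A standalone sketch of the same decode, for illustration
only (the encoding is assembled by hand; the int16_t cast sign-extends the
displacement, which the emulator itself never needs because it takes the
resolved address from BadVAddr):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a MIPS I-type word: op | base | rt | signed 16-bit offset. */
    int main(void)
    {
            uint32_t inst = 0xa0850004;                /* sb $5, 4($4) */
            uint32_t op = (inst >> 26) & 0x3f;         /* 0x28 (sb) */
            uint32_t base = (inst >> 21) & 0x1f;       /* 4 */
            uint32_t rt = (inst >> 16) & 0x1f;         /* 5 */
            int16_t offset = (int16_t)(inst & 0xffff); /* 4 */

            printf("op=%#x base=%u rt=%u offset=%d\n", op, base, rt, offset);
            return 0;
    }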
+
+enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       vcpu->arch.pending_load_cause = cause;
+       vcpu->arch.io_gpr = rt;
+
+       switch (op) {
+       case lw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+               break;
+
+       case lh_op:
+       case lhu_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lh_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       case lbu_op:
+       case lb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lb_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       default:
+               kvm_err("Load not yet supported");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+       unsigned long offset = (va & ~PAGE_MASK);
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long pa;
+       gfn_t gfn;
+       pfn_t pfn;
+
+       gfn = va >> PAGE_SHIFT;
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               return -1;
+       }
+       pfn = kvm->arch.guest_pmap[gfn];
+       pa = (pfn << PAGE_SHIFT) | offset;
+
+       kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
+                 CKSEG0ADDR(pa));
+
+       local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32);
+       return 0;
+}
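
The translation in kvm_mips_sync_icache() is a flat table lookup: the guest
frame number indexes guest_pmap to find the host page frame, and the page
offset is carried across unchanged. The arithmetic in isolation, as a hedged
userspace sketch (the 4 KiB page size and the array shape are assumptions):

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    /* gva -> host pa via a flat gfn -> pfn map, mirroring the code above. */
    static unsigned long gva_to_hpa(unsigned long va, const unsigned long *pmap,
                                    unsigned long npages)
    {
            unsigned long gfn = va >> PAGE_SHIFT;
            unsigned long offset = va & ~PAGE_MASK;

            if (gfn >= npages)
                    return -1UL;                    /* invalid gfn */
            return (pmap[gfn] << PAGE_SHIFT) | offset;
    }

    int main(void)
    {
            unsigned long pmap[4] = { 0x100, 0x101, 0x2ff, 0x300 };

            printf("%#lx\n", gva_to_hpa(0x2abc, pmap, 4));  /* 0x2ffabc */
            return 0;
    }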
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t offset, cache, op_inst, op, base;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long va;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       base = (inst >> 21) & 0x1f;
+       op_inst = (inst >> 16) & 0x1f;
+       offset = inst & 0xffff;
+       cache = (inst >> 16) & 0x3;
+       op = (inst >> 18) & 0x7;
+
+       va = arch->gprs[base] + offset;
+
+       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                 cache, op, base, arch->gprs[base], offset);
+
+       /*
+        * Treat INDEX_INV as a no-op; Linux issues it on startup to
+        * invalidate the caches entirely by stepping through all the
+        * ways/indexes
+        */
+       if (op == MIPS_CACHE_OP_INDEX_INV) {
+               kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+                         arch->gprs[base], offset);
+
+               if (cache == MIPS_CACHE_DCACHE)
+                       r4k_blast_dcache();
+               else if (cache == MIPS_CACHE_ICACHE)
+                       r4k_blast_icache();
+               else {
+                       kvm_err("%s: unsupported CACHE INDEX operation\n",
+                               __func__);
+                       return EMULATE_FAIL;
+               }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+               goto done;
+       }
+
+       preempt_disable();
+       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
+                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+               int index;
+
+               /* If an entry already exists then skip */
+               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
+                       goto skip_fault;
+
+               /*
+                * If the address is not in the guest TLB, give the guest a
+                * fault; the resulting handler will do the right thing
+                */
+               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK));
+
+               if (index < 0) {
+                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+                       vcpu->arch.host_cp0_badvaddr = va;
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+                                                        vcpu);
+                       preempt_enable();
+                       goto dont_update_pc;
+               } else {
+                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+                       /*
+                        * Check if the entry is valid; if not, set up a TLB
+                        * invalid exception for the guest
+                        */
+                       if (!TLB_IS_VALID(*tlb, va)) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+                                                               run, vcpu);
+                               preempt_enable();
+                               goto dont_update_pc;
+                       } else {
+                               /*
+                                * We fault an entry from the guest tlb to the
+                                * shadow host TLB
+                                */
+                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                    NULL,
+                                                                    NULL);
+                       }
+               }
+       } else {
+               kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                       cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+       }
+
+skip_fault:
+       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+       if (cache == MIPS_CACHE_DCACHE
+           && (op == MIPS_CACHE_OP_FILL_WB_INV
+               || op == MIPS_CACHE_OP_HIT_INV)) {
+               flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /*
+                * Replace the CACHE instruction with a SYNCI; not identical,
+                * but it avoids a trap
+                */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+               flush_dcache_line(va);
+               flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction with a SYNCI */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else {
+               kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                       cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+       }
+
+       preempt_enable();
+
+dont_update_pc:
+       /* Rollback PC */
+       vcpu->arch.pc = curr_pc;
+done:
+       return er;
+}
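
One subtlety in the decode above: op_inst is the whole 5-bit rt field, which
the CACHE instruction splits into a 2-bit cache selector (bits 17:16) and a
3-bit operation (bits 20:18), hence the overlapping shifts. A standalone check
against a hand-assembled encoding:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t inst = 0xbc950000;     /* cache 0x15, 0($4): Hit WB Inv D */
            uint32_t op_inst = (inst >> 16) & 0x1f; /* 0x15 */
            uint32_t cache = op_inst & 0x3;         /* 1 = MIPS_CACHE_DCACHE */
            uint32_t op = (op_inst >> 2) & 0x7;     /* 5 = FILL_WB_INV */

            printf("cache=%u op=%u\n", cache, op);
            return 0;
    }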
+
+enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t inst;
+
+       /* Fetch the instruction. */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       switch (((union mips_instruction)inst).r_format.opcode) {
+       case cop0_op:
+               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+               break;
+       case sb_op:
+       case sh_op:
+       case sw_op:
+               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+               break;
+       case lb_op:
+       case lbu_op:
+       case lhu_op:
+       case lh_op:
+       case lw_op:
+               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+               break;
+
+       case cache_op:
+               ++vcpu->stat.cache_exits;
+               trace_kvm_exit(vcpu, CACHE_EXITS);
+               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+               break;
+
+       default:
+               kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
+                       inst);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_SYSCALL << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
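
The body of kvm_mips_emulate_syscall() is the delivery template every helper
below repeats: save the PC into guest EPC, set Status.EXL, mirror the host's
branch-delay bit into guest Cause.BD, rewrite Cause.ExcCode, and point the PC
at the exception vector. Restated over a toy CP0 struct as a sketch (the
struct and names are illustrative only, and the precise 0x7c field mask is an
assumption; the kernel code clears the wider 0xff):

    #include <stdint.h>

    #define ST0_EXL         (1u << 1)
    #define CAUSEF_BD       (1u << 31)
    #define CAUSEB_EXCCODE  2
    #define EXCCODE_MASK    (0x1fu << CAUSEB_EXCCODE)

    struct toy_cp0 { uint32_t status, cause, epc; };

    /* Deliver an exception to a guest modelled by @c0; only legal when the
     * guest is not already in exception mode (Status.EXL clear). */
    static int deliver(struct toy_cp0 *c0, uint32_t host_cause,
                       uint32_t exccode, uint32_t *pc, uint32_t vector)
    {
            if (c0->status & ST0_EXL)
                    return -1;              /* would clobber the saved EPC */

            c0->epc = *pc;                  /* save old pc */
            c0->status |= ST0_EXL;
            if (host_cause & CAUSEF_BD)     /* faulting insn in a delay slot? */
                    c0->cause |= CAUSEF_BD;
            else
                    c0->cause &= ~CAUSEF_BD;
            c0->cause = (c0->cause & ~EXCCODE_MASK) |
                        (exccode << CAUSEB_EXCCODE);
            *pc = vector;                   /* e.g. KVM_GUEST_KSEG0 + 0x180 */
            return 0;
    }

    int main(void)
    {
            struct toy_cp0 c0 = { 0, 0, 0 };
            uint32_t pc = 0x400184;

            return deliver(&c0, 0, 8 /* Sys */, &pc, 0x40000180) ? 1 : 0;
    }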
+
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+                                                 uint32_t *opc,
+                                                 struct kvm_run *run,
+                                                 struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
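
The entryhi value computed at the top pairs the faulting even/odd page pair
(VPN2) with the guest's current ASID, so the refill handler entered at the
vector sees a self-consistent EntryHi. The composition in isolation (4 KiB
pages and an 8-bit ASID assumed, matching the masks used by this code):

    #include <stdio.h>

    #define VPN2_MASK       0xffffe000ul    /* bits 31:13, even/odd page pair */
    #define ASID_MASK       0xfful          /* bits 7:0 */

    int main(void)
    {
            unsigned long badvaddr = 0x0040345c;    /* faulting GVA */
            unsigned long guest_entryhi = 0x27;     /* current guest ASID */
            unsigned long entryhi = (badvaddr & VPN2_MASK) |
                                    (guest_entryhi & ASID_MASK);

            printf("entryhi=%#lx\n", entryhi);      /* 0x402027 */
            return 0;
    }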
+
+enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+                                                uint32_t *opc,
+                                                struct kvm_run *run,
+                                                struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi =
+               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+                                                 uint32_t *opc,
+                                                 struct kvm_run *run,
+                                                 struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+                                                uint32_t *opc,
+                                                struct kvm_run *run,
+                                                struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+#ifdef DEBUG
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       int index;
+
+       /* If address not in the guest TLB, then we are in trouble */
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+       if (index < 0) {
+               /* XXXKYMA Invalidate and retry */
+               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+                    __func__, entryhi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               kvm_mips_dump_host_tlbs();
+               return EMULATE_FAIL;
+       }
+#endif
+
+       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+       }
+
+       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_RES_INST << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver RI when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_BREAK << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver BP when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+/* ll/sc, rdhwr, sync emulation */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
+
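As a sanity check of the masks above, the canonical rdhwr v1, $29 encoding
(the userland TLS-pointer read these handlers most often see) decodes like
this in a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define OPCODE  0xfc000000
    #define RT      0x001f0000
    #define SPEC3   0x7c000000
    #define RD      0x0000f800
    #define FUNC    0x0000003f
    #define RDHWR   0x0000003b

    int main(void)
    {
            uint32_t inst = 0x7c03e83b;     /* rdhwr v1, $29 */

            if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR)
                    printf("rd=%u rt=%u\n",
                           (inst & RD) >> 11,       /* 29: UserLocal */
                           (inst & RT) >> 16);      /* 3: v1 */
            return 0;
    }
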
+enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+                                        struct kvm_run *run,
+                                        struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+       uint32_t inst;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       /* Fetch the instruction. */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       if (inst == KVM_INVALID_INST) {
+               kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
+               return EMULATE_FAIL;
+       }
+
+       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+               int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+               int rd = (inst & RD) >> 11;
+               int rt = (inst & RT) >> 16;
+               /* If usermode, check RDHWR rd is allowed by guest HWREna */
+               if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
+                       kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
+                                 rd, opc);
+                       goto emulate_ri;
+               }
+               switch (rd) {
+               case 0: /* CPU number */
+                       arch->gprs[rt] = 0;
+                       break;
+               case 1: /* SYNCI length */
+                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+                                            current_cpu_data.icache.linesz);
+                       break;
+               case 2: /* Read count register */
+                       arch->gprs[rt] = kvm_mips_read_count(vcpu);
+                       break;
+               case 3: /* Count register resolution */
+                       switch (current_cpu_data.cputype) {
+                       case CPU_20KC:
+                       case CPU_25KF:
+                               arch->gprs[rt] = 1;
+                               break;
+                       default:
+                               arch->gprs[rt] = 2;
+                       }
+                       break;
+               case 29:        /* UserLocal register */
+                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+                       break;
+
+               default:
+                       kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
+                       goto emulate_ri;
+               }
+       } else {
+               kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+               goto emulate_ri;
+       }
+
+       return EMULATE_DONE;
+
+emulate_ri:
+       /*
+        * Roll back the PC (if in a branch delay slot, the PC already points
+        * to the branch target) and pass the RI exception to the guest OS.
+        */
+       vcpu->arch.pc = curr_pc;
+       return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+}
+
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+                                                 struct kvm_run *run)
+{
+       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+
+       if (run->mmio.len > sizeof(*gpr)) {
+               kvm_err("Bad MMIO length: %d", run->mmio.len);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to roll back the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       switch (run->mmio.len) {
+       case 4:
+               *gpr = *(int32_t *) run->mmio.data;
+               break;
+
+       case 2:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int16_t *) run->mmio.data;
+               else
+                       *gpr = *(uint16_t *) run->mmio.data;
+
+               break;
+       case 1:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int8_t *) run->mmio.data;
+               else
+                       *gpr = *(u8 *) run->mmio.data;
+               break;
+       }
+
+       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+               kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+                         vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+                         vcpu->mmio_needed);
+
+done:
+       return er;
+}
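
The switch above is where load width and signedness meet: mmio_needed == 2
marks a sign-extending load (lb/lh), anything else zero-extends (lbu/lhu).
The intended extension semantics as a standalone sketch (the function name
and shape are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Extend raw MMIO bytes into a register image. */
    static unsigned long extend(const void *data, int len, int signed_load)
    {
            switch (len) {
            case 4:
                    return (unsigned long)*(const int32_t *)data;
            case 2:
                    return signed_load ? (unsigned long)*(const int16_t *)data
                                       : *(const uint16_t *)data;
            case 1:
                    return signed_load ? (unsigned long)*(const int8_t *)data
                                       : *(const uint8_t *)data;
            }
            return 0;
    }

    int main(void)
    {
            uint16_t raw = 0x8001;

            printf("lh  -> %#lx\n", extend(&raw, 2, 1)); /* sign bits set */
            printf("lhu -> %#lx\n", extend(&raw, 2, 0)); /* 0x8001 */
            return 0;
    }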
+
+static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
+                                                 uint32_t *opc,
+                                                 struct kvm_run *run,
+                                                 struct kvm_vcpu *vcpu)
+{
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+                         exccode, kvm_read_c0_guest_epc(cop0),
+                         kvm_read_c0_guest_badvaddr(cop0));
+       } else {
+               kvm_err("Trying to deliver EXC when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+       if (usermode) {
+               switch (exccode) {
+               case T_INT:
+               case T_SYSCALL:
+               case T_BREAK:
+               case T_RES_INST:
+                       break;
+
+               case T_COP_UNUSABLE:
+                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+                               er = EMULATE_PRIV_FAIL;
+                       break;
+
+               case T_TLB_MOD:
+                       break;
+
+               case T_TLB_LD_MISS:
+                       /*
+                        * If we are accessing guest kernel space, send an
+                        * address error exception to the guest
+                        */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               kvm_debug("%s: LD MISS @ %#lx\n", __func__,
+                                         badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_TLB_ST_MISS:
+                       /*
+                        * If we are accessing guest kernel space, send an
+                        * address error exception to the guest
+                        */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               kvm_debug("%s: ST MISS @ %#lx\n", __func__,
+                                         badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_ADDR_ERR_ST:
+                       kvm_debug("%s: address error ST @ %#lx\n", __func__,
+                                 badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               case T_ADDR_ERR_LD:
+                       kvm_debug("%s: address error LD @ %#lx\n", __func__,
+                                 badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               default:
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               }
+       }
+
+       if (er == EMULATE_PRIV_FAIL)
+               kvm_mips_emulate_exc(cause, opc, run, vcpu);
+
+       return er;
+}
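
The privilege failures above work by rewriting the ExcCode field of the host
Cause value before re-delivery, e.g. turning a user-mode TLBL on a guest
kernel address into AdEL. The read-modify-write in isolation (ExcCode numbers
per the MIPS Cause register: TLBL = 2, AdEL = 4):

    #include <stdio.h>

    #define CAUSEB_EXCCODE  2

    int main(void)
    {
            unsigned long cause = 0x10000008;       /* ExcCode = 2 (TLBL) */
            unsigned int exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;

            /* Re-deliver as an address error, as done above for user-mode
             * accesses into guest kernel space. */
            cause = (cause & ~0xfful) | (4 << CAUSEB_EXCCODE);
            printf("old=%u new=%#lx\n", exccode, cause); /* old=2 new=0x10000010 */
            return 0;
    }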
+
+/*
+ * User Address (UA) fault; this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ *     case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ *     case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long va = vcpu->arch.host_cp0_badvaddr;
+       int index;
+
+       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+       /*
+        * KVM would not have got the exception if this entry was valid in the
+        * shadow host TLB. Check the guest TLB; if the entry is not there,
+        * send the guest an exception. The guest exc handler should then inject
+        * an entry into the guest TLB.
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                       (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
+       if (index < 0) {
+               if (exccode == T_TLB_LD_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+               } else if (exccode == T_TLB_ST_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+               } else {
+                       kvm_err("%s: invalid exc code: %d\n", __func__,
+                               exccode);
+                       er = EMULATE_FAIL;
+               }
+       } else {
+               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+               /*
+                * Check if the entry is valid; if not, set up a TLB invalid
+                * exception for the guest
+                */
+               if (!TLB_IS_VALID(*tlb, va)) {
+                       if (exccode == T_TLB_LD_MISS) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+                                                               vcpu);
+                       } else if (exccode == T_TLB_ST_MISS) {
+                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+                                                               vcpu);
+                       } else {
+                               kvm_err("%s: invalid exc code: %d\n", __func__,
+                                       exccode);
+                               er = EMULATE_FAIL;
+                       }
+               } else {
+                       kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+                                 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+                       /*
+                        * OK we have a Guest TLB entry, now inject it into the
+                        * shadow host TLB
+                        */
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+                                                            NULL);
+               }
+       }
+
+       return er;
+}
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
new file mode 100644 (file)
index 0000000..9b44459
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupt delivery
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Set the Cause bits to reflect the pending timer interrupt;
+        * the EXC code will be set when we are actually
+        * delivering the interrupt:
+        */
+       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+       /* Queue up an INT exception for the core */
+       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       /*
+        * Set the Cause bits to reflect the pending IO interrupt;
+        * the EXC code will be set when we are actually
+        * delivering the interrupt:
+        */
+       switch (intr) {
+       case 2:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               /* Queue up an INT exception for the core */
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case 3:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case 4:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+}
+
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       switch (intr) {
+       case -2:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case -3:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case -4:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause)
+{
+       int allowed = 0;
+       uint32_t exccode;
+
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       switch (priority) {
+       case MIPS_EXC_INT_TIMER:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IO:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_1:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_2:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       /* Are we allowed to deliver the interrupt? */
+       if (allowed) {
+               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+                       /* save old pc */
+                       kvm_write_c0_guest_epc(cop0, arch->pc);
+                       kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+                       if (cause & CAUSEF_BD)
+                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+                       else
+                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+               } else {
+                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
+               }
+
+               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* XXXSL Set PC to the interrupt exception entry point */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
+               else
+                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+               clear_bit(priority, &vcpu->arch.pending_exceptions);
+       }
+
+       return allowed;
+}
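
Every case in kvm_mips_irq_deliver_cb() repeats the same three-part test:
interrupts globally enabled (Status.IE), not suppressed by EXL/ERL, and the
line's IM bit unmasked. Factored into a standalone predicate as a sketch
(ip is the Cause/Status IP bit index, 7 for the timer, so IE_IRQ5 == 1 << 15):

    #include <stdint.h>

    #define ST0_IE  (1u << 0)
    #define ST0_EXL (1u << 1)
    #define ST0_ERL (1u << 2)

    /* Can an interrupt wired to IP bit @ip be taken with this Status value? */
    static int irq_deliverable(uint32_t status, unsigned int ip)
    {
            if (!(status & ST0_IE))                 /* globally disabled */
                    return 0;
            if (status & (ST0_EXL | ST0_ERL))       /* exception/error mode */
                    return 0;
            return !!(status & (1u << (8 + ip)));   /* IM bit for this line */
    }

    int main(void)
    {
            /* Timer (IP7) with IE set and IM7 unmasked: deliverable. */
            return !irq_deliverable(ST0_IE | (1u << 15), 7);
    }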
+
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause)
+{
+       return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long *pending = &vcpu->arch.pending_exceptions;
+       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+       unsigned int priority;
+
+       if (!(*pending) && !(*pending_clr))
+               return;
+
+       priority = __ffs(*pending_clr);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending_clr,
+                                        BITS_PER_BYTE * sizeof(*pending_clr),
+                                        priority + 1);
+       }
+
+       priority = __ffs(*pending);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending,
+                                        BITS_PER_BYTE * sizeof(*pending),
+                                        priority + 1);
+       }
+
+}
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
new file mode 100644 (file)
index 0000000..4ab4bdf
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupts
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/*
+ * MIPS exception priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
+
+extern char mips32_exception[], mips32_exceptionEnd[];
+extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+
+#define C_TI        (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
deleted file mode 100644 (file)
index 313c2e3..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Yann Le Du <ledu@kymasys.com>
- */
-
-#include <linux/export.h>
-#include <linux/kvm_host.h>
-
-struct kvm_mips_callbacks *kvm_mips_callbacks;
-EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
deleted file mode 100644 (file)
index d7279c0..0000000
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Main entry point for the guest, exception handling.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <asm/asm.h>
-#include <asm/asmmacro.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-#include <asm/asm-offsets.h>
-
-#define _C_LABEL(x)     x
-#define MIPSX(name)     mips32_ ## name
-#define CALLFRAME_SIZ   32
-
-/*
- * VECTOR
- *  exception vector entrypoint
- */
-#define VECTOR(x, regmask)      \
-    .ent    _C_LABEL(x),0;      \
-    EXPORT(x);
-
-#define VECTOR_END(x)      \
-    EXPORT(x);
-
-/* Overload, Danger Will Robinson!! */
-#define PT_HOST_ASID        PT_BVADDR
-#define PT_HOST_USERLOCAL   PT_EPC
-
-#define CP0_DDATA_LO        $28,3
-#define CP0_EBASE           $15,1
-
-#define CP0_INTCTL          $12,1
-#define CP0_SRSCTL          $12,2
-#define CP0_SRSMAP          $12,3
-#define CP0_HWRENA          $7,0
-
-/* Resume Flags */
-#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
-
-#define RESUME_GUEST            0
-#define RESUME_HOST             RESUME_FLAG_HOST
-
-/*
- * __kvm_mips_vcpu_run: entry point to the guest
- * a0: run
- * a1: vcpu
- */
-       .set    noreorder
-       .set    noat
-
-FEXPORT(__kvm_mips_vcpu_run)
-       /* k0/k1 not being used in host kernel context */
-       INT_ADDIU k1, sp, -PT_SIZE
-       LONG_S  $0, PT_R0(k1)
-       LONG_S  $1, PT_R1(k1)
-       LONG_S  $2, PT_R2(k1)
-       LONG_S  $3, PT_R3(k1)
-
-       LONG_S  $4, PT_R4(k1)
-       LONG_S  $5, PT_R5(k1)
-       LONG_S  $6, PT_R6(k1)
-       LONG_S  $7, PT_R7(k1)
-
-       LONG_S  $8,  PT_R8(k1)
-       LONG_S  $9,  PT_R9(k1)
-       LONG_S  $10, PT_R10(k1)
-       LONG_S  $11, PT_R11(k1)
-       LONG_S  $12, PT_R12(k1)
-       LONG_S  $13, PT_R13(k1)
-       LONG_S  $14, PT_R14(k1)
-       LONG_S  $15, PT_R15(k1)
-       LONG_S  $16, PT_R16(k1)
-       LONG_S  $17, PT_R17(k1)
-
-       LONG_S  $18, PT_R18(k1)
-       LONG_S  $19, PT_R19(k1)
-       LONG_S  $20, PT_R20(k1)
-       LONG_S  $21, PT_R21(k1)
-       LONG_S  $22, PT_R22(k1)
-       LONG_S  $23, PT_R23(k1)
-       LONG_S  $24, PT_R24(k1)
-       LONG_S  $25, PT_R25(k1)
-
-       /*
-        * XXXKYMA k0/k1 not saved, not being used if we got here through
-        * an ioctl()
-        */
-
-       LONG_S  $28, PT_R28(k1)
-       LONG_S  $29, PT_R29(k1)
-       LONG_S  $30, PT_R30(k1)
-       LONG_S  $31, PT_R31(k1)
-
-       /* Save hi/lo */
-       mflo    v0
-       LONG_S  v0, PT_LO(k1)
-       mfhi    v1
-       LONG_S  v1, PT_HI(k1)
-
-       /* Save host status */
-       mfc0    v0, CP0_STATUS
-       LONG_S  v0, PT_STATUS(k1)
-
-       /* Save host ASID, shove it into the BVADDR location */
-       mfc0    v1, CP0_ENTRYHI
-       andi    v1, 0xff
-       LONG_S  v1, PT_HOST_ASID(k1)
-
-       /* Save DDATA_LO, will be used to store pointer to vcpu */
-       mfc0    v1, CP0_DDATA_LO
-       LONG_S  v1, PT_HOST_USERLOCAL(k1)
-
-       /* DDATA_LO has pointer to vcpu */
-       mtc0    a1, CP0_DDATA_LO
-
-       /* Offset into vcpu->arch */
-       INT_ADDIU k1, a1, VCPU_HOST_ARCH
-
-       /*
-        * Save the host stack to VCPU, used for exception processing
-        * when we exit from the Guest
-        */
-       LONG_S  sp, VCPU_HOST_STACK(k1)
-
-       /* Save the kernel gp as well */
-       LONG_S  gp, VCPU_HOST_GP(k1)
-
-       /*
-        * Setup status register for running the guest in UM, interrupts
-        * are disabled
-        */
-       li      k0, (ST0_EXL | KSU_USER | ST0_BEV)
-       mtc0    k0, CP0_STATUS
-       ehb
-
-       /* load up the new EBASE */
-       LONG_L  k0, VCPU_GUEST_EBASE(k1)
-       mtc0    k0, CP0_EBASE
-
-       /*
-        * Now that the new EBASE has been loaded, unset BEV and restore the
-        * interrupt mask as it was, making sure that timer interrupts are
-        * enabled
-        */
-       li      k0, (ST0_EXL | KSU_USER | ST0_IE)
-       andi    v0, v0, ST0_IM
-       or      k0, k0, v0
-       mtc0    k0, CP0_STATUS
-       ehb
-
-       /* Set Guest EPC */
-       LONG_L  t0, VCPU_PC(k1)
-       mtc0    t0, CP0_EPC
-
-FEXPORT(__kvm_mips_load_asid)
-       /* Set the ASID for the Guest Kernel */
-       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
-                               /* addresses shift to 0x80000000 */
-       bltz    t0, 1f          /* If kernel */
-        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
-       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
-1:
-       /* t1 contains the base of the ASID array; now get the cpu id */
-       LONG_L  t2, TI_CPU($28)             /* smp_processor_id */
-       INT_SLL t2, t2, 2                   /* x4 */
-       REG_ADDU t3, t1, t2
-       LONG_L  k0, (t3)
-       andi    k0, k0, 0xff
-       mtc0    k0, CP0_ENTRYHI
-       ehb
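
The sign test above is worth spelling out: guest kseg0 sits at 0x40000000, so shifting the
guest PC left by one moves that boundary onto the 32-bit sign bit, and bltz then selects the
kernel ASID (the same trick is reused on the return-to-guest path below). A hedged C
equivalent, assuming 32-bit guest virtual addresses; the helper name is illustrative only:

        /* Sketch: the sll/bltz pair tests bit 30 of the PC. Guest virtual
         * addresses stay below 0x80000000, so that bit is set exactly for
         * guest kernel addresses (kseg0 at 0x40000000 and above). */
        static inline int guest_pc_is_kernel(unsigned long pc)
        {
                return (long)(pc << 1) < 0;     /* bit 30 -> sign bit */
        }
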
-
-       /* Disable RDHWR access */
-       mtc0    zero, CP0_HWRENA
-
-       /* Now load up the Guest Context from VCPU */
-       LONG_L  $1, VCPU_R1(k1)
-       LONG_L  $2, VCPU_R2(k1)
-       LONG_L  $3, VCPU_R3(k1)
-
-       LONG_L  $4, VCPU_R4(k1)
-       LONG_L  $5, VCPU_R5(k1)
-       LONG_L  $6, VCPU_R6(k1)
-       LONG_L  $7, VCPU_R7(k1)
-
-       LONG_L  $8, VCPU_R8(k1)
-       LONG_L  $9, VCPU_R9(k1)
-       LONG_L  $10, VCPU_R10(k1)
-       LONG_L  $11, VCPU_R11(k1)
-       LONG_L  $12, VCPU_R12(k1)
-       LONG_L  $13, VCPU_R13(k1)
-       LONG_L  $14, VCPU_R14(k1)
-       LONG_L  $15, VCPU_R15(k1)
-       LONG_L  $16, VCPU_R16(k1)
-       LONG_L  $17, VCPU_R17(k1)
-       LONG_L  $18, VCPU_R18(k1)
-       LONG_L  $19, VCPU_R19(k1)
-       LONG_L  $20, VCPU_R20(k1)
-       LONG_L  $21, VCPU_R21(k1)
-       LONG_L  $22, VCPU_R22(k1)
-       LONG_L  $23, VCPU_R23(k1)
-       LONG_L  $24, VCPU_R24(k1)
-       LONG_L  $25, VCPU_R25(k1)
-
-       /* k0/k1 loaded up later */
-
-       LONG_L  $28, VCPU_R28(k1)
-       LONG_L  $29, VCPU_R29(k1)
-       LONG_L  $30, VCPU_R30(k1)
-       LONG_L  $31, VCPU_R31(k1)
-
-       /* Restore hi/lo */
-       LONG_L  k0, VCPU_LO(k1)
-       mtlo    k0
-
-       LONG_L  k0, VCPU_HI(k1)
-       mthi    k0
-
-FEXPORT(__kvm_mips_load_k0k1)
-       /* Restore the guest's k0/k1 registers */
-       LONG_L  k0, VCPU_R26(k1)
-       LONG_L  k1, VCPU_R27(k1)
-
-       /* Jump to guest */
-       eret
-
-VECTOR(MIPSX(exception), unknown)
-/* Find out what mode we came from and jump to the proper handler. */
-       mtc0    k0, CP0_ERROREPC        #01: Save guest k0
-       ehb                             #02:
-
-       mfc0    k0, CP0_EBASE           #03: Get EBASE
-       INT_SRL k0, k0, 10              #04: Get rid of CPUNum
-       INT_SLL k0, k0, 10              #05
-       LONG_S  k1, 0x3000(k0)          #06: Save k1 @ offset 0x3000
-       INT_ADDIU k0, k0, 0x2000        #07: Exception handler is
-                                       #    installed @ offset 0x2000
-       j       k0                      #08: jump to the function
-        nop                            #09: branch delay slot
-VECTOR_END(MIPSX(exceptionEnd))
-.end MIPSX(exception)
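
This stub is what gets copied into every vector slot: it derives the handler base from EBASE,
parks k1 in a fixed scratch word, and jumps to the real handler. Reconstructing from the
offsets used here and in kvm_arch_vcpu_create() further down, the per-vCPU gebase region is
laid out roughly as follows (a sketch, not an authoritative map):

        /* Assumed layout, pieced together from the offsets in this file:
         * gebase + 0x0000   TLB refill vector (copy of mips32_exception)
         * gebase + 0x0180   general exception vector (another copy)
         * gebase + 0x0200.. eight vectored-interrupt copies, VECTORSPACING apart
         * gebase + 0x2000   mips32_GuestException, the real handler
         * gebase + 0x3000   scratch slot where the stub saves k1
         */
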
-
-/*
- * Generic Guest exception handler. We end up here when the guest
- * does something that causes a trap to kernel mode.
- */
-NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
-       /* Get the VCPU pointer from DDATA_LO */
-       mfc0    k1, CP0_DDATA_LO
-       INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
-       /* Start saving Guest context to VCPU */
-       LONG_S  $0, VCPU_R0(k1)
-       LONG_S  $1, VCPU_R1(k1)
-       LONG_S  $2, VCPU_R2(k1)
-       LONG_S  $3, VCPU_R3(k1)
-       LONG_S  $4, VCPU_R4(k1)
-       LONG_S  $5, VCPU_R5(k1)
-       LONG_S  $6, VCPU_R6(k1)
-       LONG_S  $7, VCPU_R7(k1)
-       LONG_S  $8, VCPU_R8(k1)
-       LONG_S  $9, VCPU_R9(k1)
-       LONG_S  $10, VCPU_R10(k1)
-       LONG_S  $11, VCPU_R11(k1)
-       LONG_S  $12, VCPU_R12(k1)
-       LONG_S  $13, VCPU_R13(k1)
-       LONG_S  $14, VCPU_R14(k1)
-       LONG_S  $15, VCPU_R15(k1)
-       LONG_S  $16, VCPU_R16(k1)
-       LONG_S  $17, VCPU_R17(k1)
-       LONG_S  $18, VCPU_R18(k1)
-       LONG_S  $19, VCPU_R19(k1)
-       LONG_S  $20, VCPU_R20(k1)
-       LONG_S  $21, VCPU_R21(k1)
-       LONG_S  $22, VCPU_R22(k1)
-       LONG_S  $23, VCPU_R23(k1)
-       LONG_S  $24, VCPU_R24(k1)
-       LONG_S  $25, VCPU_R25(k1)
-
-       /* Guest k0/k1 saved later */
-
-       LONG_S  $28, VCPU_R28(k1)
-       LONG_S  $29, VCPU_R29(k1)
-       LONG_S  $30, VCPU_R30(k1)
-       LONG_S  $31, VCPU_R31(k1)
-
-       /* We need to save hi/lo and restore them on the way out */
-       mfhi    t0
-       LONG_S  t0, VCPU_HI(k1)
-
-       mflo    t0
-       LONG_S  t0, VCPU_LO(k1)
-
-       /* Finally save guest k0/k1 to VCPU */
-       mfc0    t0, CP0_ERROREPC
-       LONG_S  t0, VCPU_R26(k1)
-
-       /* Get GUEST k1 and save it in VCPU */
-       PTR_LI  t1, ~0x2ff
-       mfc0    t0, CP0_EBASE
-       and     t0, t0, t1
-       LONG_L  t0, 0x3000(t0)
-       LONG_S  t0, VCPU_R27(k1)
-
-       /* Now that context has been saved, we can use other registers */
-
-       /* Restore vcpu */
-       mfc0    a1, CP0_DDATA_LO
-       move    s1, a1
-
-       /* Restore run (vcpu->run) */
-       LONG_L  a0, VCPU_RUN(a1)
-       /* Save pointer to run in s0, will be saved by the compiler */
-       move    s0, a0
-
-       /*
-        * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
-        * process the exception
-        */
-       mfc0    k0, CP0_EPC
-       LONG_S  k0, VCPU_PC(k1)
-
-       mfc0    k0, CP0_BADVADDR
-       LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
-
-       mfc0    k0, CP0_CAUSE
-       LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
-
-       mfc0    k0, CP0_ENTRYHI
-       LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
-
-       /* Now restore the host state just enough to run the handlers */
-
-       /* Switch EBASE to the one used by Linux */
-       /* Load up the host EBASE */
-       mfc0    v0, CP0_STATUS
-
-       .set    at
-       or      k0, v0, ST0_BEV
-       .set    noat
-
-       mtc0    k0, CP0_STATUS
-       ehb
-
-       LONG_L  k0, VCPU_HOST_EBASE(k1)
-       mtc0    k0, CP0_EBASE
-
-       /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
-       .set    at
-       and     v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
-       or      v0, v0, ST0_CU0
-       .set    noat
-       mtc0    v0, CP0_STATUS
-       ehb
-
-       /* Load up host GP */
-       LONG_L  gp, VCPU_HOST_GP(k1)
-
-       /* Need a stack before we can jump to "C" */
-       LONG_L  sp, VCPU_HOST_STACK(k1)
-
-       /* Saved host state */
-       INT_ADDIU sp, sp, -PT_SIZE
-
-       /*
-        * XXXKYMA do we need to load the host ASID, maybe not because the
-        * kernel entries are marked GLOBAL, need to verify
-        */
-
-       /* Restore host DDATA_LO */
-       LONG_L  k0, PT_HOST_USERLOCAL(sp)
-       mtc0    k0, CP0_DDATA_LO
-
-       /* Restore RDHWR access */
-       PTR_LI  k0, 0x2000000F
-       mtc0    k0, CP0_HWRENA
-
-       /* Jump to handler */
-FEXPORT(__kvm_mips_jump_to_handler)
-       /*
-        * XXXKYMA: not sure if this is safe, how large is the stack??
-        * Now jump to the kvm_mips_handle_exit() to see if we can deal
-        * with this in the kernel
-        */
-       PTR_LA  t9, kvm_mips_handle_exit
-       jalr.hb t9
-        INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
-
-       /* Return from handler; make sure interrupts are disabled */
-       di
-       ehb
-
-       /*
-        * XXXKYMA: k0/k1 could have been blown away if we processed
-        * an exception while we were handling the exception from the
-        * guest, reload k1
-        */
-
-       move    k1, s1
-       INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
-       /*
-        * Check the return value; it tells us whether we are returning to
-        * the host (to handle I/O etc.) or resuming the guest
-        */
-       andi    t0, v0, RESUME_HOST
-       bnez    t0, __kvm_mips_return_to_host
-        nop
-
-__kvm_mips_return_to_guest:
-       /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
-       mtc0    s1, CP0_DDATA_LO
-
-       /* Load up the Guest EBASE to minimize the window where BEV is set */
-       LONG_L  t0, VCPU_GUEST_EBASE(k1)
-
-       /* Switch EBASE back to the one used by KVM */
-       mfc0    v1, CP0_STATUS
-       .set    at
-       or      k0, v1, ST0_BEV
-       .set    noat
-       mtc0    k0, CP0_STATUS
-       ehb
-       mtc0    t0, CP0_EBASE
-
-       /* Setup status register for running guest in UM */
-       .set    at
-       or      v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
-       and     v1, v1, ~ST0_CU0
-       .set    noat
-       mtc0    v1, CP0_STATUS
-       ehb
-
-       /* Set Guest EPC */
-       LONG_L  t0, VCPU_PC(k1)
-       mtc0    t0, CP0_EPC
-
-       /* Set the ASID for the Guest Kernel */
-       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
-                               /* addresses shift to 0x80000000 */
-       bltz    t0, 1f          /* If kernel */
-        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
-       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
-1:
-       /* t1 contains the base of the ASID array; now get the cpu id */
-       LONG_L  t2, TI_CPU($28)         /* smp_processor_id */
-       INT_SLL t2, t2, 2               /* x4 */
-       REG_ADDU t3, t1, t2
-       LONG_L  k0, (t3)
-       andi    k0, k0, 0xff
-       mtc0    k0, CP0_ENTRYHI
-       ehb
-
-       /* Disable RDHWR access */
-       mtc0    zero, CP0_HWRENA
-
-       /* load the guest context from VCPU and return */
-       LONG_L  $0, VCPU_R0(k1)
-       LONG_L  $1, VCPU_R1(k1)
-       LONG_L  $2, VCPU_R2(k1)
-       LONG_L  $3, VCPU_R3(k1)
-       LONG_L  $4, VCPU_R4(k1)
-       LONG_L  $5, VCPU_R5(k1)
-       LONG_L  $6, VCPU_R6(k1)
-       LONG_L  $7, VCPU_R7(k1)
-       LONG_L  $8, VCPU_R8(k1)
-       LONG_L  $9, VCPU_R9(k1)
-       LONG_L  $10, VCPU_R10(k1)
-       LONG_L  $11, VCPU_R11(k1)
-       LONG_L  $12, VCPU_R12(k1)
-       LONG_L  $13, VCPU_R13(k1)
-       LONG_L  $14, VCPU_R14(k1)
-       LONG_L  $15, VCPU_R15(k1)
-       LONG_L  $16, VCPU_R16(k1)
-       LONG_L  $17, VCPU_R17(k1)
-       LONG_L  $18, VCPU_R18(k1)
-       LONG_L  $19, VCPU_R19(k1)
-       LONG_L  $20, VCPU_R20(k1)
-       LONG_L  $21, VCPU_R21(k1)
-       LONG_L  $22, VCPU_R22(k1)
-       LONG_L  $23, VCPU_R23(k1)
-       LONG_L  $24, VCPU_R24(k1)
-       LONG_L  $25, VCPU_R25(k1)
-
-       /* k0/k1 loaded later */
-       LONG_L  $28, VCPU_R28(k1)
-       LONG_L  $29, VCPU_R29(k1)
-       LONG_L  $30, VCPU_R30(k1)
-       LONG_L  $31, VCPU_R31(k1)
-
-FEXPORT(__kvm_mips_skip_guest_restore)
-       LONG_L  k0, VCPU_HI(k1)
-       mthi    k0
-
-       LONG_L  k0, VCPU_LO(k1)
-       mtlo    k0
-
-       LONG_L  k0, VCPU_R26(k1)
-       LONG_L  k1, VCPU_R27(k1)
-
-       eret
-
-__kvm_mips_return_to_host:
-       /* EBASE is already pointing to Linux */
-       LONG_L  k1, VCPU_HOST_STACK(k1)
-       INT_ADDIU k1, k1, -PT_SIZE
-
-       /* Restore host DDATA_LO */
-       LONG_L  k0, PT_HOST_USERLOCAL(k1)
-       mtc0    k0, CP0_DDATA_LO
-
-       /* Restore host ASID */
-       LONG_L  k0, PT_HOST_ASID(sp)
-       andi    k0, 0xff
-       mtc0    k0, CP0_ENTRYHI
-       ehb
-
-       /* Load context saved on the host stack */
-       LONG_L  $0, PT_R0(k1)
-       LONG_L  $1, PT_R1(k1)
-
-       /*
-        * r2/v0 is the return code, shift it down by 2 (arithmetic)
-        * to recover the err code
-        */
-       INT_SRA k0, v0, 2
-       move    $2, k0
-
-       LONG_L  $3, PT_R3(k1)
-       LONG_L  $4, PT_R4(k1)
-       LONG_L  $5, PT_R5(k1)
-       LONG_L  $6, PT_R6(k1)
-       LONG_L  $7, PT_R7(k1)
-       LONG_L  $8, PT_R8(k1)
-       LONG_L  $9, PT_R9(k1)
-       LONG_L  $10, PT_R10(k1)
-       LONG_L  $11, PT_R11(k1)
-       LONG_L  $12, PT_R12(k1)
-       LONG_L  $13, PT_R13(k1)
-       LONG_L  $14, PT_R14(k1)
-       LONG_L  $15, PT_R15(k1)
-       LONG_L  $16, PT_R16(k1)
-       LONG_L  $17, PT_R17(k1)
-       LONG_L  $18, PT_R18(k1)
-       LONG_L  $19, PT_R19(k1)
-       LONG_L  $20, PT_R20(k1)
-       LONG_L  $21, PT_R21(k1)
-       LONG_L  $22, PT_R22(k1)
-       LONG_L  $23, PT_R23(k1)
-       LONG_L  $24, PT_R24(k1)
-       LONG_L  $25, PT_R25(k1)
-
-       /* Host k0/k1 were not saved */
-
-       LONG_L  $28, PT_R28(k1)
-       LONG_L  $29, PT_R29(k1)
-       LONG_L  $30, PT_R30(k1)
-
-       LONG_L  k0, PT_HI(k1)
-       mthi    k0
-
-       LONG_L  k0, PT_LO(k1)
-       mtlo    k0
-
-       /* Restore RDHWR access */
-       PTR_LI  k0, 0x2000000F
-       mtc0    k0, CP0_HWRENA
-
-       /* Restore RA, which is the address we will return to */
-       LONG_L  ra, PT_R31(k1)
-       j       ra
-        nop
-
-VECTOR_END(MIPSX(GuestExceptionEnd))
-.end MIPSX(GuestException)
-
-MIPSX(exceptions):
-       #####
-       ##### The exception handlers.
-       #####
-       .word _C_LABEL(MIPSX(GuestException))   #  0
-       .word _C_LABEL(MIPSX(GuestException))   #  1
-       .word _C_LABEL(MIPSX(GuestException))   #  2
-       .word _C_LABEL(MIPSX(GuestException))   #  3
-       .word _C_LABEL(MIPSX(GuestException))   #  4
-       .word _C_LABEL(MIPSX(GuestException))   #  5
-       .word _C_LABEL(MIPSX(GuestException))   #  6
-       .word _C_LABEL(MIPSX(GuestException))   #  7
-       .word _C_LABEL(MIPSX(GuestException))   #  8
-       .word _C_LABEL(MIPSX(GuestException))   #  9
-       .word _C_LABEL(MIPSX(GuestException))   # 10
-       .word _C_LABEL(MIPSX(GuestException))   # 11
-       .word _C_LABEL(MIPSX(GuestException))   # 12
-       .word _C_LABEL(MIPSX(GuestException))   # 13
-       .word _C_LABEL(MIPSX(GuestException))   # 14
-       .word _C_LABEL(MIPSX(GuestException))   # 15
-       .word _C_LABEL(MIPSX(GuestException))   # 16
-       .word _C_LABEL(MIPSX(GuestException))   # 17
-       .word _C_LABEL(MIPSX(GuestException))   # 18
-       .word _C_LABEL(MIPSX(GuestException))   # 19
-       .word _C_LABEL(MIPSX(GuestException))   # 20
-       .word _C_LABEL(MIPSX(GuestException))   # 21
-       .word _C_LABEL(MIPSX(GuestException))   # 22
-       .word _C_LABEL(MIPSX(GuestException))   # 23
-       .word _C_LABEL(MIPSX(GuestException))   # 24
-       .word _C_LABEL(MIPSX(GuestException))   # 25
-       .word _C_LABEL(MIPSX(GuestException))   # 26
-       .word _C_LABEL(MIPSX(GuestException))   # 27
-       .word _C_LABEL(MIPSX(GuestException))   # 28
-       .word _C_LABEL(MIPSX(GuestException))   # 29
-       .word _C_LABEL(MIPSX(GuestException))   # 30
-       .word _C_LABEL(MIPSX(GuestException))   # 31
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
deleted file mode 100644 (file)
index 289b4d2..0000000
+++ /dev/null
@@ -1,1218 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: MIPS specific KVM APIs
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_int.h"
-#include "kvm_mips_comm.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-#ifndef VECTORSPACING
-#define VECTORSPACING 0x100    /* for EI/VI mode */
-#endif
-
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
-struct kvm_stats_debugfs_item debugfs_entries[] = {
-       { "wait",         VCPU_STAT(wait_exits),         KVM_STAT_VCPU },
-       { "cache",        VCPU_STAT(cache_exits),        KVM_STAT_VCPU },
-       { "signal",       VCPU_STAT(signal_exits),       KVM_STAT_VCPU },
-       { "interrupt",    VCPU_STAT(int_exits),          KVM_STAT_VCPU },
-       { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
-       { "tlbmod",       VCPU_STAT(tlbmod_exits),       KVM_STAT_VCPU },
-       { "tlbmiss_ld",   VCPU_STAT(tlbmiss_ld_exits),   KVM_STAT_VCPU },
-       { "tlbmiss_st",   VCPU_STAT(tlbmiss_st_exits),   KVM_STAT_VCPU },
-       { "addrerr_st",   VCPU_STAT(addrerr_st_exits),   KVM_STAT_VCPU },
-       { "addrerr_ld",   VCPU_STAT(addrerr_ld_exits),   KVM_STAT_VCPU },
-       { "syscall",      VCPU_STAT(syscall_exits),      KVM_STAT_VCPU },
-       { "resvd_inst",   VCPU_STAT(resvd_inst_exits),   KVM_STAT_VCPU },
-       { "break_inst",   VCPU_STAT(break_inst_exits),   KVM_STAT_VCPU },
-       { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
-       { "halt_wakeup",  VCPU_STAT(halt_wakeup),        KVM_STAT_VCPU },
-       {NULL}
-};
-
-static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
-{
-       int i;
-
-       for_each_possible_cpu(i) {
-               vcpu->arch.guest_kernel_asid[i] = 0;
-               vcpu->arch.guest_user_asid[i] = 0;
-       }
-
-       return 0;
-}
-
-/*
- * XXXKYMA: We are simulating a processor that has the WII bit set in
- * Config7, so we are "runnable" if interrupts are pending
- */
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
-       return !!(vcpu->arch.pending_exceptions);
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
-       return 1;
-}
-
-int kvm_arch_hardware_enable(void *garbage)
-{
-       return 0;
-}
-
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
-int kvm_arch_hardware_setup(void)
-{
-       return 0;
-}
-
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
-       *(int *)rtn = 0;
-}
-
-static void kvm_mips_init_tlbs(struct kvm *kvm)
-{
-       unsigned long wired;
-
-       /*
-        * Add a wired entry to the TLB; it is used to map the commpage into
-        * the guest kernel
-        */
-       wired = read_c0_wired();
-       write_c0_wired(wired + 1);
-       mtc0_tlbw_hazard();
-       kvm->arch.commpage_tlb = wired;
-
-       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
-                 kvm->arch.commpage_tlb);
-}
-
-static void kvm_mips_init_vm_percpu(void *arg)
-{
-       struct kvm *kvm = (struct kvm *)arg;
-
-       kvm_mips_init_tlbs(kvm);
-       kvm_mips_callbacks->vm_init(kvm);
-}
-
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
-       if (atomic_inc_return(&kvm_mips_instance) == 1) {
-               kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
-                         __func__);
-               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
-       }
-
-       return 0;
-}
-
-void kvm_mips_free_vcpus(struct kvm *kvm)
-{
-       unsigned int i;
-       struct kvm_vcpu *vcpu;
-
-       /* Put the pages we reserved for the guest pmap */
-       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
-               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
-                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
-       }
-       kfree(kvm->arch.guest_pmap);
-
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               kvm_arch_vcpu_free(vcpu);
-       }
-
-       mutex_lock(&kvm->lock);
-
-       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
-               kvm->vcpus[i] = NULL;
-
-       atomic_set(&kvm->online_vcpus, 0);
-
-       mutex_unlock(&kvm->lock);
-}
-
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
-static void kvm_mips_uninit_tlbs(void *arg)
-{
-       /* Restore wired count */
-       write_c0_wired(0);
-       mtc0_tlbw_hazard();
-       /* Clear out all the TLBs */
-       kvm_local_flush_tlb_all();
-}
-
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
-       kvm_mips_free_vcpus(kvm);
-
-       /* If this is the last instance, restore wired count */
-       if (atomic_dec_return(&kvm_mips_instance) == 0) {
-               kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
-                         __func__);
-               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
-       }
-}
-
-long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
-                       unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-                          struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-                           unsigned long npages)
-{
-       return 0;
-}
-
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                  struct kvm_memory_slot *memslot,
-                                  struct kvm_userspace_memory_region *mem,
-                                  enum kvm_mr_change change)
-{
-       return 0;
-}
-
-void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                  struct kvm_userspace_memory_region *mem,
-                                  const struct kvm_memory_slot *old,
-                                  enum kvm_mr_change change)
-{
-       unsigned long npages = 0;
-       int i;
-
-       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
-                 __func__, kvm, mem->slot, mem->guest_phys_addr,
-                 mem->memory_size, mem->userspace_addr);
-
-       /* Setup Guest PMAP table */
-       if (!kvm->arch.guest_pmap) {
-               if (mem->slot == 0)
-                       npages = mem->memory_size >> PAGE_SHIFT;
-
-               if (npages) {
-                       kvm->arch.guest_pmap_npages = npages;
-                       kvm->arch.guest_pmap =
-                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
-
-                       if (!kvm->arch.guest_pmap) {
-                               kvm_err("Failed to allocate guest PMAP");
-                               return;
-                       }
-
-                       kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
-                                 npages, kvm->arch.guest_pmap);
-
-                       /* Now setup the page table */
-                       for (i = 0; i < npages; i++)
-                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
-               }
-       }
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot)
-{
-}
-
-void kvm_arch_flush_shadow(struct kvm *kvm)
-{
-}
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
-{
-       int err, size, offset;
-       void *gebase;
-       int i;
-
-       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
-
-       if (!vcpu) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       err = kvm_vcpu_init(vcpu, kvm, id);
-
-       if (err)
-               goto out_free_cpu;
-
-       kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
-
-       /*
-        * Allocate space for host mode exception handlers that handle
-        * guest mode exits
-        */
-       if (cpu_has_veic || cpu_has_vint)
-               size = 0x200 + VECTORSPACING * 64;
-       else
-               size = 0x4000;
-
-       /* Save Linux EBASE */
-       vcpu->arch.host_ebase = (void *)read_c0_ebase();
-
-       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
-
-       if (!gebase) {
-               err = -ENOMEM;
-               goto out_free_cpu;
-       }
-       kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
-                 ALIGN(size, PAGE_SIZE), gebase);
-
-       /* Save new ebase */
-       vcpu->arch.guest_ebase = gebase;
-
-       /* Copy the L1 guest exception handler to the required offsets */
-
-       /* TLB Refill, EXL = 0 */
-       memcpy(gebase, mips32_exception,
-              mips32_exceptionEnd - mips32_exception);
-
-       /* General Exception Entry point */
-       memcpy(gebase + 0x180, mips32_exception,
-              mips32_exceptionEnd - mips32_exception);
-
-       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
-       for (i = 0; i < 8; i++) {
-               kvm_debug("L1 Vectored handler @ %p\n",
-                         gebase + 0x200 + (i * VECTORSPACING));
-               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
-                      mips32_exceptionEnd - mips32_exception);
-       }
-
-       /* General handler, relocate to unmapped space for sanity's sake */
-       offset = 0x2000;
-       kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
-                 gebase + offset,
-                 mips32_GuestExceptionEnd - mips32_GuestException);
-
-       memcpy(gebase + offset, mips32_GuestException,
-              mips32_GuestExceptionEnd - mips32_GuestException);
-
-       /* Invalidate the icache for these ranges */
-       local_flush_icache_range((unsigned long)gebase,
-                               (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
-
-       /*
-        * Allocate comm page for guest kernel, a TLB will be reserved for
-        * mapping GVA @ 0xFFFF8000 to this page
-        */
-       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
-
-       if (!vcpu->arch.kseg0_commpage) {
-               err = -ENOMEM;
-               goto out_free_gebase;
-       }
-
-       kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
-       kvm_mips_commpage_init(vcpu);
-
-       /* Init */
-       vcpu->arch.last_sched_cpu = -1;
-
-       /* Start off the timer */
-       kvm_mips_init_count(vcpu);
-
-       return vcpu;
-
-out_free_gebase:
-       kfree(gebase);
-
-out_free_cpu:
-       kfree(vcpu);
-
-out:
-       return ERR_PTR(err);
-}
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
-{
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-
-       kvm_vcpu_uninit(vcpu);
-
-       kvm_mips_dump_stats(vcpu);
-
-       kfree(vcpu->arch.guest_ebase);
-       kfree(vcpu->arch.kseg0_commpage);
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-       kvm_arch_vcpu_free(vcpu);
-}
-
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-                                       struct kvm_guest_debug *dbg)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       int r = 0;
-       sigset_t sigsaved;
-
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
-       if (vcpu->mmio_needed) {
-               if (!vcpu->mmio_is_write)
-                       kvm_mips_complete_mmio_load(vcpu, run);
-               vcpu->mmio_needed = 0;
-       }
-
-       local_irq_disable();
-       /* Check if we have any exceptions/interrupts pending */
-       kvm_mips_deliver_interrupts(vcpu,
-                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
-
-       kvm_guest_enter();
-
-       r = __kvm_mips_vcpu_run(run, vcpu);
-
-       kvm_guest_exit();
-       local_irq_enable();
-
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-
-       return r;
-}
-
-int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
-                            struct kvm_mips_interrupt *irq)
-{
-       int intr = (int)irq->irq;
-       struct kvm_vcpu *dvcpu = NULL;
-
-       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
-               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
-                         (int)intr);
-
-       if (irq->cpu == -1)
-               dvcpu = vcpu;
-       else
-               dvcpu = vcpu->kvm->vcpus[irq->cpu];
-
-       if (intr == 2 || intr == 3 || intr == 4) {
-               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
-
-       } else if (intr == -2 || intr == -3 || intr == -4) {
-               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
-       } else {
-               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
-                       irq->cpu, irq->irq);
-               return -EINVAL;
-       }
-
-       dvcpu->arch.wait = 0;
-
-       if (waitqueue_active(&dvcpu->wq))
-               wake_up_interruptible(&dvcpu->wq);
-
-       return 0;
-}
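
The sign convention above (a positive irq queues the interrupt, its negative dequeues it,
and cpu == -1 targets the calling vCPU) maps directly onto the KVM_INTERRUPT ioctl. A hedged
userspace sketch, assuming vcpu_fd is an already-open vCPU file descriptor and the usual
<linux/kvm.h> and <sys/ioctl.h> headers:

        struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 3 };

        ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* raise guest IRQ 3 */
        irq.irq = -3;
        ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* lower it again */
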
-
-int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-                                   struct kvm_mp_state *mp_state)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-                                   struct kvm_mp_state *mp_state)
-{
-       return -ENOIOCTLCMD;
-}
-
-static u64 kvm_mips_get_one_regs[] = {
-       KVM_REG_MIPS_R0,
-       KVM_REG_MIPS_R1,
-       KVM_REG_MIPS_R2,
-       KVM_REG_MIPS_R3,
-       KVM_REG_MIPS_R4,
-       KVM_REG_MIPS_R5,
-       KVM_REG_MIPS_R6,
-       KVM_REG_MIPS_R7,
-       KVM_REG_MIPS_R8,
-       KVM_REG_MIPS_R9,
-       KVM_REG_MIPS_R10,
-       KVM_REG_MIPS_R11,
-       KVM_REG_MIPS_R12,
-       KVM_REG_MIPS_R13,
-       KVM_REG_MIPS_R14,
-       KVM_REG_MIPS_R15,
-       KVM_REG_MIPS_R16,
-       KVM_REG_MIPS_R17,
-       KVM_REG_MIPS_R18,
-       KVM_REG_MIPS_R19,
-       KVM_REG_MIPS_R20,
-       KVM_REG_MIPS_R21,
-       KVM_REG_MIPS_R22,
-       KVM_REG_MIPS_R23,
-       KVM_REG_MIPS_R24,
-       KVM_REG_MIPS_R25,
-       KVM_REG_MIPS_R26,
-       KVM_REG_MIPS_R27,
-       KVM_REG_MIPS_R28,
-       KVM_REG_MIPS_R29,
-       KVM_REG_MIPS_R30,
-       KVM_REG_MIPS_R31,
-
-       KVM_REG_MIPS_HI,
-       KVM_REG_MIPS_LO,
-       KVM_REG_MIPS_PC,
-
-       KVM_REG_MIPS_CP0_INDEX,
-       KVM_REG_MIPS_CP0_CONTEXT,
-       KVM_REG_MIPS_CP0_USERLOCAL,
-       KVM_REG_MIPS_CP0_PAGEMASK,
-       KVM_REG_MIPS_CP0_WIRED,
-       KVM_REG_MIPS_CP0_HWRENA,
-       KVM_REG_MIPS_CP0_BADVADDR,
-       KVM_REG_MIPS_CP0_COUNT,
-       KVM_REG_MIPS_CP0_ENTRYHI,
-       KVM_REG_MIPS_CP0_COMPARE,
-       KVM_REG_MIPS_CP0_STATUS,
-       KVM_REG_MIPS_CP0_CAUSE,
-       KVM_REG_MIPS_CP0_EPC,
-       KVM_REG_MIPS_CP0_CONFIG,
-       KVM_REG_MIPS_CP0_CONFIG1,
-       KVM_REG_MIPS_CP0_CONFIG2,
-       KVM_REG_MIPS_CP0_CONFIG3,
-       KVM_REG_MIPS_CP0_CONFIG7,
-       KVM_REG_MIPS_CP0_ERROREPC,
-
-       KVM_REG_MIPS_COUNT_CTL,
-       KVM_REG_MIPS_COUNT_RESUME,
-       KVM_REG_MIPS_COUNT_HZ,
-};
-
-static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
-                           const struct kvm_one_reg *reg)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int ret;
-       s64 v;
-
-       switch (reg->id) {
-       case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
-               v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
-               break;
-       case KVM_REG_MIPS_HI:
-               v = (long)vcpu->arch.hi;
-               break;
-       case KVM_REG_MIPS_LO:
-               v = (long)vcpu->arch.lo;
-               break;
-       case KVM_REG_MIPS_PC:
-               v = (long)vcpu->arch.pc;
-               break;
-
-       case KVM_REG_MIPS_CP0_INDEX:
-               v = (long)kvm_read_c0_guest_index(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONTEXT:
-               v = (long)kvm_read_c0_guest_context(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_USERLOCAL:
-               v = (long)kvm_read_c0_guest_userlocal(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_PAGEMASK:
-               v = (long)kvm_read_c0_guest_pagemask(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_WIRED:
-               v = (long)kvm_read_c0_guest_wired(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_HWRENA:
-               v = (long)kvm_read_c0_guest_hwrena(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_BADVADDR:
-               v = (long)kvm_read_c0_guest_badvaddr(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_ENTRYHI:
-               v = (long)kvm_read_c0_guest_entryhi(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_COMPARE:
-               v = (long)kvm_read_c0_guest_compare(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_STATUS:
-               v = (long)kvm_read_c0_guest_status(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CAUSE:
-               v = (long)kvm_read_c0_guest_cause(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_EPC:
-               v = (long)kvm_read_c0_guest_epc(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_ERROREPC:
-               v = (long)kvm_read_c0_guest_errorepc(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG:
-               v = (long)kvm_read_c0_guest_config(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG1:
-               v = (long)kvm_read_c0_guest_config1(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG2:
-               v = (long)kvm_read_c0_guest_config2(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG3:
-               v = (long)kvm_read_c0_guest_config3(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG7:
-               v = (long)kvm_read_c0_guest_config7(cop0);
-               break;
-       /* registers to be handled specially */
-       case KVM_REG_MIPS_CP0_COUNT:
-       case KVM_REG_MIPS_COUNT_CTL:
-       case KVM_REG_MIPS_COUNT_RESUME:
-       case KVM_REG_MIPS_COUNT_HZ:
-               ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
-               if (ret)
-                       return ret;
-               break;
-       default:
-               return -EINVAL;
-       }
-       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
-               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
-
-               return put_user(v, uaddr64);
-       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
-               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
-               u32 v32 = (u32)v;
-
-               return put_user(v32, uaddr32);
-       } else {
-               return -EINVAL;
-       }
-}
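
Userspace reaches this path through KVM_GET_ONE_REG: the register ID encodes its own size
(KVM_REG_SIZE_U64 or KVM_REG_SIZE_U32), and reg.addr points at a buffer of matching width.
A hedged sketch, again assuming an open vcpu_fd and the standard KVM uapi headers:

        __u64 pc;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_MIPS_PC,                /* a U64-sized ID */
                .addr = (__u64)(unsigned long)&pc,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
                printf("guest pc = %#llx\n", (unsigned long long)pc);
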
-
-static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
-                           const struct kvm_one_reg *reg)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       u64 v;
-
-       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
-               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
-
-               if (get_user(v, uaddr64) != 0)
-                       return -EFAULT;
-       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
-               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
-               s32 v32;
-
-               if (get_user(v32, uaddr32) != 0)
-                       return -EFAULT;
-               v = (s64)v32;
-       } else {
-               return -EINVAL;
-       }
-
-       switch (reg->id) {
-       case KVM_REG_MIPS_R0:
-               /* Silently ignore requests to set $0 */
-               break;
-       case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
-               vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
-               break;
-       case KVM_REG_MIPS_HI:
-               vcpu->arch.hi = v;
-               break;
-       case KVM_REG_MIPS_LO:
-               vcpu->arch.lo = v;
-               break;
-       case KVM_REG_MIPS_PC:
-               vcpu->arch.pc = v;
-               break;
-
-       case KVM_REG_MIPS_CP0_INDEX:
-               kvm_write_c0_guest_index(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_CONTEXT:
-               kvm_write_c0_guest_context(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_USERLOCAL:
-               kvm_write_c0_guest_userlocal(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_PAGEMASK:
-               kvm_write_c0_guest_pagemask(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_WIRED:
-               kvm_write_c0_guest_wired(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_HWRENA:
-               kvm_write_c0_guest_hwrena(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_BADVADDR:
-               kvm_write_c0_guest_badvaddr(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_ENTRYHI:
-               kvm_write_c0_guest_entryhi(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_STATUS:
-               kvm_write_c0_guest_status(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_EPC:
-               kvm_write_c0_guest_epc(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_ERROREPC:
-               kvm_write_c0_guest_errorepc(cop0, v);
-               break;
-       /* registers to be handled specially */
-       case KVM_REG_MIPS_CP0_COUNT:
-       case KVM_REG_MIPS_CP0_COMPARE:
-       case KVM_REG_MIPS_CP0_CAUSE:
-       case KVM_REG_MIPS_COUNT_CTL:
-       case KVM_REG_MIPS_COUNT_RESUME:
-       case KVM_REG_MIPS_COUNT_HZ:
-               return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
-                        unsigned long arg)
-{
-       struct kvm_vcpu *vcpu = filp->private_data;
-       void __user *argp = (void __user *)arg;
-       long r;
-
-       switch (ioctl) {
-       case KVM_SET_ONE_REG:
-       case KVM_GET_ONE_REG: {
-               struct kvm_one_reg reg;
-
-               if (copy_from_user(&reg, argp, sizeof(reg)))
-                       return -EFAULT;
-               if (ioctl == KVM_SET_ONE_REG)
-                       return kvm_mips_set_reg(vcpu, &reg);
-               else
-                       return kvm_mips_get_reg(vcpu, &reg);
-       }
-       case KVM_GET_REG_LIST: {
-               struct kvm_reg_list __user *user_list = argp;
-               u64 __user *reg_dest;
-               struct kvm_reg_list reg_list;
-               unsigned n;
-
-               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
-                       return -EFAULT;
-               n = reg_list.n;
-               reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
-               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
-                       return -EFAULT;
-               if (n < reg_list.n)
-                       return -E2BIG;
-               reg_dest = user_list->reg;
-               if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
-                                sizeof(kvm_mips_get_one_regs)))
-                       return -EFAULT;
-               return 0;
-       }
-       case KVM_NMI:
-               /* Treat the NMI as a CPU reset */
-               r = kvm_mips_reset_vcpu(vcpu);
-               break;
-       case KVM_INTERRUPT:
-               {
-                       struct kvm_mips_interrupt irq;
-
-                       r = -EFAULT;
-                       if (copy_from_user(&irq, argp, sizeof(irq)))
-                               goto out;
-
-                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
-                                 irq.irq);
-
-                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
-                       break;
-               }
-       default:
-               r = -ENOIOCTLCMD;
-       }
-
-out:
-       return r;
-}
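
The KVM_GET_REG_LIST case above implements the usual two-call protocol: a first call with
n = 0 fails with E2BIG but writes back the required count, and a second call with a large
enough array receives the register IDs. A hedged userspace sketch of that handshake:

        struct kvm_reg_list probe = { .n = 0 }, *list;

        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);  /* fails with E2BIG, sets probe.n */

        list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
        list->n = probe.n;
        ioctl(vcpu_fd, KVM_GET_REG_LIST, list);    /* list->reg[] now holds the IDs */
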
-
-/* Get (and clear) the dirty memory log for a memory slot. */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-{
-       struct kvm_memory_slot *memslot;
-       unsigned long ga, ga_end;
-       int is_dirty = 0;
-       int r;
-       unsigned long n;
-
-       mutex_lock(&kvm->slots_lock);
-
-       r = kvm_get_dirty_log(kvm, log, &is_dirty);
-       if (r)
-               goto out;
-
-       /* If nothing is dirty, don't bother messing with page tables. */
-       if (is_dirty) {
-               memslot = &kvm->memslots->memslots[log->slot];
-
-               ga = memslot->base_gfn << PAGE_SHIFT;
-               ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
-               kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
-                        ga_end);
-
-               n = kvm_dirty_bitmap_bytes(memslot);
-               memset(memslot->dirty_bitmap, 0, n);
-       }
-
-       r = 0;
-out:
-       mutex_unlock(&kvm->slots_lock);
-       return r;
-}
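
Userspace drives this with the VM-level KVM_GET_DIRTY_LOG ioctl, passing a bitmap with one
bit per page of the slot. A hedged sketch, assuming vm_fd is the VM file descriptor and the
slot spans npages pages (the rounding below follows the kernel's 64-bit bitmap alignment):

        void *bitmap = calloc(1, (npages + 63) / 8);   /* rounded to 64-bit words */
        struct kvm_dirty_log log = { .slot = 0 };

        log.dirty_bitmap = bitmap;
        ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);  /* bit i set: page i was written */
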
-
-long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
-{
-       long r;
-
-       switch (ioctl) {
-       default:
-               r = -ENOIOCTLCMD;
-       }
-
-       return r;
-}
-
-int kvm_arch_init(void *opaque)
-{
-       if (kvm_mips_callbacks) {
-               kvm_err("kvm: module already exists\n");
-               return -EEXIST;
-       }
-
-       return kvm_mips_emulation_init(&kvm_mips_callbacks);
-}
-
-void kvm_arch_exit(void)
-{
-       kvm_mips_callbacks = NULL;
-}
-
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-                                 struct kvm_sregs *sregs)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-                                 struct kvm_sregs *sregs)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
-       return VM_FAULT_SIGBUS;
-}
-
-int kvm_dev_ioctl_check_extension(long ext)
-{
-       int r;
-
-       switch (ext) {
-       case KVM_CAP_ONE_REG:
-               r = 1;
-               break;
-       case KVM_CAP_COALESCED_MMIO:
-               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
-               break;
-       default:
-               r = 0;
-               break;
-       }
-       return r;
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
-       return kvm_mips_pending_timer(vcpu);
-}
-
-int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct mips_coproc *cop0;
-
-       if (!vcpu)
-               return -1;
-
-       kvm_debug("VCPU Register Dump:\n");
-       kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
-       kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
-
-       for (i = 0; i < 32; i += 4) {
-               kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
-                      vcpu->arch.gprs[i],
-                      vcpu->arch.gprs[i + 1],
-                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
-       }
-       kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
-       kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
-
-       cop0 = vcpu->arch.cop0;
-       kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
-                 kvm_read_c0_guest_status(cop0),
-                 kvm_read_c0_guest_cause(cop0));
-
-       kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
-
-       return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-       int i;
-
-       for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
-               vcpu->arch.gprs[i] = regs->gpr[i];
-       vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
-       vcpu->arch.hi = regs->hi;
-       vcpu->arch.lo = regs->lo;
-       vcpu->arch.pc = regs->pc;
-
-       return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
-               regs->gpr[i] = vcpu->arch.gprs[i];
-
-       regs->hi = vcpu->arch.hi;
-       regs->lo = vcpu->arch.lo;
-       regs->pc = vcpu->arch.pc;
-
-       return 0;
-}
-
-static void kvm_mips_comparecount_func(unsigned long data)
-{
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
-       kvm_mips_callbacks->queue_timer_int(vcpu);
-
-       vcpu->arch.wait = 0;
-       if (waitqueue_active(&vcpu->wq))
-               wake_up_interruptible(&vcpu->wq);
-}
-
-/* low level hrtimer wake routine */
-static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
-{
-       struct kvm_vcpu *vcpu;
-
-       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
-       kvm_mips_comparecount_func((unsigned long) vcpu);
-       return kvm_mips_count_timeout(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       kvm_mips_callbacks->vcpu_init(vcpu);
-       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
-                    HRTIMER_MODE_REL);
-       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
-       return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
-
-int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
-                                 struct kvm_translation *tr)
-{
-       return 0;
-}
-
-/* Initial guest state */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-       return kvm_mips_callbacks->vcpu_setup(vcpu);
-}
-
-static void kvm_mips_set_c0_status(void)
-{
-       uint32_t status = read_c0_status();
-
-       if (cpu_has_fpu)
-               status |= (ST0_CU1);
-
-       if (cpu_has_dsp)
-               status |= (ST0_MX);
-
-       write_c0_status(status);
-       ehb();
-}
-
-/*
- * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
- */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       uint32_t cause = vcpu->arch.host_cp0_cause;
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       /* Set a default exit reason */
-       run->exit_reason = KVM_EXIT_UNKNOWN;
-       run->ready_for_interrupt_injection = 1;
-
-       /*
-        * Set the appropriate status bits based on host CPU features,
-        * before we hit the scheduler
-        */
-       kvm_mips_set_c0_status();
-
-       local_irq_enable();
-
-       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
-                       cause, opc, run, vcpu);
-
-       /*
-        * Do a privilege check, if in UM most of these exit conditions end up
-        * causing an exception to be delivered to the Guest Kernel
-        */
-       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
-       if (er == EMULATE_PRIV_FAIL) {
-               goto skip_emul;
-       } else if (er == EMULATE_FAIL) {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               goto skip_emul;
-       }
-
-       switch (exccode) {
-       case T_INT:
-               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
-
-               ++vcpu->stat.int_exits;
-               trace_kvm_exit(vcpu, INT_EXITS);
-
-               if (need_resched())
-                       cond_resched();
-
-               ret = RESUME_GUEST;
-               break;
-
-       case T_COP_UNUSABLE:
-               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
-
-               ++vcpu->stat.cop_unusable_exits;
-               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
-               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
-               /* XXXKYMA: Might need to return to user space */
-               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
-                       ret = RESUME_HOST;
-               break;
-
-       case T_TLB_MOD:
-               ++vcpu->stat.tlbmod_exits;
-               trace_kvm_exit(vcpu, TLBMOD_EXITS);
-               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
-               break;
-
-       case T_TLB_ST_MISS:
-               kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
-                         cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
-                         badvaddr);
-
-               ++vcpu->stat.tlbmiss_st_exits;
-               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
-               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
-               break;
-
-       case T_TLB_LD_MISS:
-               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
-                         cause, opc, badvaddr);
-
-               ++vcpu->stat.tlbmiss_ld_exits;
-               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
-               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
-               break;
-
-       case T_ADDR_ERR_ST:
-               ++vcpu->stat.addrerr_st_exits;
-               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
-               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
-               break;
-
-       case T_ADDR_ERR_LD:
-               ++vcpu->stat.addrerr_ld_exits;
-               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
-               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
-               break;
-
-       case T_SYSCALL:
-               ++vcpu->stat.syscall_exits;
-               trace_kvm_exit(vcpu, SYSCALL_EXITS);
-               ret = kvm_mips_callbacks->handle_syscall(vcpu);
-               break;
-
-       case T_RES_INST:
-               ++vcpu->stat.resvd_inst_exits;
-               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
-               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
-               break;
-
-       case T_BREAK:
-               ++vcpu->stat.break_inst_exits;
-               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
-               ret = kvm_mips_callbacks->handle_break(vcpu);
-               break;
-
-       default:
-               kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
-                       exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-                       kvm_read_c0_guest_status(vcpu->arch.cop0));
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               break;
-
-       }
-
-skip_emul:
-       local_irq_disable();
-
-       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
-               kvm_mips_deliver_interrupts(vcpu, cause);
-
-       if (!(ret & RESUME_HOST)) {
-               /* Only check for signals if not already exiting to userspace */
-               if (signal_pending(current)) {
-                       run->exit_reason = KVM_EXIT_INTR;
-                       ret = (-EINTR << 2) | RESUME_HOST;
-                       ++vcpu->stat.signal_exits;
-                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
-               }
-       }
-
-       return ret;
-}
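
The return value follows the (errcode << 2 | RESUME_FLAG_*) form noted in the comment above:
the low bits carry the resume flags and the upper bits a signed error code, which is why the
assembly return path in locore recovers it with an arithmetic shift (INT_SRA k0, v0, 2). A
hedged sketch of the packing; the helper names are illustrative, not from the source:

        /* Sketch of the encoding used by kvm_mips_handle_exit(). */
        static inline int pack_resume(int errcode, int flags)
        {
                return (errcode << 2) | flags;  /* e.g. (-EINTR << 2) | RESUME_HOST */
        }

        static inline int resume_errcode(int v)
        {
                return v >> 2;                  /* arithmetic shift keeps the sign */
        }
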
-
-int __init kvm_mips_init(void)
-{
-       int ret;
-
-       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
-       if (ret)
-               return ret;
-
-       /*
-        * On MIPS, kernel modules are executed from "mapped space", which
-        * requires TLBs. The TLB handling code is statically linked with
-        * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
-        * double faulting. The issue is that the TLB code references
-        * routines that are part of the KVM module, which are only
-        * available once the module is loaded.
-        */
-       kvm_mips_gfn_to_pfn = gfn_to_pfn;
-       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
-       kvm_mips_is_error_pfn = is_error_pfn;
-
-       pr_info("KVM/MIPS Initialized\n");
-       return 0;
-}
-
-void __exit kvm_mips_exit(void)
-{
-       kvm_exit();
-
-       kvm_mips_gfn_to_pfn = NULL;
-       kvm_mips_release_pfn_clean = NULL;
-       kvm_mips_is_error_pfn = NULL;
-
-       pr_info("KVM/MIPS unloaded\n");
-}
-
-module_init(kvm_mips_init);
-module_exit(kvm_mips_exit);
-
-EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
deleted file mode 100644 (file)
index 08c5fa2..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: commpage: mapped into guest kernel space
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#ifndef __KVM_MIPS_COMMPAGE_H__
-#define __KVM_MIPS_COMMPAGE_H__
-
-struct kvm_mips_commpage {
-       /* COP0 state is mapped into Guest kernel via commpage */
-       struct mips_coproc cop0;
-};
-
-#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
-
-extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
-
-#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
deleted file mode 100644 (file)
index 4b5612b..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * commpage, currently used for Virtual COP0 registers.
- * Mapped into the guest kernel @ 0x0.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_comm.h"
-
-void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
-{
-       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
-
-       memset(page, 0, sizeof(struct kvm_mips_commpage));
-
-       /* Specific init values for fields */
-       vcpu->arch.cop0 = &page->cop0;
-       memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
-}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
deleted file mode 100644 (file)
index fa7184d..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/cacheflush.h>
-
-#include "kvm_mips_comm.h"
-
-#define SYNCI_TEMPLATE  0x041f0000
-#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
-#define SYNCI_OFFSET(x) ((x) & 0xffff)
-
-#define LW_TEMPLATE     0x8c000000
-#define CLEAR_TEMPLATE  0x00000020
-#define SW_TEMPLATE     0xac000000
-
-int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-                              struct kvm_vcpu *vcpu)
-{
-       int result = 0;
-       unsigned long kseg0_opc;
-       uint32_t synci_inst = 0x0;
-
-       /* Replace the CACHE instruction with a NOP */
-       kseg0_opc =
-           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                      (vcpu, (unsigned long) opc));
-       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
-       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
-       return result;
-}
-
-/*
- * Address based CACHE instructions are transformed into synci(s). A little
- * heavy for just D-cache invalidates, but avoids an expensive trap
- */
-int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
-                           struct kvm_vcpu *vcpu)
-{
-       int result = 0;
-       unsigned long kseg0_opc;
-       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
-
-       base = (inst >> 21) & 0x1f;
-       offset = inst & 0xffff;
-       synci_inst |= (base << 21);
-       synci_inst |= offset;
-
-       kseg0_opc =
-           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                      (vcpu, (unsigned long) opc));
-       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
-       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
-       return result;
-}
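
The rewrite works because CACHE and SYNCI share the same field positions: bits 25:21 hold the
base register and the low 16 bits a signed offset, so both fields can be copied verbatim onto
SYNCI_TEMPLATE. A hedged restatement of the field surgery above (helper name illustrative):

        /* Sketch: rebuild a synci from a CACHE instruction's fields. */
        static uint32_t cache_to_synci(uint32_t cache_inst)
        {
                uint32_t base   = (cache_inst >> 21) & 0x1f;  /* rs field      */
                uint32_t offset = cache_inst & 0xffff;        /* 16-bit offset */

                return SYNCI_TEMPLATE | (base << 21) | offset;
        }
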
-
-int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
-{
-       int32_t rt, rd, sel;
-       uint32_t mfc0_inst;
-       unsigned long kseg0_opc, flags;
-
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
-
-       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
-               mfc0_inst = CLEAR_TEMPLATE;
-               mfc0_inst |= ((rt & 0x1f) << 16);
-       } else {
-               mfc0_inst = LW_TEMPLATE;
-               mfc0_inst |= ((rt & 0x1f) << 16);
-               mfc0_inst |=
-                   offsetof(struct mips_coproc,
-                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
-                                                     cop0);
-       }
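-       /*
-        * Note: the commpage is mapped into the guest at VA 0, so an LW with
-        * base $zero and the commpage field offset as its immediate reads the
-        * virtual COP0 register directly, without trapping.
-        */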
-
-       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-               kseg0_opc =
-                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                              (vcpu, (unsigned long) opc));
-               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
-               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-               local_irq_save(flags);
-               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
-               local_flush_icache_range((unsigned long)opc,
-                                        (unsigned long)opc + 32);
-               local_irq_restore(flags);
-       } else {
-               kvm_err("%s: Invalid address: %p\n", __func__, opc);
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
-{
-       int32_t rt, rd, sel;
-       uint32_t mtc0_inst = SW_TEMPLATE;
-       unsigned long kseg0_opc, flags;
-
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
-
-       mtc0_inst |= ((rt & 0x1f) << 16);
-       mtc0_inst |=
-           offsetof(struct mips_coproc,
-                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
-
-       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-               kseg0_opc =
-                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                              (vcpu, (unsigned long) opc));
-               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
-               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-               local_irq_save(flags);
-               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
-               local_flush_icache_range((unsigned long)opc,
-                                        (unsigned long)opc + 32);
-               local_irq_restore(flags);
-       } else {
-               kvm_err("%s: Invalid address: %p\n", __func__, opc);
-               return -EFAULT;
-       }
-
-       return 0;
-}
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
deleted file mode 100644 (file)
index f9b4f0f..0000000
+++ /dev/null
@@ -1,2324 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Instruction/Exception emulation
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/ktime.h>
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <linux/random.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/cpu-info.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-#include <asm/inst.h>
-
-#undef CONFIG_MIPS_MT
-#include <asm/r4kcache.h>
-#define CONFIG_MIPS_MT
-
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
-#include "kvm_mips_comm.h"
-
-#include "trace.h"
-
-/*
- * Compute the return address and emulate the branch, if required. This
- * function should be called only when the faulting instruction is in a
- * branch delay slot.
- */
-unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
-       unsigned long instpc)
-{
-       unsigned int dspcontrol;
-       union mips_instruction insn;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       long epc = instpc;
-       long nextpc = KVM_INVALID_INST;
-
-       if (epc & 3)
-               goto unaligned;
-
-       /* Read the instruction */
-       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
-
-       if (insn.word == KVM_INVALID_INST)
-               return KVM_INVALID_INST;
-
-       switch (insn.i_format.opcode) {
-               /* jr and jalr are in r_format format. */
-       case spec_op:
-               switch (insn.r_format.func) {
-               case jalr_op:
-                       arch->gprs[insn.r_format.rd] = epc + 8;
-                       /* Fall through */
-               case jr_op:
-                       nextpc = arch->gprs[insn.r_format.rs];
-                       break;
-               }
-               break;
-
-               /*
-                * This group contains:
-                * bltz_op, bgez_op, bltzl_op, bgezl_op,
-                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
-                */
-       case bcond_op:
-               switch (insn.i_format.rt) {
-               case bltz_op:
-               case bltzl_op:
-                       if ((long)arch->gprs[insn.i_format.rs] < 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-
-               case bgez_op:
-               case bgezl_op:
-                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-
-               case bltzal_op:
-               case bltzall_op:
-                       arch->gprs[31] = epc + 8;
-                       if ((long)arch->gprs[insn.i_format.rs] < 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-
-               case bgezal_op:
-               case bgezall_op:
-                       arch->gprs[31] = epc + 8;
-                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-               case bposge32_op:
-                       if (!cpu_has_dsp)
-                               goto sigill;
-
-                       dspcontrol = rddsp(0x01);
-
-                       if (dspcontrol >= 32)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-               }
-               break;
-
-               /* These are unconditional and in j_format. */
-       case jal_op:
-               arch->gprs[31] = instpc + 8;
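-               /* fall through */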
-       case j_op:
-               epc += 4;
-               epc >>= 28;
-               epc <<= 28;
-               epc |= (insn.j_format.target << 2);
-               nextpc = epc;
-               break;
-
-               /* These are conditional and in i_format. */
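-               /*
-                * Branch arithmetic (illustrative note): a taken branch
-                * continues at epc + 4 + (simmediate << 2), i.e. the signed
-                * 16-bit offset is scaled to words and applied relative to
-                * the delay-slot address; a not-taken branch continues at
-                * epc + 8, just past the delay slot.
-                */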
-       case beq_op:
-       case beql_op:
-               if (arch->gprs[insn.i_format.rs] ==
-                   arch->gprs[insn.i_format.rt])
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-       case bne_op:
-       case bnel_op:
-               if (arch->gprs[insn.i_format.rs] !=
-                   arch->gprs[insn.i_format.rt])
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-       case blez_op:           /* not really i_format */
-       case blezl_op:
-               /* rt field assumed to be zero */
-               if ((long)arch->gprs[insn.i_format.rs] <= 0)
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-       case bgtz_op:
-       case bgtzl_op:
-               /* rt field assumed to be zero */
-               if ((long)arch->gprs[insn.i_format.rs] > 0)
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-               /* And now the FPA/cp1 branch instructions. */
-       case cop1_op:
-               kvm_err("%s: unsupported cop1_op\n", __func__);
-               break;
-       }
-
-       return nextpc;
-
-unaligned:
-       kvm_err("%s: unaligned epc\n", __func__);
-       return nextpc;
-
-sigill:
-       kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
-       return nextpc;
-}
-
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
-{
-       unsigned long branch_pc;
-       enum emulation_result er = EMULATE_DONE;
-
-       if (cause & CAUSEF_BD) {
-               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
-               if (branch_pc == KVM_INVALID_INST) {
-                       er = EMULATE_FAIL;
-               } else {
-                       vcpu->arch.pc = branch_pc;
-                       kvm_debug("BD update_pc(): New PC: %#lx\n",
-                                 vcpu->arch.pc);
-               }
-       } else
-               vcpu->arch.pc += 4;
-
-       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
-
-       return er;
-}
-
-/**
- * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
- * @vcpu:      Virtual CPU.
- *
- * Returns:    1 if the CP0_Count timer is disabled by either the guest
- *             CP0_Cause.DC bit or the count_ctl.DC bit.
- *             0 otherwise (in which case CP0_Count timer is running).
- */
-static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
-               (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
-}
-
-/**
- * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
- *
- * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
-{
-       s64 now_ns, periods;
-       u64 delta;
-
-       now_ns = ktime_to_ns(now);
-       delta = now_ns + vcpu->arch.count_dyn_bias;
-
-       if (delta >= vcpu->arch.count_period) {
-               /* If delta is out of safe range the bias needs adjusting */
-               periods = div64_s64(now_ns, vcpu->arch.count_period);
-               vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
-               /* Recalculate delta with new bias */
-               delta = now_ns + vcpu->arch.count_dyn_bias;
-       }
-
-       /*
-        * We've ensured that:
-        *   delta < count_period
-        *
-        * Therefore the intermediate delta*count_hz will never overflow since
-        * at the boundary condition:
-        *   delta = count_period
-        *   delta = NSEC_PER_SEC * 2^32 / count_hz
-        *   delta * count_hz = NSEC_PER_SEC * 2^32
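-        *
-        * Worked example (illustrative): at the 100 MHz set up by
-        * kvm_mips_init_count(), count_period = 2^32 * 10ns ~= 42.9s, and
-        * the boundary product NSEC_PER_SEC * 2^32 ~= 4.3e18 is safely
-        * below the u64 maximum of ~1.8e19.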
-        */
-       return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
-}
-
-/**
- * kvm_mips_count_time() - Get effective current time.
- * @vcpu:      Virtual CPU.
- *
- * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
- * except when the master disable bit is set in count_ctl, in which case it is
- * count_resume, i.e. the time that the count was disabled.
- *
- * Returns:    Effective monotonic ktime for CP0_Count.
- */
-static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
-{
-       if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
-               return vcpu->arch.count_resume;
-
-       return ktime_get();
-}
-
-/**
- * kvm_mips_read_count_running() - Read the current count value as if running.
- * @vcpu:      Virtual CPU.
- * @now:       Kernel time to read CP0_Count at.
- *
- * Returns the current guest CP0_Count register at time @now, handling the
- * case where the timer interrupt is pending and hasn't been handled yet.
- *
- * Returns:    The current value of the guest CP0_Count register.
- */
-static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
-{
-       ktime_t expires;
-       int running;
-
-       /* Is the hrtimer pending? */
-       expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
-       if (ktime_compare(now, expires) >= 0) {
-               /*
-                * Cancel it while we handle it so there's no chance of
-                * interference with the timeout handler.
-                */
-               running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
-
-               /* Nothing should be waiting on the timeout */
-               kvm_mips_callbacks->queue_timer_int(vcpu);
-
-               /*
-                * Restart the timer if it was running based on the expiry time
-                * we read, so that we don't push it back 2 periods.
-                */
-               if (running) {
-                       expires = ktime_add_ns(expires,
-                                              vcpu->arch.count_period);
-                       hrtimer_start(&vcpu->arch.comparecount_timer, expires,
-                                     HRTIMER_MODE_ABS);
-               }
-       }
-
-       /* Return the biased and scaled guest CP0_Count */
-       return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
-}
-
-/**
- * kvm_mips_read_count() - Read the current count value.
- * @vcpu:      Virtual CPU.
- *
- * Read the current guest CP0_Count value, taking into account whether the timer
- * is stopped.
- *
- * Returns:    The current guest CP0_Count value.
- */
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       /* If count disabled just read static copy of count */
-       if (kvm_mips_count_disabled(vcpu))
-               return kvm_read_c0_guest_count(cop0);
-
-       return kvm_mips_read_count_running(vcpu, ktime_get());
-}
-
-/**
- * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
- * @vcpu:      Virtual CPU.
- * @count:     Output pointer for CP0_Count value at point of freeze.
- *
- * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
- * at the point it was frozen. It is guaranteed that any pending interrupts at
- * the point it was frozen are handled, and none after that point.
- *
- * This is useful where the time/CP0_Count is needed in the calculation of the
- * new parameters.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- *
- * Returns:    The ktime at the point of freeze.
- */
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
-                                      uint32_t *count)
-{
-       ktime_t now;
-
-       /* stop hrtimer before finding time */
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-       now = ktime_get();
-
-       /* find count at this point and handle pending hrtimer */
-       *count = kvm_mips_read_count_running(vcpu, now);
-
-       return now;
-}
-
-/**
- * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
- * @vcpu:      Virtual CPU.
- * @now:       ktime at point of resume.
- * @count:     CP0_Count at point of resume.
- *
- * Resumes the timer and updates the timer expiry based on @now and @count.
- * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
- * parameters need to be changed.
- *
- * It is guaranteed that a timer interrupt immediately after resume will be
- * handled, but not if CP0_Compare is exactly at @count. That case is already
- * handled by kvm_mips_freeze_hrtimer().
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
-                                   ktime_t now, uint32_t count)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t compare;
-       u64 delta;
-       ktime_t expire;
-
-       /* Calculate timeout (wrap 0 to 2^32) */
-       compare = kvm_read_c0_guest_compare(cop0);
-       delta = (u64)(uint32_t)(compare - count - 1) + 1;
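-       /*
-        * Illustrative check of the wrap: when compare == count,
-        * (u64)(uint32_t)(compare - count - 1) + 1 evaluates to 2^32,
-        * a full period, rather than 0.
-        */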
-       delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
-       expire = ktime_add_ns(now, delta);
-
-       /* Update hrtimer to use new timeout */
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-       hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
-}
-
-/**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu:      Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered that do not depend on the time at
- * which the change occurs (in those cases kvm_mips_freeze_hrtimer() and
- * kvm_mips_resume_hrtimer() are used directly).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
-       ktime_t now;
-       uint32_t count;
-
-       /*
-        * freeze_hrtimer takes care of timer interrupts <= count, and
-        * resume_hrtimer takes care of timer interrupts > count.
-        */
-       now = kvm_mips_freeze_hrtimer(vcpu, &count);
-       kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
- * kvm_mips_write_count() - Modify the count and update timer.
- * @vcpu:      Virtual CPU.
- * @count:     Guest CP0_Count value to set.
- *
- * Sets the CP0_Count value and updates the timer accordingly.
- */
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       ktime_t now;
-
-       /* Calculate bias */
-       now = kvm_mips_count_time(vcpu);
-       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
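-       /*
-        * Illustrative consequence: kvm_mips_read_count_running() returns
-        * count_bias + kvm_mips_ktime_to_count(vcpu, now), so a read
-        * immediately after this write yields exactly @count.
-        */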
-
-       if (kvm_mips_count_disabled(vcpu))
-               /* The timer's disabled, adjust the static count */
-               kvm_write_c0_guest_count(cop0, count);
-       else
-               /* Update timeout */
-               kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
- * kvm_mips_init_count() - Initialise timer.
- * @vcpu:      Virtual CPU.
- *
- * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
- * it going if it's enabled.
- */
-void kvm_mips_init_count(struct kvm_vcpu *vcpu)
-{
-       /* 100 MHz */
-       vcpu->arch.count_hz = 100*1000*1000;
-       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
-                                         vcpu->arch.count_hz);
-       vcpu->arch.count_dyn_bias = 0;
-
-       /* Starting at 0 */
-       kvm_mips_write_count(vcpu, 0);
-}
-
-/**
- * kvm_mips_set_count_hz() - Update the frequency of the timer.
- * @vcpu:      Virtual CPU.
- * @count_hz:  Frequency of CP0_Count timer in Hz.
- *
- * Change the frequency of the CP0_Count timer. This is done atomically so that
- * CP0_Count is continuous and no timer interrupt is lost.
- *
- * Returns:    -EINVAL if @count_hz is out of range.
- *             0 on success.
- */
-int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int dc;
-       ktime_t now;
-       u32 count;
-
-       /* ensure the frequency is in a sensible range... */
-       if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
-               return -EINVAL;
-       /* ... and has actually changed */
-       if (vcpu->arch.count_hz == count_hz)
-               return 0;
-
-       /* Safely freeze timer so we can keep it continuous */
-       dc = kvm_mips_count_disabled(vcpu);
-       if (dc) {
-               now = kvm_mips_count_time(vcpu);
-               count = kvm_read_c0_guest_count(cop0);
-       } else {
-               now = kvm_mips_freeze_hrtimer(vcpu, &count);
-       }
-
-       /* Update the frequency */
-       vcpu->arch.count_hz = count_hz;
-       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
-       vcpu->arch.count_dyn_bias = 0;
-
-       /* Calculate adjusted bias so dynamic count is unchanged */
-       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
-
-       /* Update and resume hrtimer */
-       if (!dc)
-               kvm_mips_resume_hrtimer(vcpu, now, count);
-       return 0;
-}
-
-/**
- * kvm_mips_write_compare() - Modify compare and update timer.
- * @vcpu:      Virtual CPU.
- * @compare:   New CP0_Compare value.
- *
- * Update CP0_Compare to a new value and update the timeout.
- */
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       /* if unchanged, must just be an ack */
-       if (kvm_read_c0_guest_compare(cop0) == compare)
-               return;
-
-       /* Update compare */
-       kvm_write_c0_guest_compare(cop0, compare);
-
-       /* Update timeout if count enabled */
-       if (!kvm_mips_count_disabled(vcpu))
-               kvm_mips_update_hrtimer(vcpu);
-}
-
-/**
- * kvm_mips_count_disable() - Disable count.
- * @vcpu:      Virtual CPU.
- *
- * Disable the CP0_Count timer. A timer interrupt on or before the final stop
- * time will be handled but not after.
- *
- * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
- * count_ctl.DC has been set (count disabled).
- *
- * Returns:    The time that the timer was stopped.
- */
-static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t count;
-       ktime_t now;
-
-       /* Stop hrtimer */
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-
-       /* Set the static count from the dynamic count, handling pending TI */
-       now = ktime_get();
-       count = kvm_mips_read_count_running(vcpu, now);
-       kvm_write_c0_guest_count(cop0, count);
-
-       return now;
-}
-
-/**
- * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
- * @vcpu:      Virtual CPU.
- *
- * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
- * before the final stop time will be handled if the timer isn't disabled by
- * count_ctl.DC, but not after.
- *
- * Assumes CP0_Cause.DC is clear (count enabled).
- */
-void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
-       if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
-               kvm_mips_count_disable(vcpu);
-}
-
-/**
- * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
- * @vcpu:      Virtual CPU.
- *
- * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
- * the start time will be handled if the timer isn't disabled by count_ctl.DC,
- * potentially before even returning, so the caller should be careful with
- * ordering of CP0_Cause modifications so as not to lose it.
- *
- * Assumes CP0_Cause.DC is set (count disabled).
- */
-void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t count;
-
-       kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
-
-       /*
-        * Set the dynamic count to match the static count.
-        * This starts the hrtimer if count_ctl.DC allows it.
-        * Otherwise it conveniently updates the biases.
-        */
-       count = kvm_read_c0_guest_count(cop0);
-       kvm_mips_write_count(vcpu, count);
-}
-
-/**
- * kvm_mips_set_count_ctl() - Update the count control KVM register.
- * @vcpu:      Virtual CPU.
- * @count_ctl: Count control register new value.
- *
- * Set the count control KVM register. The timer is updated accordingly.
- *
- * Returns:    -EINVAL if reserved bits are set.
- *             0 on success.
- */
-int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       s64 changed = count_ctl ^ vcpu->arch.count_ctl;
-       s64 delta;
-       ktime_t expire, now;
-       uint32_t count, compare;
-
-       /* Only allow defined bits to be changed */
-       if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
-               return -EINVAL;
-
-       /* Apply new value */
-       vcpu->arch.count_ctl = count_ctl;
-
-       /* Master CP0_Count disable */
-       if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
-               /* Is CP0_Cause.DC already disabling CP0_Count? */
-               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
-                       if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
-                               /* Just record the current time */
-                               vcpu->arch.count_resume = ktime_get();
-               } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
-                       /* disable timer and record current time */
-                       vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
-               } else {
-                       /*
-                        * Calculate timeout relative to static count at resume
-                        * time (wrap 0 to 2^32).
-                        */
-                       count = kvm_read_c0_guest_count(cop0);
-                       compare = kvm_read_c0_guest_compare(cop0);
-                       delta = (u64)(uint32_t)(compare - count - 1) + 1;
-                       delta = div_u64(delta * NSEC_PER_SEC,
-                                       vcpu->arch.count_hz);
-                       expire = ktime_add_ns(vcpu->arch.count_resume, delta);
-
-                       /* Handle pending interrupt */
-                       now = ktime_get();
-                       if (ktime_compare(now, expire) >= 0)
-                               /* Nothing should be waiting on the timeout */
-                               kvm_mips_callbacks->queue_timer_int(vcpu);
-
-                       /* Resume hrtimer without changing bias */
-                       count = kvm_mips_read_count_running(vcpu, now);
-                       kvm_mips_resume_hrtimer(vcpu, now, count);
-               }
-       }
-
-       return 0;
-}
-
-/**
- * kvm_mips_set_count_resume() - Update the count resume KVM register.
- * @vcpu:              Virtual CPU.
- * @count_resume:      Count resume register new value.
- *
- * Set the count resume KVM register.
- *
- * Returns:    -EINVAL if out of valid range (0..now).
- *             0 on success.
- */
-int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
-{
-       /*
-        * It doesn't make sense for the resume time to be in the future, as it
-        * would be possible for the next interrupt to be more than a full
-        * period in the future.
-        */
-       if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
-               return -EINVAL;
-
-       vcpu->arch.count_resume = ns_to_ktime(count_resume);
-       return 0;
-}
-
-/**
- * kvm_mips_count_timeout() - Push timer forward on timeout.
- * @vcpu:      Virtual CPU.
- *
- * Handle an hrtimer event by pushing the hrtimer forward one period.
- *
- * Returns:    The hrtimer_restart value to return to the hrtimer subsystem.
- */
-enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
-{
-       /* Add the Count period to the current expiry time */
-       hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
-                              vcpu->arch.count_period);
-       return HRTIMER_RESTART;
-}
-
-enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       enum emulation_result er = EMULATE_DONE;
-
-       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
-               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
-                         kvm_read_c0_guest_epc(cop0));
-               kvm_clear_c0_guest_status(cop0, ST0_EXL);
-               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
-
-       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-               kvm_clear_c0_guest_status(cop0, ST0_ERL);
-               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
-       } else {
-               kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
-                       vcpu->arch.pc);
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
-{
-       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
-                 vcpu->arch.pending_exceptions);
-
-       ++vcpu->stat.wait_exits;
-       trace_kvm_exit(vcpu, WAIT_EXITS);
-       if (!vcpu->arch.pending_exceptions) {
-               vcpu->arch.wait = 1;
-               kvm_vcpu_block(vcpu);
-
-               /*
-                * If we are runnable, then definitely go off to user space to
-                * check if any I/O interrupts are pending.
-                */
-               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
-                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
-               }
-       }
-
-       return EMULATE_DONE;
-}
-
-/*
- * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
- * we can catch this if things ever change.
- */
-enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t pc = vcpu->arch.pc;
-
-       kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
-       return EMULATE_FAIL;
-}
-
-/* Write Guest TLB Entry @ Index */
-enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int index = kvm_read_c0_guest_index(cop0);
-       struct kvm_mips_tlb *tlb = NULL;
-       uint32_t pc = vcpu->arch.pc;
-
-       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-               kvm_debug("%s: illegal index: %d\n", __func__, index);
-               kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-                         pc, index, kvm_read_c0_guest_entryhi(cop0),
-                         kvm_read_c0_guest_entrylo0(cop0),
-                         kvm_read_c0_guest_entrylo1(cop0),
-                         kvm_read_c0_guest_pagemask(cop0));
-               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
-       }
-
-       tlb = &vcpu->arch.guest_tlb[index];
-       /*
-        * Probe the shadow host TLB for the entry being overwritten, if one
-        * matches, invalidate it
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-
-       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
-       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
-       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
-       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
-
-       kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-                 pc, index, kvm_read_c0_guest_entryhi(cop0),
-                 kvm_read_c0_guest_entrylo0(cop0),
-                 kvm_read_c0_guest_entrylo1(cop0),
-                 kvm_read_c0_guest_pagemask(cop0));
-
-       return EMULATE_DONE;
-}
-
-/* Write Guest TLB Entry @ Random Index */
-enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_mips_tlb *tlb = NULL;
-       uint32_t pc = vcpu->arch.pc;
-       int index;
-
-       get_random_bytes(&index, sizeof(index));
-       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
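-       /*
-        * Note: assuming KVM_MIPS_GUEST_TLB_SIZE is a power of two, the mask
-        * above already confines index to 0..KVM_MIPS_GUEST_TLB_SIZE-1, so
-        * the range check below is purely defensive.
-        */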
-
-       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-               kvm_err("%s: illegal index: %d\n", __func__, index);
-               return EMULATE_FAIL;
-       }
-
-       tlb = &vcpu->arch.guest_tlb[index];
-
-       /*
-        * Probe the shadow host TLB for the entry being overwritten, if one
-        * matches, invalidate it
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-
-       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
-       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
-       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
-       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
-
-       kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
-                 pc, index, kvm_read_c0_guest_entryhi(cop0),
-                 kvm_read_c0_guest_entrylo0(cop0),
-                 kvm_read_c0_guest_entrylo1(cop0));
-
-       return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       long entryhi = kvm_read_c0_guest_entryhi(cop0);
-       uint32_t pc = vcpu->arch.pc;
-       int index = -1;
-
-       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-
-       kvm_write_c0_guest_index(cop0, index);
-
-       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
-                 index);
-
-       return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
-                                          uint32_t cause, struct kvm_run *run,
-                                          struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       enum emulation_result er = EMULATE_DONE;
-       int32_t rt, rd, copz, sel, co_bit, op;
-       uint32_t pc = vcpu->arch.pc;
-       unsigned long curr_pc;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       copz = (inst >> 21) & 0x1f;
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
-       co_bit = (inst >> 25) & 1;
-
-       if (co_bit) {
-               op = (inst) & 0xff;
-
-               switch (op) {
-               case tlbr_op:   /*  Read indexed TLB entry  */
-                       er = kvm_mips_emul_tlbr(vcpu);
-                       break;
-               case tlbwi_op:  /*  Write indexed  */
-                       er = kvm_mips_emul_tlbwi(vcpu);
-                       break;
-               case tlbwr_op:  /*  Write random  */
-                       er = kvm_mips_emul_tlbwr(vcpu);
-                       break;
-               case tlbp_op:   /* TLB Probe */
-                       er = kvm_mips_emul_tlbp(vcpu);
-                       break;
-               case rfe_op:
-                       kvm_err("!!!COP0_RFE!!!\n");
-                       break;
-               case eret_op:
-                       er = kvm_mips_emul_eret(vcpu);
-                       goto dont_update_pc;
-                       break;
-               case wait_op:
-                       er = kvm_mips_emul_wait(vcpu);
-                       break;
-               }
-       } else {
-               switch (copz) {
-               case mfc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-                       cop0->stat[rd][sel]++;
-#endif
-                       /* Get reg */
-                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
-                               vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
-                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
-                               vcpu->arch.gprs[rt] = 0x0;
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
-                       } else {
-                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
-                       }
-
-                       kvm_debug
-                           ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
-                            pc, rd, sel, rt, vcpu->arch.gprs[rt]);
-
-                       break;
-
-               case dmfc_op:
-                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
-                       break;
-
-               case mtc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-                       cop0->stat[rd][sel]++;
-#endif
-                       if ((rd == MIPS_CP0_TLB_INDEX)
-                           && (vcpu->arch.gprs[rt] >=
-                               KVM_MIPS_GUEST_TLB_SIZE)) {
-                               kvm_err("Invalid TLB Index: %ld",
-                                       vcpu->arch.gprs[rt]);
-                               er = EMULATE_FAIL;
-                               break;
-                       }
-#define C0_EBASE_CORE_MASK 0xff
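-                       /*
-                        * Note: EBase is CP0 register 15 sel 1 (hence the
-                        * MIPS_CP0_PRID/sel == 1 test below); only the low
-                        * eight core-number bits are preserved on a write.
-                        */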
-                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
-                               /* Preserve CORE number */
-                               kvm_change_c0_guest_ebase(cop0,
-                                                         ~(C0_EBASE_CORE_MASK),
-                                                         vcpu->arch.gprs[rt]);
-                               kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
-                                       kvm_read_c0_guest_ebase(cop0));
-                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-                               uint32_t nasid =
-                                       vcpu->arch.gprs[rt] & ASID_MASK;
-                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
-                                   ((kvm_read_c0_guest_entryhi(cop0) &
-                                     ASID_MASK) != nasid)) {
-                                       kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
-                                               kvm_read_c0_guest_entryhi(cop0)
-                                               & ASID_MASK,
-                                               vcpu->arch.gprs[rt]
-                                               & ASID_MASK);
-
-                                       /* Blow away the shadow host TLBs */
-                                       kvm_mips_flush_host_tlb(1);
-                               }
-                               kvm_write_c0_guest_entryhi(cop0,
-                                                          vcpu->arch.gprs[rt]);
-                       }
-                       /* Are we writing to COUNT */
-                       else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
-                               kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
-                               goto done;
-                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
-                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
-                                         pc, kvm_read_c0_guest_compare(cop0),
-                                         vcpu->arch.gprs[rt]);
-
-                               /*
-                                * We are writing to COMPARE: clear any
-                                * pending timer interrupt first.
-                                */
-                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
-                               kvm_mips_write_compare(vcpu,
-                                                      vcpu->arch.gprs[rt]);
-                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
-                               kvm_write_c0_guest_status(cop0,
-                                                         vcpu->arch.gprs[rt]);
-                               /*
-                                * Make sure that CU1 and NMI bits are
-                                * never set
-                                */
-                               kvm_clear_c0_guest_status(cop0,
-                                                         (ST0_CU1 | ST0_NMI));
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
-                       } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
-                               uint32_t old_cause, new_cause;
-
-                               old_cause = kvm_read_c0_guest_cause(cop0);
-                               new_cause = vcpu->arch.gprs[rt];
-                               /* Update R/W bits */
-                               kvm_change_c0_guest_cause(cop0, 0x08800300,
-                                                         new_cause);
-                               /* DC bit enabling/disabling timer? */
-                               if ((old_cause ^ new_cause) & CAUSEF_DC) {
-                                       if (new_cause & CAUSEF_DC)
-                                               kvm_mips_count_disable_cause(vcpu);
-                                       else
-                                               kvm_mips_count_enable_cause(vcpu);
-                               }
-                       } else {
-                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
-                       }
-
-                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
-                                 rd, sel, cop0->reg[rd][sel]);
-                       break;
-
-               case dmtc_op:
-                       kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
-                               vcpu->arch.pc, rt, rd, sel);
-                       er = EMULATE_FAIL;
-                       break;
-
-               case mfmcz_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-                       cop0->stat[MIPS_CP0_STATUS][0]++;
-#endif
-                       if (rt != 0) {
-                               vcpu->arch.gprs[rt] =
-                                   kvm_read_c0_guest_status(cop0);
-                       }
-                       /* EI */
-                       if (inst & 0x20) {
-                               kvm_debug("[%#lx] mfmcz_op: EI\n",
-                                         vcpu->arch.pc);
-                               kvm_set_c0_guest_status(cop0, ST0_IE);
-                       } else {
-                               kvm_debug("[%#lx] mfmcz_op: DI\n",
-                                         vcpu->arch.pc);
-                               kvm_clear_c0_guest_status(cop0, ST0_IE);
-                       }
-
-                       break;
-
-               case wrpgpr_op:
-                       {
-                               uint32_t css =
-                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
-                               uint32_t pss =
-                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
-                               /*
-                                * We don't support any shadow register sets, so
-                                * SRSCtl[PSS] == SRSCtl[CSS] = 0
-                                */
-                               if (css || pss) {
-                                       er = EMULATE_FAIL;
-                                       break;
-                               }
-                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
-                                         vcpu->arch.gprs[rt]);
-                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
-                       }
-                       break;
-               default:
-                       kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
-                               vcpu->arch.pc, copz);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-       }
-
-done:
-       /* Rollback PC only if emulation was unsuccessful */
-       if (er == EMULATE_FAIL)
-               vcpu->arch.pc = curr_pc;
-
-dont_update_pc:
-       /*
-        * This is for special instructions whose emulation
-        * updates the PC, so do not overwrite the PC under
-        * any circumstances
-        */
-
-       return er;
-}
-
-enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
-                                            struct kvm_run *run,
-                                            struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DO_MMIO;
-       int32_t op, base, rt, offset;
-       uint32_t bytes;
-       void *data = run->mmio.data;
-       unsigned long curr_pc;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       rt = (inst >> 16) & 0x1f;
-       base = (inst >> 21) & 0x1f;
-       offset = inst & 0xffff;
-       op = (inst >> 26) & 0x3f;
-
-       switch (op) {
-       case sb_op:
-               bytes = 1;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-               }
-               run->mmio.phys_addr =
-                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
-                                                  host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.len = bytes;
-               run->mmio.is_write = 1;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 1;
-               *(u8 *) data = vcpu->arch.gprs[rt];
-               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
-                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
-                         *(uint8_t *) data);
-
-               break;
-
-       case sw_op:
-               bytes = 4;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-               }
-               run->mmio.phys_addr =
-                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
-                                                  host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 1;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 1;
-               *(uint32_t *) data = vcpu->arch.gprs[rt];
-
-               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
-                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
-                         vcpu->arch.gprs[rt], *(uint32_t *) data);
-               break;
-
-       case sh_op:
-               bytes = 2;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-               }
-               run->mmio.phys_addr =
-                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
-                                                  host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 1;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 1;
-               *(uint16_t *) data = vcpu->arch.gprs[rt];
-
-               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
-                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
-                         vcpu->arch.gprs[rt], *(uint16_t *) data);
-               break;
-
-       default:
-               kvm_err("Store not yet supported");
-               er = EMULATE_FAIL;
-               break;
-       }
-
-       /* Rollback PC if emulation was unsuccessful */
-       if (er == EMULATE_FAIL)
-               vcpu->arch.pc = curr_pc;
-
-       return er;
-}
-
-enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
-                                           struct kvm_run *run,
-                                           struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DO_MMIO;
-       int32_t op, base, rt, offset;
-       uint32_t bytes;
-
-       rt = (inst >> 16) & 0x1f;
-       base = (inst >> 21) & 0x1f;
-       offset = inst & 0xffff;
-       op = (inst >> 26) & 0x3f;
-
-       vcpu->arch.pending_load_cause = cause;
-       vcpu->arch.io_gpr = rt;
-
-       switch (op) {
-       case lw_op:
-               bytes = 4;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
-                                                  host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 0;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 0;
-               break;
-
-       case lh_op:
-       case lhu_op:
-               bytes = 2;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
-                                                  host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 0;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 0;
-
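-               /*
-                * mmio_needed == 2 marks the signed variant (lh) so the MMIO
-                * completion path sign-extends the result; 1 marks lhu.
-                */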
-               if (op == lh_op)
-                       vcpu->mmio_needed = 2;
-               else
-                       vcpu->mmio_needed = 1;
-
-               break;
-
-       case lbu_op:
-       case lb_op:
-               bytes = 1;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
-                                                  host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 0;
-               vcpu->mmio_is_write = 0;
-
-               if (op == lb_op)
-                       vcpu->mmio_needed = 2;
-               else
-                       vcpu->mmio_needed = 1;
-
-               break;
-
-       default:
-               kvm_err("Load not yet supported");
-               er = EMULATE_FAIL;
-               break;
-       }
-
-       return er;
-}
-
-int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
-{
-       unsigned long offset = (va & ~PAGE_MASK);
-       struct kvm *kvm = vcpu->kvm;
-       unsigned long pa;
-       gfn_t gfn;
-       pfn_t pfn;
-
-       gfn = va >> PAGE_SHIFT;
-
-       if (gfn >= kvm->arch.guest_pmap_npages) {
-               kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               return -1;
-       }
-       pfn = kvm->arch.guest_pmap[gfn];
-       pa = (pfn << PAGE_SHIFT) | offset;
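-       /*
-        * Illustrative (assuming 4KiB pages, PAGE_SHIFT == 12): va 0x00401234
-        * yields gfn 0x401 and offset 0x234, so
-        * pa = (guest_pmap[0x401] << 12) | 0x234.
-        */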
-
-       kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
-                 CKSEG0ADDR(pa));
-
-       local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32);
-       return 0;
-}
-
-#define MIPS_CACHE_OP_INDEX_INV         0x0
-#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
-#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
-#define MIPS_CACHE_OP_IMP               0x3
-#define MIPS_CACHE_OP_HIT_INV           0x4
-#define MIPS_CACHE_OP_FILL_WB_INV       0x5
-#define MIPS_CACHE_OP_HIT_HB            0x6
-#define MIPS_CACHE_OP_FETCH_LOCK        0x7
-
-#define MIPS_CACHE_ICACHE               0x0
-#define MIPS_CACHE_DCACHE               0x1
-#define MIPS_CACHE_SEC                  0x3
-
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
-                                            uint32_t cause,
-                                            struct kvm_run *run,
-                                            struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       enum emulation_result er = EMULATE_DONE;
-       int32_t offset, cache, op_inst, op, base;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       unsigned long va;
-       unsigned long curr_pc;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       base = (inst >> 21) & 0x1f;
-       op_inst = (inst >> 16) & 0x1f;
-       offset = inst & 0xffff;
-       cache = (inst >> 16) & 0x3;
-       op = (inst >> 18) & 0x7;
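-       /*
-        * Illustrative decode: the 5-bit op field 0x15 (0b10101) splits into
-        * cache = 0b01 (MIPS_CACHE_DCACHE) and op = 0b101
-        * (MIPS_CACHE_OP_FILL_WB_INV), i.e. Hit Writeback Invalidate D.
-        */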
-
-       va = arch->gprs[base] + offset;
-
-       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-                 cache, op, base, arch->gprs[base], offset);
-
-       /*
-        * Treat INDEX_INV as a nop, basically issued by Linux on startup to
-        * invalidate the caches entirely by stepping through all the
-        * ways/indexes
-        */
-       if (op == MIPS_CACHE_OP_INDEX_INV) {
-               kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-                         vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
-                         arch->gprs[base], offset);
-
-               if (cache == MIPS_CACHE_DCACHE)
-                       r4k_blast_dcache();
-               else if (cache == MIPS_CACHE_ICACHE)
-                       r4k_blast_icache();
-               else {
-                       kvm_err("%s: unsupported CACHE INDEX operation\n",
-                               __func__);
-                       return EMULATE_FAIL;
-               }
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-               kvm_mips_trans_cache_index(inst, opc, vcpu);
-#endif
-               goto done;
-       }
-
-       preempt_disable();
-       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
-                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
-       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
-                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
-               int index;
-
-               /* If an entry already exists then skip */
-               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
-                       goto skip_fault;
-
-               /*
-                * If the address is not in the guest TLB, then give the guest a
-                * fault; the resulting handler will do the right thing.
-                */
-               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-                                                 (kvm_read_c0_guest_entryhi
-                                                  (cop0) & ASID_MASK));
-
-               if (index < 0) {
-                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
-                       vcpu->arch.host_cp0_badvaddr = va;
-                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
-                                                        vcpu);
-                       preempt_enable();
-                       goto dont_update_pc;
-               } else {
-                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-                       /*
-                        * Check if the entry is valid, if not then setup a TLB
-                        * invalid exception to the guest
-                        */
-                       if (!TLB_IS_VALID(*tlb, va)) {
-                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
-                                                               run, vcpu);
-                               preempt_enable();
-                               goto dont_update_pc;
-                       } else {
-                               /*
-                                * We fault an entry from the guest tlb to the
-                                * shadow host TLB
-                                */
-                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
-                                                                    NULL,
-                                                                    NULL);
-                       }
-               }
-       } else {
-               kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-                       cache, op, base, arch->gprs[base], offset);
-               er = EMULATE_FAIL;
-               preempt_enable();
-               goto dont_update_pc;
-
-       }
-
-skip_fault:
-       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
-       if (cache == MIPS_CACHE_DCACHE
-           && (op == MIPS_CACHE_OP_FILL_WB_INV
-               || op == MIPS_CACHE_OP_HIT_INV)) {
-               flush_dcache_line(va);
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-               /*
-                * Replace the CACHE instruction with a SYNCI; not equivalent,
-                * but it avoids a trap
-                */
-               kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
-       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
-               flush_dcache_line(va);
-               flush_icache_line(va);
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-               /* Replace the CACHE instruction, with a SYNCI */
-               kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
-       } else {
-               kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-                       cache, op, base, arch->gprs[base], offset);
-               er = EMULATE_FAIL;
-               preempt_enable();
-               goto dont_update_pc;
-       }
-
-       preempt_enable();
-
-dont_update_pc:
-       /* Rollback PC */
-       vcpu->arch.pc = curr_pc;
-done:
-       return er;
-}
-
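The field extraction at the top of kvm_mips_emulate_cache() follows the fixed MIPS32 CACHE encoding: the base register lives in bits 25:21, the 5-bit rt field splits into an operation (bits 20:18) and a cache selector (bits 17:16), and the low 16 bits hold a signed displacement. A minimal stand-alone sketch of that decode (illustrative user-space C, not kernel code; the struct and function names are invented here):

        #include <stdint.h>

        struct cache_fields {                   /* hypothetical helper type */
                int base;                       /* GPR holding the base address, bits 25:21 */
                int op;                         /* cache operation, bits 20:18 */
                int cache;                      /* cache selector, bits 17:16 */
                int16_t offset;                 /* signed 16-bit displacement */
        };

        static struct cache_fields decode_cache(uint32_t inst)
        {
                struct cache_fields f;

                f.base   = (inst >> 21) & 0x1f;
                f.op     = (inst >> 18) & 0x7;
                f.cache  = (inst >> 16) & 0x3;
                f.offset = (int16_t)(inst & 0xffff);    /* sign-extends on assignment */
                return f;
        }

The effective address is then gprs[base] + offset, exactly as the handler computes va above.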
-enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
-                                           struct kvm_run *run,
-                                           struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-       uint32_t inst;
-
-       /* Fetch the instruction. */
-       if (cause & CAUSEF_BD)
-               opc += 1;
-
-       inst = kvm_get_inst(opc, vcpu);
-
-       switch (((union mips_instruction)inst).r_format.opcode) {
-       case cop0_op:
-               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
-               break;
-       case sb_op:
-       case sh_op:
-       case sw_op:
-               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
-               break;
-       case lb_op:
-       case lbu_op:
-       case lhu_op:
-       case lh_op:
-       case lw_op:
-               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
-               break;
-
-       case cache_op:
-               ++vcpu->stat.cache_exits;
-               trace_kvm_exit(vcpu, CACHE_EXITS);
-               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
-               break;
-
-       default:
-               kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
-                       inst);
-               kvm_arch_vcpu_dump_regs(vcpu);
-               er = EMULATE_FAIL;
-               break;
-       }
-
-       return er;
-}
-
-enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
-                                              uint32_t *opc,
-                                              struct kvm_run *run,
-                                              struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_SYSCALL << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
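Every kvm_mips_emulate_*() delivery helper below repeats the same sequence: if guest Status.EXL is clear, save the faulting PC into guest EPC, set EXL, and mirror the branch-delay bit into Cause.BD; then write the ExcCode field and point the PC at the guest's exception vector. A condensed model of that pattern (illustrative only; the struct is a stand-in for the guest's mips_coproc state, and the bit positions follow the architectural Status/Cause layouts):

        #include <stdint.h>
        #include <stdbool.h>

        #define ST0_EXL         (1u << 1)       /* Status.EXL */
        #define CAUSEF_BD       (1u << 31)      /* Cause.BD */
        #define CAUSEB_EXCCODE  2               /* Cause.ExcCode, bits 6:2 */
        #define GUEST_VECTOR    0x180ul         /* offset from the guest exception base */

        struct guest_cp0 {                      /* hypothetical stand-in */
                uint32_t status, cause;
                unsigned long epc, pc;
        };

        static void deliver_guest_exc(struct guest_cp0 *c, uint32_t exccode,
                                      bool in_bd_slot, unsigned long exc_base)
        {
                if (!(c->status & ST0_EXL)) {
                        c->epc = c->pc;         /* save old PC */
                        c->status |= ST0_EXL;
                        if (in_bd_slot)
                                c->cause |= CAUSEF_BD;
                        else
                                c->cause &= ~CAUSEF_BD;
                }
                /* Replace ExcCode and jump to the exception entry point. */
                c->cause = (c->cause & ~(0x1fu << CAUSEB_EXCCODE)) |
                           (exccode << CAUSEB_EXCCODE);
                c->pc = exc_base + GUEST_VECTOR;
        }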
-enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
-                                                 uint32_t *opc,
-                                                 struct kvm_run *run,
-                                                 struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               /* set pc to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x0;
-
-       } else {
-               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
-                                                uint32_t *opc,
-                                                struct kvm_run *run,
-                                                struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       unsigned long entryhi =
-               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
-                         arch->pc);
-
-               /* set pc to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
-                                                 uint32_t *opc,
-                                                 struct kvm_run *run,
-                                                 struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x0;
-       } else {
-               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
-                                                uint32_t *opc,
-                                                struct kvm_run *run,
-                                                struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       } else {
-               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return EMULATE_DONE;
-}
-
-/* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
-                                            struct kvm_run *run,
-                                            struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-#ifdef DEBUG
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-       int index;
-
-       /* If address not in the guest TLB, then we are in trouble */
-       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-       if (index < 0) {
-               /* XXXKYMA Invalidate and retry */
-               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
-               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
-                    __func__, entryhi);
-               kvm_mips_dump_guest_tlbs(vcpu);
-               kvm_mips_dump_host_tlbs();
-               return EMULATE_FAIL;
-       }
-#endif
-
-       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
-       return er;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
-                                             uint32_t *opc,
-                                             struct kvm_run *run,
-                                             struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
-                         arch->pc);
-
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       } else {
-               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
-                                              uint32_t *opc,
-                                              struct kvm_run *run,
-                                              struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-       }
-
-       arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
-       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
-
-       return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
-                                             uint32_t *opc,
-                                             struct kvm_run *run,
-                                             struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_RES_INST << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_err("Trying to deliver RI when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
-                                             uint32_t *opc,
-                                             struct kvm_run *run,
-                                             struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_BREAK << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_err("Trying to deliver BP when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-/* ll/sc, rdhwr, sync emulation */
-
-#define OPCODE 0xfc000000
-#define BASE   0x03e00000
-#define RT     0x001f0000
-#define OFFSET 0x0000ffff
-#define LL     0xc0000000
-#define SC     0xe0000000
-#define SPEC0  0x00000000
-#define SPEC3  0x7c000000
-#define RD     0x0000f800
-#define FUNC   0x0000003f
-#define SYNC   0x0000000f
-#define RDHWR  0x0000003b
-
-enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
-                                        struct kvm_run *run,
-                                        struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long curr_pc;
-       uint32_t inst;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to roll back the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       /* Fetch the instruction. */
-       if (cause & CAUSEF_BD)
-               opc += 1;
-
-       inst = kvm_get_inst(opc, vcpu);
-
-       if (inst == KVM_INVALID_INST) {
-               kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
-               return EMULATE_FAIL;
-       }
-
-       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
-               int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-               int rd = (inst & RD) >> 11;
-               int rt = (inst & RT) >> 16;
-               /* If usermode, check RDHWR rd is allowed by guest HWREna */
-               if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
-                       kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
-                                 rd, opc);
-                       goto emulate_ri;
-               }
-               switch (rd) {
-               case 0: /* CPU number */
-                       arch->gprs[rt] = 0;
-                       break;
-               case 1: /* SYNCI length */
-                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
-                                            current_cpu_data.icache.linesz);
-                       break;
-               case 2: /* Read count register */
-                       arch->gprs[rt] = kvm_mips_read_count(vcpu);
-                       break;
-               case 3: /* Count register resolution */
-                       switch (current_cpu_data.cputype) {
-                       case CPU_20KC:
-                       case CPU_25KF:
-                               arch->gprs[rt] = 1;
-                               break;
-                       default:
-                               arch->gprs[rt] = 2;
-                       }
-                       break;
-               case 29:
-                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
-                       break;
-
-               default:
-                       kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
-                       goto emulate_ri;
-               }
-       } else {
-               kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
-               goto emulate_ri;
-       }
-
-       return EMULATE_DONE;
-
-emulate_ri:
-       /*
-        * Rollback PC (if in branch delay slot then the PC already points to
-        * branch target), and pass the RI exception to the guest OS.
-        */
-       vcpu->arch.pc = curr_pc;
-       return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
-}
-
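The OPCODE/FUNC/RD/RT masks defined above kvm_mips_handle_ri() select the SPEC3 major opcode and the RDHWR function code, then extract the hardware-register and destination fields. A stand-alone sketch of the same test (illustrative; compiles in user space):

        #include <stdint.h>

        #define OPCODE  0xfc000000
        #define SPEC3   0x7c000000
        #define FUNC    0x0000003f
        #define RDHWR   0x0000003b
        #define RD      0x0000f800
        #define RT      0x001f0000

        /* Returns 1 and fills rd/rt if inst encodes RDHWR rt, rd; else 0. */
        static int decode_rdhwr(uint32_t inst, int *rd, int *rt)
        {
                if ((inst & OPCODE) != SPEC3 || (inst & FUNC) != RDHWR)
                        return 0;
                *rd = (inst & RD) >> 11;        /* hardware register number */
                *rt = (inst & RT) >> 16;        /* destination GPR */
                return 1;
        }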
-enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
-                                                 struct kvm_run *run)
-{
-       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long curr_pc;
-
-       if (run->mmio.len > sizeof(*gpr)) {
-               kvm_err("Bad MMIO length: %d", run->mmio.len);
-               er = EMULATE_FAIL;
-               goto done;
-       }
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to roll back the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       switch (run->mmio.len) {
-       case 4:
-               *gpr = *(int32_t *) run->mmio.data;
-               break;
-
-       case 2:
-               if (vcpu->mmio_needed == 2)
-                       *gpr = *(int16_t *) run->mmio.data;
-               else
-                       *gpr = *(u16 *) run->mmio.data;
-
-               break;
-       case 1:
-               if (vcpu->mmio_needed == 2)
-                       *gpr = *(int8_t *) run->mmio.data;
-               else
-                       *gpr = *(u8 *) run->mmio.data;
-               break;
-       }
-
-       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-               kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-                         vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-                         vcpu->mmio_needed);
-
-done:
-       return er;
-}
-
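The casts in kvm_mips_complete_mmio_load() decide whether the MMIO result is sign- or zero-extended into the GPR; vcpu->mmio_needed == 2 marks the signed variants (LB/LH) as opposed to LBU/LHU. The halfword case in isolation (a sketch; the is_signed parameter stands in for the mmio_needed flag):

        #include <stdint.h>

        /* Extend a 2-byte MMIO result into a GPR-sized value. */
        static unsigned long extend_mmio_halfword(const void *data, int is_signed)
        {
                if (is_signed)                          /* LH: sign-extend */
                        return (unsigned long)(long)*(const int16_t *)data;
                return *(const uint16_t *)data;         /* LHU: zero-extend */
        }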
-static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
-                                                 uint32_t *opc,
-                                                 struct kvm_run *run,
-                                                 struct kvm_vcpu *vcpu)
-{
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (exccode << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-
-               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
-                         exccode, kvm_read_c0_guest_epc(cop0),
-                         kvm_read_c0_guest_badvaddr(cop0));
-       } else {
-               kvm_err("Trying to deliver EXC when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
-                                              uint32_t *opc,
-                                              struct kvm_run *run,
-                                              struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-
-       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-
-       if (usermode) {
-               switch (exccode) {
-               case T_INT:
-               case T_SYSCALL:
-               case T_BREAK:
-               case T_RES_INST:
-                       break;
-
-               case T_COP_UNUSABLE:
-                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
-                               er = EMULATE_PRIV_FAIL;
-                       break;
-
-               case T_TLB_MOD:
-                       break;
-
-               case T_TLB_LD_MISS:
-                       /*
-                        * If we are accessing Guest kernel space, then send an
-                        * address error exception to the guest
-                        */
-                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-                               kvm_debug("%s: LD MISS @ %#lx\n", __func__,
-                                         badvaddr);
-                               cause &= ~0xff;
-                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
-                               er = EMULATE_PRIV_FAIL;
-                       }
-                       break;
-
-               case T_TLB_ST_MISS:
-                       /*
-                        * If we are accessing Guest kernel space, then send an
-                        * address error exception to the guest
-                        */
-                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-                               kvm_debug("%s: ST MISS @ %#lx\n", __func__,
-                                         badvaddr);
-                               cause &= ~0xff;
-                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
-                               er = EMULATE_PRIV_FAIL;
-                       }
-                       break;
-
-               case T_ADDR_ERR_ST:
-                       kvm_debug("%s: address error ST @ %#lx\n", __func__,
-                                 badvaddr);
-                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
-                               cause &= ~0xff;
-                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
-                       }
-                       er = EMULATE_PRIV_FAIL;
-                       break;
-               case T_ADDR_ERR_LD:
-                       kvm_debug("%s: address error LD @ %#lx\n", __func__,
-                                 badvaddr);
-                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
-                               cause &= ~0xff;
-                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
-                       }
-                       er = EMULATE_PRIV_FAIL;
-                       break;
-               default:
-                       er = EMULATE_PRIV_FAIL;
-                       break;
-               }
-       }
-
-       if (er == EMULATE_PRIV_FAIL)
-               kvm_mips_emulate_exc(cause, opc, run, vcpu);
-
-       return er;
-}
-
-/*
- * User Address (UA) fault; this can happen in two cases:
- * (1) TLB entry not present/valid in either the Guest or the shadow host
- *     TLB; in this case we pass the fault on to the guest kernel and let
- *     it handle it.
- * (2) TLB entry present in the Guest TLB but not in the shadow host TLB;
- *     in this case we inject the entry from the Guest TLB into the shadow
- *     host TLB
- */
-enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
-                                             uint32_t *opc,
-                                             struct kvm_run *run,
-                                             struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       unsigned long va = vcpu->arch.host_cp0_badvaddr;
-       int index;
-
-       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
-                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
-
-       /*
-        * KVM would not have got the exception if this entry was valid in the
-        * shadow host TLB. Check the Guest TLB, if the entry is not there then
-        * send the guest an exception. The guest exc handler should then inject
-        * an entry into the guest TLB.
-        */
-       index = kvm_mips_guest_tlb_lookup(vcpu,
-                                         (va & VPN2_MASK) |
-                                         (kvm_read_c0_guest_entryhi
-                                          (vcpu->arch.cop0) & ASID_MASK));
-       if (index < 0) {
-               if (exccode == T_TLB_LD_MISS) {
-                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
-               } else if (exccode == T_TLB_ST_MISS) {
-                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
-               } else {
-                       kvm_err("%s: invalid exc code: %d\n", __func__,
-                               exccode);
-                       er = EMULATE_FAIL;
-               }
-       } else {
-               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-
-               /*
-                * Check if the entry is valid, if not then setup a TLB invalid
-                * exception to the guest
-                */
-               if (!TLB_IS_VALID(*tlb, va)) {
-                       if (exccode == T_TLB_LD_MISS) {
-                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
-                                                               vcpu);
-                       } else if (exccode == T_TLB_ST_MISS) {
-                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
-                                                               vcpu);
-                       } else {
-                               kvm_err("%s: invalid exc code: %d\n", __func__,
-                                       exccode);
-                               er = EMULATE_FAIL;
-                       }
-               } else {
-                       kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
-                                 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
-                       /*
-                        * OK we have a Guest TLB entry, now inject it into the
-                        * shadow host TLB
-                        */
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
-                                                            NULL);
-               }
-       }
-
-       return er;
-}
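In short, the handler above has three outcomes: no matching guest TLB entry means the guest receives a TLB refill exception; a matching but invalid entry means a TLB invalid exception; a valid entry is faulted into the shadow host TLB. As a compact decision sketch (illustrative names only):

        /* Illustrative: the three-way outcome of a guest user-address fault. */
        enum tlbmiss_action { DELIVER_MISS, DELIVER_INVALID, INJECT_SHADOW };

        static enum tlbmiss_action classify_tlbmiss(int guest_index, int entry_valid)
        {
                if (guest_index < 0)
                        return DELIVER_MISS;    /* not in guest TLB: guest refills */
                if (!entry_valid)
                        return DELIVER_INVALID; /* present but V=0 for this page */
                return INJECT_SHADOW;           /* valid: write into host shadow TLB */
        }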
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
deleted file mode 100644 (file)
index d458c04..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Interrupt delivery
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_int.h"
-
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
-{
-       set_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
-{
-       clear_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
-       /*
-        * Set the Cause bits to reflect the pending timer interrupt;
-        * the ExcCode will be set when we are actually
-        * delivering the interrupt:
-        */
-       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
-
-       /* Queue up an INT exception for the core */
-       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
-
-}
-
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
-       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
-       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
-}
-
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
-                             struct kvm_mips_interrupt *irq)
-{
-       int intr = (int)irq->irq;
-
-       /*
-        * Set the Cause bits to reflect the pending IO interrupt;
-        * the ExcCode will be set when we are actually
-        * delivering the interrupt:
-        */
-       switch (intr) {
-       case 2:
-               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
-               /* Queue up an INT exception for the core */
-               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
-               break;
-
-       case 3:
-               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
-               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
-               break;
-
-       case 4:
-               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
-               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
-               break;
-
-       default:
-               break;
-       }
-
-}
-
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
-                               struct kvm_mips_interrupt *irq)
-{
-       int intr = (int)irq->irq;
-
-       switch (intr) {
-       case -2:
-               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
-               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
-               break;
-
-       case -3:
-               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
-               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
-               break;
-
-       case -4:
-               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
-               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
-               break;
-
-       default:
-               break;
-       }
-
-}
-
-/* Deliver the interrupt of the corresponding priority, if possible. */
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                           uint32_t cause)
-{
-       int allowed = 0;
-       uint32_t exccode;
-
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       switch (priority) {
-       case MIPS_EXC_INT_TIMER:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       case MIPS_EXC_INT_IO:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       case MIPS_EXC_INT_IPI_1:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       case MIPS_EXC_INT_IPI_2:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       /* Are we allowed to deliver the interrupt? */
-       if (allowed) {
-               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-                       /* save old pc */
-                       kvm_write_c0_guest_epc(cop0, arch->pc);
-                       kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-                       if (cause & CAUSEF_BD)
-                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-                       else
-                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
-
-               } else
-                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
-
-               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
-                                         (exccode << CAUSEB_EXCCODE));
-
-               /* XXXSL Set PC to the interrupt exception entry point */
-               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
-                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
-               else
-                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-               clear_bit(priority, &vcpu->arch.pending_exceptions);
-       }
-
-       return allowed;
-}
-
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                         uint32_t cause)
-{
-       return 1;
-}
-
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
-{
-       unsigned long *pending = &vcpu->arch.pending_exceptions;
-       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
-       unsigned int priority;
-
-       if (!(*pending) && !(*pending_clr))
-               return;
-
-       priority = __ffs(*pending_clr);
-       while (priority <= MIPS_EXC_MAX) {
-               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
-                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
-                               break;
-               }
-
-               priority = find_next_bit(pending_clr,
-                                        BITS_PER_BYTE * sizeof(*pending_clr),
-                                        priority + 1);
-       }
-
-       priority = __ffs(*pending);
-       while (priority <= MIPS_EXC_MAX) {
-               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
-                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
-                               break;
-               }
-
-               priority = find_next_bit(pending,
-                                        BITS_PER_BYTE * sizeof(*pending),
-                                        priority + 1);
-       }
-
-}
-
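kvm_mips_deliver_interrupts() above walks the pending bitmaps in ascending priority with __ffs()/find_next_bit(), stopping after the first successful delivery unless the *_ALL_AT_ONCE knobs are set. A user-space sketch of that traversal (illustrative; a plain loop stands in for find_next_bit()):

        #define EXC_MAX 12      /* mirrors MIPS_EXC_MAX below */

        /* Visit set bits of a pending mask from lowest priority upward. */
        static void walk_pending(unsigned long pending,
                                 int (*deliver)(unsigned int priority),
                                 int all_at_once)
        {
                unsigned int prio;

                for (prio = 0; prio <= EXC_MAX; prio++) {
                        if (!(pending & (1ul << prio)))
                                continue;
                        if (deliver(prio) && !all_at_once)
                                break;  /* one delivery per exception cycle */
                }
        }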
-int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
-{
-       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
-}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
deleted file mode 100644 (file)
index 4ab4bdf..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Interrupts
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-/*
- * MIPS exception priorities: exceptions (including interrupts) are queued up
- * for the guest in the order specified by their priorities
- */
-
-#define MIPS_EXC_RESET              0
-#define MIPS_EXC_SRESET             1
-#define MIPS_EXC_DEBUG_ST           2
-#define MIPS_EXC_DEBUG              3
-#define MIPS_EXC_DDB                4
-#define MIPS_EXC_NMI                5
-#define MIPS_EXC_MCHK               6
-#define MIPS_EXC_INT_TIMER          7
-#define MIPS_EXC_INT_IO             8
-#define MIPS_EXC_EXECUTE            9
-#define MIPS_EXC_INT_IPI_1          10
-#define MIPS_EXC_INT_IPI_2          11
-#define MIPS_EXC_MAX                12
-/* XXXSL More to follow */
-
-extern char mips32_exception[], mips32_exceptionEnd[];
-extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
-
-#define C_TI        (_ULCAST_(1) << 30)
-
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
-
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
-int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
-
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
-                             struct kvm_mips_interrupt *irq);
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
-                               struct kvm_mips_interrupt *irq);
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                           uint32_t cause);
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                         uint32_t cause);
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
deleted file mode 100644 (file)
index 03a6ae8..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-/* Define opcode values not defined in <asm/inst.h> */
-
-#ifndef __KVM_MIPS_OPCODE_H__
-#define __KVM_MIPS_OPCODE_H__
-
-/* COP0 Ops */
-#define mfmcz_op       0x0b    /* 01011 */
-#define wrpgpr_op      0x0e    /* 01110 */
-
-/* COP0 opcodes (only if COP0 and CO=1): */
-#define wait_op                0x20    /* 100000 */
-
-#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
deleted file mode 100644 (file)
index a74d602..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: COP0 access histogram
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/kvm_host.h>
-
-char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
-       "WAIT",
-       "CACHE",
-       "Signal",
-       "Interrupt",
-       "COP0/1 Unusable",
-       "TLB Mod",
-       "TLB Miss (LD)",
-       "TLB Miss (ST)",
-       "Address Err (ST)",
-       "Address Error (LD)",
-       "System Call",
-       "Reserved Inst",
-       "Break Inst",
-       "D-Cache Flushes",
-};
-
-char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
-       "Index",
-       "Random",
-       "EntryLo0",
-       "EntryLo1",
-       "Context",
-       "PG Mask",
-       "Wired",
-       "HWREna",
-       "BadVAddr",
-       "Count",
-       "EntryHI",
-       "Compare",
-       "Status",
-       "Cause",
-       "EXC PC",
-       "PRID",
-       "Config",
-       "LLAddr",
-       "Watch Lo",
-       "Watch Hi",
-       "X Context",
-       "Reserved",
-       "Impl Dep",
-       "Debug",
-       "DEPC",
-       "PerfCnt",
-       "ErrCtl",
-       "CacheErr",
-       "TagLo",
-       "TagHi",
-       "ErrorEPC",
-       "DESAVE"
-};
-
-void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-       int i, j;
-
-       kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
-       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
-               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
-                       if (vcpu->arch.cop0->stat[i][j])
-                               kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
-                                        vcpu->arch.cop0->stat[i][j]);
-               }
-       }
-#endif
-}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
deleted file mode 100644 (file)
index bbcd822..0000000
+++ /dev/null
@@ -1,809 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
- * TLB handlers run from KSEG0
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/kvm_host.h>
-#include <linux/srcu.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/tlb.h>
-
-#undef CONFIG_MIPS_MT
-#include <asm/r4kcache.h>
-#define CONFIG_MIPS_MT
-
-#define KVM_GUEST_PC_TLB    0
-#define KVM_GUEST_SP_TLB    1
-
-#define PRIx64 "llx"
-
-atomic_t kvm_mips_instance;
-EXPORT_SYMBOL(kvm_mips_instance);
-
-/* These function pointers are initialized once the KVM module is loaded */
-pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
-
-void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
-
-bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_is_error_pfn);
-
-uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
-}
-
-uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
-}
-
-inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
-{
-       return vcpu->kvm->arch.commpage_tlb;
-}
-
-/* Structure defining a TLB entry data set. */
-
-void kvm_mips_dump_host_tlbs(void)
-{
-       unsigned long old_entryhi;
-       unsigned long old_pagemask;
-       struct kvm_mips_tlb tlb;
-       unsigned long flags;
-       int i;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-       old_pagemask = read_c0_pagemask();
-
-       kvm_info("HOST TLBs:\n");
-       kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
-
-       for (i = 0; i < current_cpu_data.tlbsize; i++) {
-               write_c0_index(i);
-               mtc0_tlbw_hazard();
-
-               tlb_read();
-               tlbw_use_hazard();
-
-               tlb.tlb_hi = read_c0_entryhi();
-               tlb.tlb_lo0 = read_c0_entrylo0();
-               tlb.tlb_lo1 = read_c0_entrylo1();
-               tlb.tlb_mask = read_c0_pagemask();
-
-               kvm_info("TLB%c%3d Hi 0x%08lx ",
-                        (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-                        i, tlb.tlb_hi);
-               kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-                        (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-                        (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-                        (tlb.tlb_lo0 >> 3) & 7);
-               kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-                        (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-                        (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-                        (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-       }
-       write_c0_entryhi(old_entryhi);
-       write_c0_pagemask(old_pagemask);
-       mtc0_tlbw_hazard();
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
-
-void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_mips_tlb tlb;
-       int i;
-
-       kvm_info("Guest TLBs:\n");
-       kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
-
-       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-               tlb = vcpu->arch.guest_tlb[i];
-               kvm_info("TLB%c%3d Hi 0x%08lx ",
-                        (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-                        i, tlb.tlb_hi);
-               kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-                        (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-                        (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-                        (tlb.tlb_lo0 >> 3) & 7);
-               kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-                        (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-                        (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-                        (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-       }
-}
-EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
-
-static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
-{
-       int srcu_idx, err = 0;
-       pfn_t pfn;
-
-       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
-               return 0;
-
-       srcu_idx = srcu_read_lock(&kvm->srcu);
-       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
-
-       if (kvm_mips_is_error_pfn(pfn)) {
-               kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
-               err = -EFAULT;
-               goto out;
-       }
-
-       kvm->arch.guest_pmap[gfn] = pfn;
-out:
-       srcu_read_unlock(&kvm->srcu, srcu_idx);
-       return err;
-}
-
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-                                                   unsigned long gva)
-{
-       gfn_t gfn;
-       uint32_t offset = gva & ~PAGE_MASK;
-       struct kvm *kvm = vcpu->kvm;
-
-       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
-               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
-                       __builtin_return_address(0), gva);
-               return KVM_INVALID_PAGE;
-       }
-
-       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-
-       if (gfn >= kvm->arch.guest_pmap_npages) {
-               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
-                       gva);
-               return KVM_INVALID_PAGE;
-       }
-
-       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-               return KVM_INVALID_ADDR;
-
-       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
-
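Once kvm_mips_map_page() has filled guest_pmap[], the KSEG0 translation above is pure arithmetic: strip the KSEG0 base to get a guest physical address, split off the frame number, then substitute the host frame. A stand-alone sketch (illustrative; the 4 KiB page size and the KSEG0 base value are assumptions here, and pmap is a hypothetical prefilled gfn-to-pfn array):

        #define PAGE_SHIFT      12                      /* assumed 4 KiB pages */
        #define PAGE_SIZE       (1ul << PAGE_SHIFT)
        #define GUEST_KSEG0     0x40000000ul            /* assumed guest KSEG0 base */

        static unsigned long kseg0_gva_to_hpa(unsigned long gva,
                                              const unsigned long *pmap)
        {
                unsigned long gpa = gva - GUEST_KSEG0;  /* KVM_GUEST_CPHYSADDR() */
                unsigned long gfn = gpa >> PAGE_SHIFT;
                unsigned long off = gva & (PAGE_SIZE - 1);

                return (pmap[gfn] << PAGE_SHIFT) + off; /* host physical address */
        }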
-/* XXXKYMA: Must be called with interrupts disabled */
-/* set flush_dcache_mask == 0 if no dcache flush required */
-int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
-                           unsigned long entrylo0, unsigned long entrylo1,
-                           int flush_dcache_mask)
-{
-       unsigned long flags;
-       unsigned long old_entryhi;
-       int idx;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-       write_c0_entryhi(entryhi);
-       mtc0_tlbw_hazard();
-
-       tlb_probe();
-       tlb_probe_hazard();
-       idx = read_c0_index();
-
-       if (idx >= current_cpu_data.tlbsize) {
-               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
-               kvm_mips_dump_host_tlbs();
-               write_c0_entryhi(old_entryhi);
-               mtc0_tlbw_hazard();
-               local_irq_restore(flags);
-               return -1;
-       }
-
-       write_c0_entrylo0(entrylo0);
-       write_c0_entrylo1(entrylo1);
-       mtc0_tlbw_hazard();
-
-       if (idx < 0)
-               tlb_write_random();
-       else
-               tlb_write_indexed();
-       tlbw_use_hazard();
-
-       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
-                 vcpu->arch.pc, idx, read_c0_entryhi(),
-                 read_c0_entrylo0(), read_c0_entrylo1());
-
-       /* Flush D-cache */
-       if (flush_dcache_mask) {
-               if (entrylo0 & MIPS3_PG_V) {
-                       ++vcpu->stat.flush_dcache_exits;
-                       flush_data_cache_page((entryhi & VPN2_MASK) &
-                                             ~flush_dcache_mask);
-               }
-               if (entrylo1 & MIPS3_PG_V) {
-                       ++vcpu->stat.flush_dcache_exits;
-                       flush_data_cache_page(((entryhi & VPN2_MASK) &
-                                              ~flush_dcache_mask) |
-                                             (0x1 << PAGE_SHIFT));
-               }
-       }
-
-       /* Restore old ASID */
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-       local_irq_restore(flags);
-       return 0;
-}
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
-                                   struct kvm_vcpu *vcpu)
-{
-       gfn_t gfn;
-       pfn_t pfn0, pfn1;
-       unsigned long vaddr = 0;
-       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
-       int even;
-       struct kvm *kvm = vcpu->kvm;
-       const int flush_dcache_mask = 0;
-
-       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
-               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               return -1;
-       }
-
-       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
-       if (gfn >= kvm->arch.guest_pmap_npages) {
-               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
-                       gfn, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               return -1;
-       }
-       even = !(gfn & 0x1);
-       vaddr = badvaddr & (PAGE_MASK << 1);
-
-       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-               return -1;
-
-       if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
-               return -1;
-
-       if (even) {
-               pfn0 = kvm->arch.guest_pmap[gfn];
-               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
-       } else {
-               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
-               pfn1 = kvm->arch.guest_pmap[gfn];
-       }
-
-       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
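-       /* EntryLo: PFN | cacheable CCA (3 << 3) | dirty (D) | valid (V) */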
-       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-                  (1 << 2) | (0x1 << 1);
-       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-                  (1 << 2) | (0x1 << 1);
-
-       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-                                      flush_dcache_mask);
-}
-EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
-
-int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
-       struct kvm_vcpu *vcpu)
-{
-       pfn_t pfn0, pfn1;
-       unsigned long flags, old_entryhi = 0, vaddr = 0;
-       unsigned long entrylo0 = 0, entrylo1 = 0;
-
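-       /*
-        * The commpage is a single page: map it through the even EntryLo
-        * and leave the odd EntryLo invalid (V bit clear).
-        */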
-       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
-       pfn1 = 0;
-       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-                  (1 << 2) | (0x1 << 1);
-       entrylo1 = 0;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-       vaddr = badvaddr & (PAGE_MASK << 1);
-       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
-       mtc0_tlbw_hazard();
-       write_c0_entrylo0(entrylo0);
-       mtc0_tlbw_hazard();
-       write_c0_entrylo1(entrylo1);
-       mtc0_tlbw_hazard();
-       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
-       mtc0_tlbw_hazard();
-       tlb_write_indexed();
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
-                 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
-                 read_c0_entrylo0(), read_c0_entrylo1());
-
-       /* Restore old ASID */
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-       local_irq_restore(flags);
-
-       return 0;
-}
-EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
-
-int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
-                                        struct kvm_mips_tlb *tlb,
-                                        unsigned long *hpa0,
-                                        unsigned long *hpa1)
-{
-       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
-       struct kvm *kvm = vcpu->kvm;
-       pfn_t pfn0, pfn1;
-
-       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
-               pfn0 = 0;
-               pfn1 = 0;
-       } else {
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
-                                           >> PAGE_SHIFT];
-               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
-                                           >> PAGE_SHIFT];
-       }
-
-       if (hpa0)
-               *hpa0 = pfn0 << PAGE_SHIFT;
-
-       if (hpa1)
-               *hpa1 = pfn1 << PAGE_SHIFT;
-
-       /* Get attributes from the Guest TLB */
-       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
-                                              kvm_mips_get_kernel_asid(vcpu) :
-                                              kvm_mips_get_user_asid(vcpu));
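-       /* Keep the guest D and V bits, but force a cacheable CCA */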
-       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-                  (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
-       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-                  (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
-
-       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
-                 tlb->tlb_lo0, tlb->tlb_lo1);
-
-       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-                                      tlb->tlb_mask);
-}
-EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
-
-int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
-{
-       int i;
-       int index = -1;
-       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
-
-       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-               if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
-                   TLB_HI_ASID_HIT(tlb[i], entryhi)) {
-                       index = i;
-                       break;
-               }
-       }
-
-       if (index >= 0)
-               kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
-                         __func__, entryhi, index, tlb[index].tlb_lo0,
-                         tlb[index].tlb_lo1);
-
-       return index;
-}
-EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
-
-int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
-{
-       unsigned long old_entryhi, flags;
-       int idx;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-
-       if (KVM_GUEST_KERNEL_MODE(vcpu))
-               write_c0_entryhi((vaddr & VPN2_MASK) |
-                                kvm_mips_get_kernel_asid(vcpu));
-       else {
-               write_c0_entryhi((vaddr & VPN2_MASK) |
-                                kvm_mips_get_user_asid(vcpu));
-       }
-
-       mtc0_tlbw_hazard();
-
-       tlb_probe();
-       tlb_probe_hazard();
-       idx = read_c0_index();
-
-       /* Restore old ASID */
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-
-       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-
-       return idx;
-}
-EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
-
-int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
-{
-       int idx;
-       unsigned long flags, old_entryhi;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-
-       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
-       mtc0_tlbw_hazard();
-
-       tlb_probe();
-       tlb_probe_hazard();
-       idx = read_c0_index();
-
-       BUG_ON(idx >= current_cpu_data.tlbsize);
-
-       if (idx >= 0) {
-               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
-               mtc0_tlbw_hazard();
-
-               write_c0_entrylo0(0);
-               mtc0_tlbw_hazard();
-
-               write_c0_entrylo1(0);
-               mtc0_tlbw_hazard();
-
-               tlb_write_indexed();
-               mtc0_tlbw_hazard();
-       }
-
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-
-       if (idx >= 0)
-               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
-                         (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
-
-       return 0;
-}
-EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
-
-/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
-int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
-{
-       unsigned long flags, old_entryhi;
-
-       BUG_ON(index >= current_cpu_data.tlbsize);
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-
-       write_c0_entryhi(UNIQUE_ENTRYHI(index));
-       mtc0_tlbw_hazard();
-
-       write_c0_index(index);
-       mtc0_tlbw_hazard();
-
-       write_c0_entrylo0(0);
-       mtc0_tlbw_hazard();
-
-       write_c0_entrylo1(0);
-       mtc0_tlbw_hazard();
-
-       tlb_write_indexed();
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-
-       return 0;
-}
-
-void kvm_mips_flush_host_tlb(int skip_kseg0)
-{
-       unsigned long flags;
-       unsigned long old_entryhi, entryhi;
-       unsigned long old_pagemask;
-       int entry = 0;
-       int maxentry = current_cpu_data.tlbsize;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-       old_pagemask = read_c0_pagemask();
-
-       /* Blast 'em all away. */
-       for (entry = 0; entry < maxentry; entry++) {
-               write_c0_index(entry);
-               mtc0_tlbw_hazard();
-
-               if (skip_kseg0) {
-                       tlb_read();
-                       tlbw_use_hazard();
-
-                       entryhi = read_c0_entryhi();
-
-                       /* Don't blow away guest kernel entries */
-                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
-                               continue;
-               }
-
-               /* Make sure all entries differ. */
-               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-               mtc0_tlbw_hazard();
-               write_c0_entrylo0(0);
-               mtc0_tlbw_hazard();
-               write_c0_entrylo1(0);
-               mtc0_tlbw_hazard();
-
-               tlb_write_indexed();
-               mtc0_tlbw_hazard();
-       }
-
-       tlbw_use_hazard();
-
-       write_c0_entryhi(old_entryhi);
-       write_c0_pagemask(old_pagemask);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
-
-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-                            struct kvm_vcpu *vcpu)
-{
-       unsigned long asid = asid_cache(cpu);
-
-       asid += ASID_INC;
-       if (!(asid & ASID_MASK)) {
-               if (cpu_has_vtag_icache)
-                       flush_icache_all();
-
-               kvm_local_flush_tlb_all();      /* start new asid cycle */
-
-               if (!asid)      /* fix version if needed */
-                       asid = ASID_FIRST_VERSION;
-       }
-
-       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
-void kvm_local_flush_tlb_all(void)
-{
-       unsigned long flags;
-       unsigned long old_ctx;
-       int entry = 0;
-
-       local_irq_save(flags);
-       /* Save old context and create impossible VPN2 value */
-       old_ctx = read_c0_entryhi();
-       write_c0_entrylo0(0);
-       write_c0_entrylo1(0);
-
-       /* Blast 'em all away. */
-       while (entry < current_cpu_data.tlbsize) {
-               /* Make sure all entries differ. */
-               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-               write_c0_index(entry);
-               mtc0_tlbw_hazard();
-               tlb_write_indexed();
-               entry++;
-       }
-       tlbw_use_hazard();
-       write_c0_entryhi(old_ctx);
-       mtc0_tlbw_hazard();
-
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(kvm_local_flush_tlb_all);
-
-/**
- * kvm_mips_migrate_count() - Migrate timer.
- * @vcpu:      Virtual CPU.
- *
- * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
- * if it was running prior to being cancelled.
- *
- * Must be called when the VCPU is migrated to a different CPU to ensure that
- * timer expiry during guest execution interrupts the guest and causes the
- * interrupt to be delivered in a timely manner.
- */
-static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
-{
-       if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
-               hrtimer_restart(&vcpu->arch.comparecount_timer);
-}
-
-/* Restore ASID once we are scheduled back after preemption */
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-       unsigned long flags;
-       int newasid = 0;
-
-       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-
-       /* Allocate new kernel and user ASIDs if needed */
-
-       local_irq_save(flags);
-
-       if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-           ASID_VERSION_MASK) {
-               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
-               vcpu->arch.guest_kernel_asid[cpu] =
-                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
-               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
-               vcpu->arch.guest_user_asid[cpu] =
-                   vcpu->arch.guest_user_mm.context.asid[cpu];
-               newasid++;
-
-               kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-                         cpu_context(cpu, current->mm));
-               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
-               kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-                         vcpu->arch.guest_user_asid[cpu]);
-       }
-
-       if (vcpu->arch.last_sched_cpu != cpu) {
-               kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
-                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
-               /*
-                * Migrate the timer interrupt to the current CPU so that it
-                * always interrupts the guest and synchronously triggers a
-                * guest timer interrupt.
-                */
-               kvm_mips_migrate_count(vcpu);
-       }
-
-       if (!newasid) {
-               /*
-                * If we preempted while the guest was executing, then reload
-                * the pre-empted ASID
-                */
-               if (current->flags & PF_VCPU) {
-                       write_c0_entryhi(vcpu->arch.preempt_entryhi &
-                                        ASID_MASK);
-                       ehb();
-               }
-       } else {
-               /* New ASIDs were allocated for the VM */
-
-               /*
-                * Were we in guest context? If so then the pre-empted ASID is
-                * no longer valid, we need to set it to what it should be based
-                * on the mode of the Guest (Kernel/User)
-                */
-               if (current->flags & PF_VCPU) {
-                       if (KVM_GUEST_KERNEL_MODE(vcpu))
-                               write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu]
-                                                & ASID_MASK);
-                       else
-                               write_c0_entryhi(vcpu->arch.guest_user_asid[cpu]
-                                                & ASID_MASK);
-                       ehb();
-               }
-       }
-
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(kvm_arch_vcpu_load);
-
-/* ASID can change if another task is scheduled during preemption */
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
-       unsigned long flags;
-       uint32_t cpu;
-
-       local_irq_save(flags);
-
-       cpu = smp_processor_id();
-
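-       /* Remember the live EntryHi (ASID) so vcpu_load can restore it */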
-       vcpu->arch.preempt_entryhi = read_c0_entryhi();
-       vcpu->arch.last_sched_cpu = cpu;
-
-       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-            ASID_VERSION_MASK)) {
-               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
-                         cpu_context(cpu, current->mm));
-               drop_mmu_context(current->mm, cpu);
-       }
-       write_c0_entryhi(cpu_asid(cpu, current->mm));
-       ehb();
-
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(kvm_arch_vcpu_put);
-
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       unsigned long paddr, flags, vpn2, asid;
-       uint32_t inst;
-       int index;
-
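-       /*
-        * Mapped address (USEG/KSEG23): read the instruction through the
-        * host TLB, refilling it from the guest TLB if the mapping is absent.
-        */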
-       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
-           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-               local_irq_save(flags);
-               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
-               if (index >= 0) {
-                       inst = *(opc);
-               } else {
-                       vpn2 = (unsigned long) opc & VPN2_MASK;
-                       asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
-                       index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
-                       if (index < 0) {
-                               kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
-                                       __func__, opc, vcpu, read_c0_entryhi());
-                               kvm_mips_dump_host_tlbs();
-                               local_irq_restore(flags);
-                               return KVM_INVALID_INST;
-                       }
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-                                       &vcpu->arch.guest_tlb[index],
-                                       NULL, NULL);
-                       inst = *(opc);
-               }
-               local_irq_restore(flags);
-       } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-               paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
-                                                       (unsigned long) opc);
-               inst = *(uint32_t *) CKSEG0ADDR(paddr);
-       } else {
-               kvm_err("%s: illegal address: %p\n", __func__, opc);
-               return KVM_INVALID_INST;
-       }
-
-       return inst;
-}
-EXPORT_SYMBOL(kvm_get_inst);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
deleted file mode 100644 (file)
index bd2f6bc..0000000
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
-
-static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
-{
-       gpa_t gpa;
-       uint32_t kseg = KSEGX(gva);
-
-       if ((kseg == CKSEG0) || (kseg == CKSEG1))
-               gpa = CPHYSADDR(gva);
-       else {
-               kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
-               kvm_mips_dump_host_tlbs();
-               gpa = KVM_INVALID_ADDR;
-       }
-
-       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
-
-       return gpa;
-}
-
-static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
-               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
-       else
-               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
-
-       switch (er) {
-       case EMULATE_DONE:
-               ret = RESUME_GUEST;
-               break;
-
-       case EMULATE_FAIL:
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               break;
-
-       case EMULATE_WAIT:
-               run->exit_reason = KVM_EXIT_INTR;
-               ret = RESUME_HOST;
-               break;
-
-       default:
-               BUG();
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
-           || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-               kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                         cause, opc, badvaddr);
-               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
-
-               if (er == EMULATE_DONE)
-                       ret = RESUME_GUEST;
-               else {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-               /*
-                * XXXKYMA: The guest kernel does not expect to get this fault
-                * when we are not using HIGHMEM. Need to address this in a
-                * HIGHMEM kernel
-                */
-               kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                       cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       } else {
-               kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                       cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
-           && KVM_GUEST_KERNEL_MODE(vcpu)) {
-               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
-                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-               kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                         cause, opc, badvaddr);
-               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
-               if (er == EMULATE_DONE)
-                       ret = RESUME_GUEST;
-               else {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-               /*
-                * All KSEG0 faults are handled by KVM, as the guest kernel does
-                * not expect to ever get them
-                */
-               if (kvm_mips_handle_kseg0_tlb_fault(vcpu->arch.host_cp0_badvaddr,
-                                                   vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                       cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
-           && KVM_GUEST_KERNEL_MODE(vcpu)) {
-               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
-                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-               kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
-                         vcpu->arch.pc, badvaddr);
-
-               /*
-                * User Address (UA) fault, this could happen if
-                * (1) TLB entry not present/valid in both Guest and shadow host
-                *     TLBs, in this case we pass on the fault to the guest
-                *     kernel and let it handle it.
-                * (2) TLB entry is present in the Guest TLB but not in the
-                *     shadow, in this case we inject the TLB from the Guest TLB
-                *     into the shadow host TLB
-                */
-
-               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
-               if (er == EMULATE_DONE)
-                       ret = RESUME_GUEST;
-               else {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_handle_kseg0_tlb_fault(vcpu->arch.host_cp0_badvaddr,
-                                                   vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                       cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (KVM_GUEST_KERNEL_MODE(vcpu)
-           && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-               kvm_debug("Emulate Store to MMIO space\n");
-               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
-               if (er == EMULATE_FAIL) {
-                       kvm_err("Emulate Store to MMIO space failed\n");
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               } else {
-                       run->exit_reason = KVM_EXIT_MMIO;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                       cause, opc, badvaddr);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
-               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
-               if (er == EMULATE_FAIL) {
-                       kvm_err("Emulate Load from MMIO space failed\n");
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               } else {
-                       run->exit_reason = KVM_EXIT_MMIO;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                       cause, opc, badvaddr);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               er = EMULATE_FAIL;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
-       if (er == EMULATE_DONE)
-               ret = RESUME_GUEST;
-       else {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
-       if (er == EMULATE_DONE)
-               ret = RESUME_GUEST;
-       else {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
-       if (er == EMULATE_DONE)
-               ret = RESUME_GUEST;
-       else {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_vm_init(struct kvm *kvm)
-{
-       return 0;
-}
-
-static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t config1;
-       int vcpu_id = vcpu->vcpu_id;
-
-       /*
-        * Arch-specific setup: program the config registers so the guest
-        * comes up as expected; for now we simulate a MIPS 24Kc.
-        */
-       kvm_write_c0_guest_prid(cop0, 0x00019300);
-       kvm_write_c0_guest_config(cop0,
-                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
-                                 (MMU_TYPE_R4000 << CP0C0_MT));
-
-       /* Read the cache characteristics from the host Config1 Register */
-       config1 = (read_c0_config1() & ~0x7f);
-
-       /* Set up MMU size: Config1[30:25] holds (TLB entries - 1) */
-       config1 &= ~(0x3f << 25);
-       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
-
-       /* We unset some bits that we aren't emulating */
-       config1 &= ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
-                    (1 << CP0C1_WR) | (1 << CP0C1_CA));
-       kvm_write_c0_guest_config1(cop0, config1);
-
-       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
-       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
-       kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
-                                        (1 << CP0C3_ULRI));
-
-       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
-       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
-
-       /*
-        * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5)
-        */
-       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
-
-       /* Use the vcpu id as the EBase CPUNum field to handle SMP guests */
-       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
-
-       return 0;
-}
-
-static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
-                                    const struct kvm_one_reg *reg,
-                                    s64 *v)
-{
-       switch (reg->id) {
-       case KVM_REG_MIPS_CP0_COUNT:
-               *v = kvm_mips_read_count(vcpu);
-               break;
-       case KVM_REG_MIPS_COUNT_CTL:
-               *v = vcpu->arch.count_ctl;
-               break;
-       case KVM_REG_MIPS_COUNT_RESUME:
-               *v = ktime_to_ns(vcpu->arch.count_resume);
-               break;
-       case KVM_REG_MIPS_COUNT_HZ:
-               *v = vcpu->arch.count_hz;
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
-                                    const struct kvm_one_reg *reg,
-                                    s64 v)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int ret = 0;
-
-       switch (reg->id) {
-       case KVM_REG_MIPS_CP0_COUNT:
-               kvm_mips_write_count(vcpu, v);
-               break;
-       case KVM_REG_MIPS_CP0_COMPARE:
-               kvm_mips_write_compare(vcpu, v);
-               break;
-       case KVM_REG_MIPS_CP0_CAUSE:
-               /*
-                * If the timer is stopped or started (DC bit) it must look
-                * atomic with changes to the interrupt pending bits (TI, IRQ5).
-                * A timer interrupt should not happen in between.
-                */
-               if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
-                       if (v & CAUSEF_DC) {
-                               /* disable timer first */
-                               kvm_mips_count_disable_cause(vcpu);
-                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
-                       } else {
-                               /* enable timer last */
-                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
-                               kvm_mips_count_enable_cause(vcpu);
-                       }
-               } else {
-                       kvm_write_c0_guest_cause(cop0, v);
-               }
-               break;
-       case KVM_REG_MIPS_COUNT_CTL:
-               ret = kvm_mips_set_count_ctl(vcpu, v);
-               break;
-       case KVM_REG_MIPS_COUNT_RESUME:
-               ret = kvm_mips_set_count_resume(vcpu, v);
-               break;
-       case KVM_REG_MIPS_COUNT_HZ:
-               ret = kvm_mips_set_count_hz(vcpu, v);
-               break;
-       default:
-               return -EINVAL;
-       }
-       return ret;
-}
-
-static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
-       /* exit handlers */
-       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
-       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
-       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
-       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
-       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
-       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
-       .handle_syscall = kvm_trap_emul_handle_syscall,
-       .handle_res_inst = kvm_trap_emul_handle_res_inst,
-       .handle_break = kvm_trap_emul_handle_break,
-
-       .vm_init = kvm_trap_emul_vm_init,
-       .vcpu_init = kvm_trap_emul_vcpu_init,
-       .vcpu_setup = kvm_trap_emul_vcpu_setup,
-       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
-       .queue_timer_int = kvm_mips_queue_timer_int_cb,
-       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
-       .queue_io_int = kvm_mips_queue_io_int_cb,
-       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
-       .irq_deliver = kvm_mips_irq_deliver_cb,
-       .irq_clear = kvm_mips_irq_clear_cb,
-       .get_one_reg = kvm_trap_emul_get_one_reg,
-       .set_one_reg = kvm_trap_emul_set_one_reg,
-};
-
-int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
-{
-       *install_callbacks = &kvm_trap_emul_callbacks;
-       return 0;
-}
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
new file mode 100644 (file)
index 0000000..d7279c0
--- /dev/null
@@ -0,0 +1,620 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
+
+#define CP0_DDATA_LO        $28,3
+#define CP0_EBASE           $15,1
+
+#define CP0_INTCTL          $12,1
+#define CP0_SRSCTL          $12,2
+#define CP0_SRSMAP          $12,3
+#define CP0_HWRENA          $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+       .set    noreorder
+       .set    noat
+
+FEXPORT(__kvm_mips_vcpu_run)
+       /* k0/k1 not being used in host kernel context */
+       INT_ADDIU k1, sp, -PT_SIZE
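+       /* k1 now points at a pt_regs sized frame used to stash host state */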
+       LONG_S  $0, PT_R0(k1)
+       LONG_S  $1, PT_R1(k1)
+       LONG_S  $2, PT_R2(k1)
+       LONG_S  $3, PT_R3(k1)
+
+       LONG_S  $4, PT_R4(k1)
+       LONG_S  $5, PT_R5(k1)
+       LONG_S  $6, PT_R6(k1)
+       LONG_S  $7, PT_R7(k1)
+
+       LONG_S  $8,  PT_R8(k1)
+       LONG_S  $9,  PT_R9(k1)
+       LONG_S  $10, PT_R10(k1)
+       LONG_S  $11, PT_R11(k1)
+       LONG_S  $12, PT_R12(k1)
+       LONG_S  $13, PT_R13(k1)
+       LONG_S  $14, PT_R14(k1)
+       LONG_S  $15, PT_R15(k1)
+       LONG_S  $16, PT_R16(k1)
+       LONG_S  $17, PT_R17(k1)
+
+       LONG_S  $18, PT_R18(k1)
+       LONG_S  $19, PT_R19(k1)
+       LONG_S  $20, PT_R20(k1)
+       LONG_S  $21, PT_R21(k1)
+       LONG_S  $22, PT_R22(k1)
+       LONG_S  $23, PT_R23(k1)
+       LONG_S  $24, PT_R24(k1)
+       LONG_S  $25, PT_R25(k1)
+
+       /*
+        * XXXKYMA k0/k1 not saved, not being used if we got here through
+        * an ioctl()
+        */
+
+       LONG_S  $28, PT_R28(k1)
+       LONG_S  $29, PT_R29(k1)
+       LONG_S  $30, PT_R30(k1)
+       LONG_S  $31, PT_R31(k1)
+
+       /* Save hi/lo */
+       mflo    v0
+       LONG_S  v0, PT_LO(k1)
+       mfhi    v1
+       LONG_S  v1, PT_HI(k1)
+
+       /* Save host status */
+       mfc0    v0, CP0_STATUS
+       LONG_S  v0, PT_STATUS(k1)
+
+       /* Save host ASID, shove it into the BVADDR location */
+       mfc0    v1, CP0_ENTRYHI
+       andi    v1, 0xff
+       LONG_S  v1, PT_HOST_ASID(k1)
+
+       /* Save DDATA_LO, will be used to store pointer to vcpu */
+       mfc0    v1, CP0_DDATA_LO
+       LONG_S  v1, PT_HOST_USERLOCAL(k1)
+
+       /* DDATA_LO has pointer to vcpu */
+       mtc0    a1, CP0_DDATA_LO
+
+       /* Offset into vcpu->arch */
+       INT_ADDIU k1, a1, VCPU_HOST_ARCH
+
+       /*
+        * Save the host stack to VCPU, used for exception processing
+        * when we exit from the Guest
+        */
+       LONG_S  sp, VCPU_HOST_STACK(k1)
+
+       /* Save the kernel gp as well */
+       LONG_S  gp, VCPU_HOST_GP(k1)
+
+       /*
+        * Setup status register for running the guest in UM, interrupts
+        * are disabled
+        */
+       li      k0, (ST0_EXL | KSU_USER | ST0_BEV)
+       mtc0    k0, CP0_STATUS
+       ehb
+
+       /* load up the new EBASE */
+       LONG_L  k0, VCPU_GUEST_EBASE(k1)
+       mtc0    k0, CP0_EBASE
+
+       /*
+        * Now that the new EBASE has been loaded, unset BEV, set
+        * interrupt mask as it was but make sure that timer interrupts
+        * are enabled
+        */
+       li      k0, (ST0_EXL | KSU_USER | ST0_IE)
+       andi    v0, v0, ST0_IM
+       or      k0, k0, v0
+       mtc0    k0, CP0_STATUS
+       ehb
+
+       /* Set Guest EPC */
+       LONG_L  t0, VCPU_PC(k1)
+       mtc0    t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+       /* Set the ASID for the Guest Kernel */
+       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
+                               /* addresses shift to 0x80000000 */
+       bltz    t0, 1f          /* If kernel */
+        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
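+       /*
+        * The branch delay slot loads the kernel ASID base; on fall
+        * through (user address) the next instruction overwrites t1
+        * with the user ASID base.
+        */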
+1:
+       /* t1: contains the base of the ASID array, need to get the cpu id */
+       LONG_L  t2, TI_CPU($28)             /* smp_processor_id */
+       INT_SLL t2, t2, 2                   /* x4 */
+       REG_ADDU t3, t1, t2
+       LONG_L  k0, (t3)
+       andi    k0, k0, 0xff
+       mtc0    k0, CP0_ENTRYHI
+       ehb
+
+       /* Disable RDHWR access */
+       mtc0    zero, CP0_HWRENA
+
+       /* Now load up the Guest Context from VCPU */
+       LONG_L  $1, VCPU_R1(k1)
+       LONG_L  $2, VCPU_R2(k1)
+       LONG_L  $3, VCPU_R3(k1)
+
+       LONG_L  $4, VCPU_R4(k1)
+       LONG_L  $5, VCPU_R5(k1)
+       LONG_L  $6, VCPU_R6(k1)
+       LONG_L  $7, VCPU_R7(k1)
+
+       LONG_L  $8, VCPU_R8(k1)
+       LONG_L  $9, VCPU_R9(k1)
+       LONG_L  $10, VCPU_R10(k1)
+       LONG_L  $11, VCPU_R11(k1)
+       LONG_L  $12, VCPU_R12(k1)
+       LONG_L  $13, VCPU_R13(k1)
+       LONG_L  $14, VCPU_R14(k1)
+       LONG_L  $15, VCPU_R15(k1)
+       LONG_L  $16, VCPU_R16(k1)
+       LONG_L  $17, VCPU_R17(k1)
+       LONG_L  $18, VCPU_R18(k1)
+       LONG_L  $19, VCPU_R19(k1)
+       LONG_L  $20, VCPU_R20(k1)
+       LONG_L  $21, VCPU_R21(k1)
+       LONG_L  $22, VCPU_R22(k1)
+       LONG_L  $23, VCPU_R23(k1)
+       LONG_L  $24, VCPU_R24(k1)
+       LONG_L  $25, VCPU_R25(k1)
+
+       /* k0/k1 loaded up later */
+
+       LONG_L  $28, VCPU_R28(k1)
+       LONG_L  $29, VCPU_R29(k1)
+       LONG_L  $30, VCPU_R30(k1)
+       LONG_L  $31, VCPU_R31(k1)
+
+       /* Restore hi/lo */
+       LONG_L  k0, VCPU_LO(k1)
+       mtlo    k0
+
+       LONG_L  k0, VCPU_HI(k1)
+       mthi    k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+       /* Restore the guest's k0/k1 registers */
+       LONG_L  k0, VCPU_R26(k1)
+       LONG_L  k1, VCPU_R27(k1)
+
+       /* Jump to guest */
+       eret
+
+VECTOR(MIPSX(exception), unknown)
+/* Find out what mode we came from and jump to the proper handler. */
+       mtc0    k0, CP0_ERROREPC        #01: Save guest k0
+       ehb                             #02:
+
+       mfc0    k0, CP0_EBASE           #02: Get EBASE
+       INT_SRL k0, k0, 10              #03: Get rid of CPUNum
+       INT_SLL k0, k0, 10              #04
+       LONG_S  k1, 0x3000(k0)          #05: Save k1 @ offset 0x3000
+       INT_ADDIU k0, k0, 0x2000        #06: Exception handler is
+                                       #    installed @ offset 0x2000
+       j       k0                      #07: jump to the function
+        nop                            #08: branch delay slot
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+       /* Get the VCPU pointer from DDATA_LO */
+       mfc0    k1, CP0_DDATA_LO
+       INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+       /* Start saving Guest context to VCPU */
+       LONG_S  $0, VCPU_R0(k1)
+       LONG_S  $1, VCPU_R1(k1)
+       LONG_S  $2, VCPU_R2(k1)
+       LONG_S  $3, VCPU_R3(k1)
+       LONG_S  $4, VCPU_R4(k1)
+       LONG_S  $5, VCPU_R5(k1)
+       LONG_S  $6, VCPU_R6(k1)
+       LONG_S  $7, VCPU_R7(k1)
+       LONG_S  $8, VCPU_R8(k1)
+       LONG_S  $9, VCPU_R9(k1)
+       LONG_S  $10, VCPU_R10(k1)
+       LONG_S  $11, VCPU_R11(k1)
+       LONG_S  $12, VCPU_R12(k1)
+       LONG_S  $13, VCPU_R13(k1)
+       LONG_S  $14, VCPU_R14(k1)
+       LONG_S  $15, VCPU_R15(k1)
+       LONG_S  $16, VCPU_R16(k1)
+       LONG_S  $17, VCPU_R17(k1)
+       LONG_S  $18, VCPU_R18(k1)
+       LONG_S  $19, VCPU_R19(k1)
+       LONG_S  $20, VCPU_R20(k1)
+       LONG_S  $21, VCPU_R21(k1)
+       LONG_S  $22, VCPU_R22(k1)
+       LONG_S  $23, VCPU_R23(k1)
+       LONG_S  $24, VCPU_R24(k1)
+       LONG_S  $25, VCPU_R25(k1)
+
+       /* Guest k0/k1 saved later */
+
+       LONG_S  $28, VCPU_R28(k1)
+       LONG_S  $29, VCPU_R29(k1)
+       LONG_S  $30, VCPU_R30(k1)
+       LONG_S  $31, VCPU_R31(k1)
+
+       /* We need to save hi/lo and restore them on the way out */
+       mfhi    t0
+       LONG_S  t0, VCPU_HI(k1)
+
+       mflo    t0
+       LONG_S  t0, VCPU_LO(k1)
+
+       /* Finally save guest k0/k1 to VCPU */
+       mfc0    t0, CP0_ERROREPC
+       LONG_S  t0, VCPU_R26(k1)
+
+       /* Get GUEST k1 and save it in VCPU */
+       PTR_LI  t1, ~0x2ff
+       mfc0    t0, CP0_EBASE
+       and     t0, t0, t1
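+       /* k1 was stashed at EBASE + 0x3000 by the exception vector */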
+       LONG_L  t0, 0x3000(t0)
+       LONG_S  t0, VCPU_R27(k1)
+
+       /* Now that context has been saved, we can use other registers */
+
+       /* Restore vcpu */
+       mfc0    a1, CP0_DDATA_LO
+       move    s1, a1
+
+       /* Restore run (vcpu->run) */
+       LONG_L  a0, VCPU_RUN(a1)
+       /* Save pointer to run in s0; callee-saved, so it survives the call */
+       move    s0, a0
+
+       /*
+        * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+        * process the exception
+        */
+       mfc0    k0, CP0_EPC
+       LONG_S  k0, VCPU_PC(k1)
+
+       mfc0    k0, CP0_BADVADDR
+       LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+       mfc0    k0, CP0_CAUSE
+       LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
+
+       mfc0    k0, CP0_ENTRYHI
+       LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
+
+       /* Now restore the host state just enough to run the handlers */
+
+       /* Switch EBASE to the one used by Linux */
+       /* load up the host EBASE */
+       mfc0    v0, CP0_STATUS
+
+       .set    at
+       or      k0, v0, ST0_BEV
+       .set    noat
+
+       mtc0    k0, CP0_STATUS
+       ehb
+
+       LONG_L  k0, VCPU_HOST_EBASE(k1)
+       mtc0    k0, CP0_EBASE
+
+       /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+       .set    at
+       and     v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+       or      v0, v0, ST0_CU0
+       .set    noat
+       mtc0    v0, CP0_STATUS
+       ehb
+
+       /* Load up host GP */
+       LONG_L  gp, VCPU_HOST_GP(k1)
+
+       /* Need a stack before we can jump to "C" */
+       LONG_L  sp, VCPU_HOST_STACK(k1)
+
+       /* Saved host state */
+       INT_ADDIU sp, sp, -PT_SIZE
+
+       /*
+        * XXXKYMA do we need to load the host ASID, maybe not because the
+        * kernel entries are marked GLOBAL, need to verify
+        */
+
+       /* Restore host DDATA_LO */
+       LONG_L  k0, PT_HOST_USERLOCAL(sp)
+       mtc0    k0, CP0_DDATA_LO
+
+       /* Restore RDHWR access */
+       PTR_LI  k0, 0x2000000F
+       mtc0    k0, CP0_HWRENA
+
+       /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+       /*
+        * XXXKYMA: not sure if this is safe, how large is the stack??
+        * Now jump to the kvm_mips_handle_exit() to see if we can deal
+        * with this in the kernel
+        */
+       PTR_LA  t9, kvm_mips_handle_exit
+       jalr.hb t9
+        INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
+
+       /* Return from handler; make sure interrupts are disabled */
+       di
+       ehb
+
+       /*
+        * XXXKYMA: k0/k1 could have been blown away if we processed
+        * an exception while we were handling the exception from the
+        * guest, reload k1
+        */
+
+       move    k1, s1
+       INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+       /*
+        * Check return value, should tell us if we are returning to the
+        * host (handle I/O etc.) or resuming the guest
+        */
+       andi    t0, v0, RESUME_HOST
+       bnez    t0, __kvm_mips_return_to_host
+        nop
+
+__kvm_mips_return_to_guest:
+       /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+       mtc0    s1, CP0_DDATA_LO
+
+       /* Load up the Guest EBASE to minimize the window where BEV is set */
+       LONG_L  t0, VCPU_GUEST_EBASE(k1)
+
+       /* Switch EBASE back to the one used by KVM */
+       mfc0    v1, CP0_STATUS
+       .set    at
+       or      k0, v1, ST0_BEV
+       .set    noat
+       mtc0    k0, CP0_STATUS
+       ehb
+       mtc0    t0, CP0_EBASE
+
+       /* Setup status register for running guest in UM */
+       .set    at
+       or      v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+       and     v1, v1, ~ST0_CU0
+       .set    noat
+       mtc0    v1, CP0_STATUS
+       ehb
+
+       /* Set Guest EPC */
+       LONG_L  t0, VCPU_PC(k1)
+       mtc0    t0, CP0_EPC
+
+       /* Set the ASID for the Guest Kernel */
+       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
+                               /* addresses shift to 0x80000000 */
+       bltz    t0, 1f          /* If kernel */
+        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+       /* t1: contains the base of the ASID array, need to get the cpu id  */
+       LONG_L  t2, TI_CPU($28)         /* smp_processor_id */
+       INT_SLL t2, t2, 2               /* x4 */
+       REG_ADDU t3, t1, t2
+       LONG_L  k0, (t3)
+       andi    k0, k0, 0xff
+       mtc0    k0, CP0_ENTRYHI
+       ehb
+
+       /* Disable RDHWR access */
+       mtc0    zero, CP0_HWRENA
+
+       /* load the guest context from VCPU and return */
+       LONG_L  $0, VCPU_R0(k1)
+       LONG_L  $1, VCPU_R1(k1)
+       LONG_L  $2, VCPU_R2(k1)
+       LONG_L  $3, VCPU_R3(k1)
+       LONG_L  $4, VCPU_R4(k1)
+       LONG_L  $5, VCPU_R5(k1)
+       LONG_L  $6, VCPU_R6(k1)
+       LONG_L  $7, VCPU_R7(k1)
+       LONG_L  $8, VCPU_R8(k1)
+       LONG_L  $9, VCPU_R9(k1)
+       LONG_L  $10, VCPU_R10(k1)
+       LONG_L  $11, VCPU_R11(k1)
+       LONG_L  $12, VCPU_R12(k1)
+       LONG_L  $13, VCPU_R13(k1)
+       LONG_L  $14, VCPU_R14(k1)
+       LONG_L  $15, VCPU_R15(k1)
+       LONG_L  $16, VCPU_R16(k1)
+       LONG_L  $17, VCPU_R17(k1)
+       LONG_L  $18, VCPU_R18(k1)
+       LONG_L  $19, VCPU_R19(k1)
+       LONG_L  $20, VCPU_R20(k1)
+       LONG_L  $21, VCPU_R21(k1)
+       LONG_L  $22, VCPU_R22(k1)
+       LONG_L  $23, VCPU_R23(k1)
+       LONG_L  $24, VCPU_R24(k1)
+       LONG_L  $25, VCPU_R25(k1)
+
+       /* k0/k1 loaded later */
+       LONG_L  $28, VCPU_R28(k1)
+       LONG_L  $29, VCPU_R29(k1)
+       LONG_L  $30, VCPU_R30(k1)
+       LONG_L  $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+       LONG_L  k0, VCPU_HI(k1)
+       mthi    k0
+
+       LONG_L  k0, VCPU_LO(k1)
+       mtlo    k0
+
+       LONG_L  k0, VCPU_R26(k1)
+       LONG_L  k1, VCPU_R27(k1)
+
+       eret
+
+__kvm_mips_return_to_host:
+       /* EBASE is already pointing to Linux */
+       LONG_L  k1, VCPU_HOST_STACK(k1)
+       INT_ADDIU k1, k1, -PT_SIZE
+
+       /* Restore host DDATA_LO */
+       LONG_L  k0, PT_HOST_USERLOCAL(k1)
+       mtc0    k0, CP0_DDATA_LO
+
+       /* Restore host ASID */
+       LONG_L  k0, PT_HOST_ASID(sp)
+       andi    k0, 0xff
+       mtc0    k0, CP0_ENTRYHI
+       ehb
+
+       /* Load context saved on the host stack */
+       LONG_L  $0, PT_R0(k1)
+       LONG_L  $1, PT_R1(k1)
+
+       /*
+        * r2/v0 is the return code, shift it down by 2 (arithmetic)
+        * to recover the err code
+        */
+       INT_SRA k0, v0, 2
+       move    $2, k0
+
+       LONG_L  $3, PT_R3(k1)
+       LONG_L  $4, PT_R4(k1)
+       LONG_L  $5, PT_R5(k1)
+       LONG_L  $6, PT_R6(k1)
+       LONG_L  $7, PT_R7(k1)
+       LONG_L  $8, PT_R8(k1)
+       LONG_L  $9, PT_R9(k1)
+       LONG_L  $10, PT_R10(k1)
+       LONG_L  $11, PT_R11(k1)
+       LONG_L  $12, PT_R12(k1)
+       LONG_L  $13, PT_R13(k1)
+       LONG_L  $14, PT_R14(k1)
+       LONG_L  $15, PT_R15(k1)
+       LONG_L  $16, PT_R16(k1)
+       LONG_L  $17, PT_R17(k1)
+       LONG_L  $18, PT_R18(k1)
+       LONG_L  $19, PT_R19(k1)
+       LONG_L  $20, PT_R20(k1)
+       LONG_L  $21, PT_R21(k1)
+       LONG_L  $22, PT_R22(k1)
+       LONG_L  $23, PT_R23(k1)
+       LONG_L  $24, PT_R24(k1)
+       LONG_L  $25, PT_R25(k1)
+
+       /* Host k0/k1 were not saved */
+
+       LONG_L  $28, PT_R28(k1)
+       LONG_L  $29, PT_R29(k1)
+       LONG_L  $30, PT_R30(k1)
+
+       LONG_L  k0, PT_HI(k1)
+       mthi    k0
+
+       LONG_L  k0, PT_LO(k1)
+       mtlo    k0
+
+       /* Restore RDHWR access */
+       PTR_LI  k0, 0x2000000F
+       mtc0    k0, CP0_HWRENA
+
+       /* Restore RA, which is the address we will return to */
+       LONG_L  ra, PT_R31(k1)
+       j       ra
+        nop
+
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+       ####
+       ##### The exception handlers.
+       #####
+       .word _C_LABEL(MIPSX(GuestException))   #  0
+       .word _C_LABEL(MIPSX(GuestException))   #  1
+       .word _C_LABEL(MIPSX(GuestException))   #  2
+       .word _C_LABEL(MIPSX(GuestException))   #  3
+       .word _C_LABEL(MIPSX(GuestException))   #  4
+       .word _C_LABEL(MIPSX(GuestException))   #  5
+       .word _C_LABEL(MIPSX(GuestException))   #  6
+       .word _C_LABEL(MIPSX(GuestException))   #  7
+       .word _C_LABEL(MIPSX(GuestException))   #  8
+       .word _C_LABEL(MIPSX(GuestException))   #  9
+       .word _C_LABEL(MIPSX(GuestException))   # 10
+       .word _C_LABEL(MIPSX(GuestException))   # 11
+       .word _C_LABEL(MIPSX(GuestException))   # 12
+       .word _C_LABEL(MIPSX(GuestException))   # 13
+       .word _C_LABEL(MIPSX(GuestException))   # 14
+       .word _C_LABEL(MIPSX(GuestException))   # 15
+       .word _C_LABEL(MIPSX(GuestException))   # 16
+       .word _C_LABEL(MIPSX(GuestException))   # 17
+       .word _C_LABEL(MIPSX(GuestException))   # 18
+       .word _C_LABEL(MIPSX(GuestException))   # 19
+       .word _C_LABEL(MIPSX(GuestException))   # 20
+       .word _C_LABEL(MIPSX(GuestException))   # 21
+       .word _C_LABEL(MIPSX(GuestException))   # 22
+       .word _C_LABEL(MIPSX(GuestException))   # 23
+       .word _C_LABEL(MIPSX(GuestException))   # 24
+       .word _C_LABEL(MIPSX(GuestException))   # 25
+       .word _C_LABEL(MIPSX(GuestException))   # 26
+       .word _C_LABEL(MIPSX(GuestException))   # 27
+       .word _C_LABEL(MIPSX(GuestException))   # 28
+       .word _C_LABEL(MIPSX(GuestException))   # 29
+       .word _C_LABEL(MIPSX(GuestException))   # 30
+       .word _C_LABEL(MIPSX(GuestException))   # 31
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
new file mode 100644 (file)
index 0000000..d687c6e
--- /dev/null
@@ -0,0 +1,1218 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+#include "commpage.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100    /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "wait",         VCPU_STAT(wait_exits),         KVM_STAT_VCPU },
+       { "cache",        VCPU_STAT(cache_exits),        KVM_STAT_VCPU },
+       { "signal",       VCPU_STAT(signal_exits),       KVM_STAT_VCPU },
+       { "interrupt",    VCPU_STAT(int_exits),          KVM_STAT_VCPU },
+       { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+       { "tlbmod",       VCPU_STAT(tlbmod_exits),       KVM_STAT_VCPU },
+       { "tlbmiss_ld",   VCPU_STAT(tlbmiss_ld_exits),   KVM_STAT_VCPU },
+       { "tlbmiss_st",   VCPU_STAT(tlbmiss_st_exits),   KVM_STAT_VCPU },
+       { "addrerr_st",   VCPU_STAT(addrerr_st_exits),   KVM_STAT_VCPU },
+       { "addrerr_ld",   VCPU_STAT(addrerr_ld_exits),   KVM_STAT_VCPU },
+       { "syscall",      VCPU_STAT(syscall_exits),      KVM_STAT_VCPU },
+       { "resvd_inst",   VCPU_STAT(resvd_inst_exits),   KVM_STAT_VCPU },
+       { "break_inst",   VCPU_STAT(break_inst_exits),   KVM_STAT_VCPU },
+       { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+       { "halt_wakeup",  VCPU_STAT(halt_wakeup),        KVM_STAT_VCPU },
+       {NULL}
+};
+
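+/*
+ * Usage note (illustrative): with debugfs mounted, the generic KVM code
+ * exposes each entry above as a counter file, typically under
+ * /sys/kernel/debug/kvm/ (the mount point may vary), e.g. "wait" for
+ * wait_exits.
+ */
+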
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               vcpu->arch.guest_kernel_asid[i] = 0;
+               vcpu->arch.guest_user_asid[i] = 0;
+       }
+
+       return 0;
+}
+
+/*
+ * XXXKYMA: We are simulating a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       *(int *)rtn = 0;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+       unsigned long wired;
+
+       /*
+        * Add a wired entry to the TLB; it is used to map the commpage to
+        * the Guest kernel
+        */
+       wired = read_c0_wired();
+       write_c0_wired(wired + 1);
+       mtc0_tlbw_hazard();
+       kvm->arch.commpage_tlb = wired;
+
+       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+                 kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+       struct kvm *kvm = (struct kvm *)arg;
+
+       kvm_mips_init_tlbs(kvm);
+       kvm_mips_callbacks->vm_init(kvm);
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       if (atomic_inc_return(&kvm_mips_instance) == 1) {
+               kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
+                         __func__);
+               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+       }
+
+       return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+       unsigned int i;
+       struct kvm_vcpu *vcpu;
+
+       /* Put the pages we reserved for the guest pmap */
+       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+       }
+       kfree(kvm->arch.guest_pmap);
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_arch_vcpu_free(vcpu);
+       }
+
+       mutex_lock(&kvm->lock);
+
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+
+       atomic_set(&kvm->online_vcpus, 0);
+
+       mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+       /* Restore wired count */
+       write_c0_wired(0);
+       mtc0_tlbw_hazard();
+       /* Clear out all the TLBs */
+       kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kvm_mips_free_vcpus(kvm);
+
+       /* If this is the last instance, restore wired count */
+       if (atomic_dec_return(&kvm_mips_instance) == 0) {
+               kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
+                         __func__);
+               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+       }
+}
+
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+                       unsigned long arg)
+{
+       return -ENOIOCTLCMD;
+}
+
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
+{
+       return 0;
+}
+
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_userspace_memory_region *mem,
+                                  enum kvm_mr_change change)
+{
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  struct kvm_userspace_memory_region *mem,
+                                  const struct kvm_memory_slot *old,
+                                  enum kvm_mr_change change)
+{
+       unsigned long npages = 0;
+       int i;
+
+       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+                 __func__, kvm, mem->slot, mem->guest_phys_addr,
+                 mem->memory_size, mem->userspace_addr);
+
+       /* Setup Guest PMAP table */
+       if (!kvm->arch.guest_pmap) {
+               if (mem->slot == 0)
+                       npages = mem->memory_size >> PAGE_SHIFT;
+
+               if (npages) {
+                       kvm->arch.guest_pmap_npages = npages;
+                       kvm->arch.guest_pmap =
+                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+                       if (!kvm->arch.guest_pmap) {
+                               kvm_err("Failed to allocate guest PMAP");
+                               return;
+                       }
+
+                       kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+                                 npages, kvm->arch.guest_pmap);
+
+                       /* Now setup the page table */
+                       for (i = 0; i < npages; i++)
+                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+               }
+       }
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       int err, size, offset;
+       void *gebase;
+       int i;
+
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+
+       if (err)
+               goto out_free_cpu;
+
+       kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+       /*
+        * Allocate space for host mode exception handlers that handle
+        * guest mode exits
+        */
+       if (cpu_has_veic || cpu_has_vint)
+               size = 0x200 + VECTORSPACING * 64;
+       else
+               size = 0x4000;
+
+       /* Save Linux EBASE */
+       vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+       if (!gebase) {
+               err = -ENOMEM;
+               goto out_free_cpu;
+       }
+       kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+                 ALIGN(size, PAGE_SIZE), gebase);
+
+       /* Save new ebase */
+       vcpu->arch.guest_ebase = gebase;
+
+       /* Copy L1 Guest Exception handler to correct offset */
+
+       /* TLB Refill, EXL = 0 */
+       memcpy(gebase, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* General Exception Entry point */
+       memcpy(gebase + 0x180, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+       for (i = 0; i < 8; i++) {
+               kvm_debug("L1 Vectored handler @ %p\n",
+                         gebase + 0x200 + (i * VECTORSPACING));
+               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+                      mips32_exceptionEnd - mips32_exception);
+       }
+
+       /* General handler, relocate to unmapped space for sanity's sake */
+       offset = 0x2000;
+       kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
+                 gebase + offset,
+                 mips32_GuestExceptionEnd - mips32_GuestException);
+
+       memcpy(gebase + offset, mips32_GuestException,
+              mips32_GuestExceptionEnd - mips32_GuestException);
+
+       /* Invalidate the icache for these ranges */
+       local_flush_icache_range((unsigned long)gebase,
+                               (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+
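+       /*
+        * Illustrative summary of the gebase layout built above (offsets
+        * as used in this function):
+        *
+        *   gebase + 0x0                      TLB refill handler (EXL = 0)
+        *   gebase + 0x180                    general exception entry
+        *   gebase + 0x200 + i*VECTORSPACING  vectored interrupt entry i (0-7)
+        *   gebase + 0x2000                   guest exception handler (unmapped copy)
+        */
+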
+       /*
+        * Allocate comm page for guest kernel; a TLB entry will be reserved
+        * for mapping GVA @ 0xFFFF8000 to this page
+        */
+       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+       if (!vcpu->arch.kseg0_commpage) {
+               err = -ENOMEM;
+               goto out_free_gebase;
+       }
+
+       kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+       kvm_mips_commpage_init(vcpu);
+
+       /* Init */
+       vcpu->arch.last_sched_cpu = -1;
+
+       /* Start off the timer */
+       kvm_mips_init_count(vcpu);
+
+       return vcpu;
+
+out_free_gebase:
+       kfree(gebase);
+
+out_free_cpu:
+       kfree(vcpu);
+
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       kvm_vcpu_uninit(vcpu);
+
+       kvm_mips_dump_stats(vcpu);
+
+       kfree(vcpu->arch.guest_ebase);
+       kfree(vcpu->arch.kseg0_commpage);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                       struct kvm_guest_debug *dbg)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int r = 0;
+       sigset_t sigsaved;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvm_mips_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       }
+
+       local_irq_disable();
+       /* Check if we have any exceptions/interrupts pending */
+       kvm_mips_deliver_interrupts(vcpu,
+                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+       kvm_guest_enter();
+
+       r = __kvm_mips_vcpu_run(run, vcpu);
+
+       kvm_guest_exit();
+       local_irq_enable();
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
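+/*
+ * Minimal userspace sketch of driving this path (a sketch only: vcpu_fd
+ * is an assumed KVM vcpu file descriptor, run its mmap'ed struct kvm_run,
+ * handle_mmio a hypothetical helper; error handling omitted):
+ *
+ *     for (;;) {
+ *             if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
+ *                     break;
+ *             if (run->exit_reason == KVM_EXIT_MMIO)
+ *                     handle_mmio(run);
+ *     }
+ */
+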
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+                            struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       struct kvm_vcpu *dvcpu = NULL;
+
+       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+                         (int)intr);
+
+       if (irq->cpu == -1)
+               dvcpu = vcpu;
+       else
+               dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+       if (intr == 2 || intr == 3 || intr == 4) {
+               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+       } else if (intr == -2 || intr == -3 || intr == -4) {
+               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+       } else {
+               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+                       irq->cpu, irq->irq);
+               return -EINVAL;
+       }
+
+       dvcpu->arch.wait = 0;
+
+       if (waitqueue_active(&dvcpu->wq))
+               wake_up_interruptible(&dvcpu->wq);
+
+       return 0;
+}
+
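+/*
+ * Illustrative userspace use of this path (a sketch: vcpu_fd is an
+ * assumed KVM vcpu file descriptor; error handling omitted):
+ *
+ *     struct kvm_mips_interrupt irq = {
+ *             .cpu = -1,      (deliver to the vcpu the ioctl targets)
+ *             .irq = 2,       (raise IRQ 2; -2 would dequeue it)
+ *     };
+ *     ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
+ */
+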
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -ENOIOCTLCMD;
+}
+
+static u64 kvm_mips_get_one_regs[] = {
+       KVM_REG_MIPS_R0,
+       KVM_REG_MIPS_R1,
+       KVM_REG_MIPS_R2,
+       KVM_REG_MIPS_R3,
+       KVM_REG_MIPS_R4,
+       KVM_REG_MIPS_R5,
+       KVM_REG_MIPS_R6,
+       KVM_REG_MIPS_R7,
+       KVM_REG_MIPS_R8,
+       KVM_REG_MIPS_R9,
+       KVM_REG_MIPS_R10,
+       KVM_REG_MIPS_R11,
+       KVM_REG_MIPS_R12,
+       KVM_REG_MIPS_R13,
+       KVM_REG_MIPS_R14,
+       KVM_REG_MIPS_R15,
+       KVM_REG_MIPS_R16,
+       KVM_REG_MIPS_R17,
+       KVM_REG_MIPS_R18,
+       KVM_REG_MIPS_R19,
+       KVM_REG_MIPS_R20,
+       KVM_REG_MIPS_R21,
+       KVM_REG_MIPS_R22,
+       KVM_REG_MIPS_R23,
+       KVM_REG_MIPS_R24,
+       KVM_REG_MIPS_R25,
+       KVM_REG_MIPS_R26,
+       KVM_REG_MIPS_R27,
+       KVM_REG_MIPS_R28,
+       KVM_REG_MIPS_R29,
+       KVM_REG_MIPS_R30,
+       KVM_REG_MIPS_R31,
+
+       KVM_REG_MIPS_HI,
+       KVM_REG_MIPS_LO,
+       KVM_REG_MIPS_PC,
+
+       KVM_REG_MIPS_CP0_INDEX,
+       KVM_REG_MIPS_CP0_CONTEXT,
+       KVM_REG_MIPS_CP0_USERLOCAL,
+       KVM_REG_MIPS_CP0_PAGEMASK,
+       KVM_REG_MIPS_CP0_WIRED,
+       KVM_REG_MIPS_CP0_HWRENA,
+       KVM_REG_MIPS_CP0_BADVADDR,
+       KVM_REG_MIPS_CP0_COUNT,
+       KVM_REG_MIPS_CP0_ENTRYHI,
+       KVM_REG_MIPS_CP0_COMPARE,
+       KVM_REG_MIPS_CP0_STATUS,
+       KVM_REG_MIPS_CP0_CAUSE,
+       KVM_REG_MIPS_CP0_EPC,
+       KVM_REG_MIPS_CP0_CONFIG,
+       KVM_REG_MIPS_CP0_CONFIG1,
+       KVM_REG_MIPS_CP0_CONFIG2,
+       KVM_REG_MIPS_CP0_CONFIG3,
+       KVM_REG_MIPS_CP0_CONFIG7,
+       KVM_REG_MIPS_CP0_ERROREPC,
+
+       KVM_REG_MIPS_COUNT_CTL,
+       KVM_REG_MIPS_COUNT_RESUME,
+       KVM_REG_MIPS_COUNT_HZ,
+};
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int ret;
+       s64 v;
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+               v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+               break;
+       case KVM_REG_MIPS_HI:
+               v = (long)vcpu->arch.hi;
+               break;
+       case KVM_REG_MIPS_LO:
+               v = (long)vcpu->arch.lo;
+               break;
+       case KVM_REG_MIPS_PC:
+               v = (long)vcpu->arch.pc;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               v = (long)kvm_read_c0_guest_index(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               v = (long)kvm_read_c0_guest_context(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_USERLOCAL:
+               v = (long)kvm_read_c0_guest_userlocal(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               v = (long)kvm_read_c0_guest_pagemask(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               v = (long)kvm_read_c0_guest_wired(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_HWRENA:
+               v = (long)kvm_read_c0_guest_hwrena(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               v = (long)kvm_read_c0_guest_badvaddr(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               v = (long)kvm_read_c0_guest_entryhi(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_COMPARE:
+               v = (long)kvm_read_c0_guest_compare(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               v = (long)kvm_read_c0_guest_status(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               v = (long)kvm_read_c0_guest_cause(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_EPC:
+               v = (long)kvm_read_c0_guest_epc(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               v = (long)kvm_read_c0_guest_errorepc(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG:
+               v = (long)kvm_read_c0_guest_config(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG1:
+               v = (long)kvm_read_c0_guest_config1(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG2:
+               v = (long)kvm_read_c0_guest_config2(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG3:
+               v = (long)kvm_read_c0_guest_config3(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG7:
+               v = (long)kvm_read_c0_guest_config7(cop0);
+               break;
+       /* registers to be handled specially */
+       case KVM_REG_MIPS_CP0_COUNT:
+       case KVM_REG_MIPS_COUNT_CTL:
+       case KVM_REG_MIPS_COUNT_RESUME:
+       case KVM_REG_MIPS_COUNT_HZ:
+               ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return -EINVAL;
+       }
+       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+               return put_user(v, uaddr64);
+       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+               u32 v32 = (u32)v;
+
+               return put_user(v32, uaddr32);
+       } else {
+               return -EINVAL;
+       }
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       u64 v;
+
+       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+               if (get_user(v, uaddr64) != 0)
+                       return -EFAULT;
+       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+               s32 v32;
+
+               if (get_user(v32, uaddr32) != 0)
+                       return -EFAULT;
+               v = (s64)v32;
+       } else {
+               return -EINVAL;
+       }
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0:
+               /* Silently ignore requests to set $0 */
+               break;
+       case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+               vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+               break;
+       case KVM_REG_MIPS_HI:
+               vcpu->arch.hi = v;
+               break;
+       case KVM_REG_MIPS_LO:
+               vcpu->arch.lo = v;
+               break;
+       case KVM_REG_MIPS_PC:
+               vcpu->arch.pc = v;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               kvm_write_c0_guest_index(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               kvm_write_c0_guest_context(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_USERLOCAL:
+               kvm_write_c0_guest_userlocal(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               kvm_write_c0_guest_pagemask(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               kvm_write_c0_guest_wired(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_HWRENA:
+               kvm_write_c0_guest_hwrena(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               kvm_write_c0_guest_badvaddr(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               kvm_write_c0_guest_entryhi(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               kvm_write_c0_guest_status(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_EPC:
+               kvm_write_c0_guest_epc(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               kvm_write_c0_guest_errorepc(cop0, v);
+               break;
+       /* registers to be handled specially */
+       case KVM_REG_MIPS_CP0_COUNT:
+       case KVM_REG_MIPS_CP0_COMPARE:
+       case KVM_REG_MIPS_CP0_CAUSE:
+       case KVM_REG_MIPS_COUNT_CTL:
+       case KVM_REG_MIPS_COUNT_RESUME:
+       case KVM_REG_MIPS_COUNT_HZ:
+               return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
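+/*
+ * Illustrative userspace read of a single register through the one_reg
+ * interface handled above (a sketch: vcpu_fd is an assumed KVM vcpu file
+ * descriptor; error handling omitted):
+ *
+ *     __u64 pc;
+ *     struct kvm_one_reg reg = {
+ *             .id   = KVM_REG_MIPS_PC,
+ *             .addr = (__u64)(unsigned long)&pc,
+ *     };
+ *     ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
+ */
+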
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+                        unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+
+       switch (ioctl) {
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       return -EFAULT;
+               if (ioctl == KVM_SET_ONE_REG)
+                       return kvm_mips_set_reg(vcpu, &reg);
+               else
+                       return kvm_mips_get_reg(vcpu, &reg);
+       }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               u64 __user *reg_dest;
+               struct kvm_reg_list reg_list;
+               unsigned n;
+
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       return -EFAULT;
+               n = reg_list.n;
+               reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       return -EFAULT;
+               if (n < reg_list.n)
+                       return -E2BIG;
+               reg_dest = user_list->reg;
+               if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
+                                sizeof(kvm_mips_get_one_regs)))
+                       return -EFAULT;
+               return 0;
+       }
+       case KVM_NMI:
+               /* Treat the NMI as a CPU reset */
+               r = kvm_mips_reset_vcpu(vcpu);
+               break;
+       case KVM_INTERRUPT:
+               {
+                       struct kvm_mips_interrupt irq;
+
+                       r = -EFAULT;
+                       if (copy_from_user(&irq, argp, sizeof(irq)))
+                               goto out;
+
+                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+                                 irq.irq);
+
+                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+                       break;
+               }
+       default:
+               r = -ENOIOCTLCMD;
+       }
+
+out:
+       return r;
+}
+
+/* Get (and clear) the dirty memory log for a memory slot. */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       unsigned long ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = &kvm->memslots->memslots[log->slot];
+
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+               kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+                        ga_end);
+
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       long r;
+
+       switch (ioctl) {
+       default:
+               r = -ENOIOCTLCMD;
+       }
+
+       return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       if (kvm_mips_callbacks) {
+               kvm_err("kvm: module already exists\n");
+               return -EEXIST;
+       }
+
+       return kvm_mips_emulation_init(&kvm_mips_callbacks);
+}
+
+void kvm_arch_exit(void)
+{
+       kvm_mips_callbacks = NULL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_ONE_REG:
+               r = 1;
+               break;
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct mips_coproc *cop0;
+
+       if (!vcpu)
+               return -1;
+
+       kvm_debug("VCPU Register Dump:\n");
+       kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
+       kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+       for (i = 0; i < 32; i += 4) {
+               kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+                      vcpu->arch.gprs[i],
+                      vcpu->arch.gprs[i + 1],
+                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+       }
+       kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+       kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+       cop0 = vcpu->arch.cop0;
+       kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+                 kvm_read_c0_guest_status(cop0),
+                 kvm_read_c0_guest_cause(cop0));
+
+       kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               vcpu->arch.gprs[i] = regs->gpr[i];
+       vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
+       vcpu->arch.hi = regs->hi;
+       vcpu->arch.lo = regs->lo;
+       vcpu->arch.pc = regs->pc;
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               regs->gpr[i] = vcpu->arch.gprs[i];
+
+       regs->hi = vcpu->arch.hi;
+       regs->lo = vcpu->arch.lo;
+       regs->pc = vcpu->arch.pc;
+
+       return 0;
+}
+
+static void kvm_mips_comparecount_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+
+       vcpu->arch.wait = 0;
+       if (waitqueue_active(&vcpu->wq))
+               wake_up_interruptible(&vcpu->wq);
+}
+
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+       kvm_mips_comparecount_func((unsigned long) vcpu);
+       return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       kvm_mips_callbacks->vcpu_init(vcpu);
+       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
+       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                 struct kvm_translation *tr)
+{
+       return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static void kvm_mips_set_c0_status(void)
+{
+       uint32_t status = read_c0_status();
+
+       if (cpu_has_fpu)
+               status |= (ST0_CU1);
+
+       if (cpu_has_dsp)
+               status |= (ST0_MX);
+
+       write_c0_status(status);
+       ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t cause = vcpu->arch.host_cp0_cause;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       /* Set a default exit reason */
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+
+       /*
+        * Set the appropriate status bits based on host CPU features,
+        * before we hit the scheduler
+        */
+       kvm_mips_set_c0_status();
+
+       local_irq_enable();
+
+       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+                       cause, opc, run, vcpu);
+
+       /*
+        * Do a privilege check; if in UM, most of these exit conditions
+        * end up causing an exception to be delivered to the Guest Kernel
+        */
+       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+       if (er == EMULATE_PRIV_FAIL) {
+               goto skip_emul;
+       } else if (er == EMULATE_FAIL) {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               goto skip_emul;
+       }
+
+       switch (exccode) {
+       case T_INT:
+               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+               ++vcpu->stat.int_exits;
+               trace_kvm_exit(vcpu, INT_EXITS);
+
+               if (need_resched())
+                       cond_resched();
+
+               ret = RESUME_GUEST;
+               break;
+
+       case T_COP_UNUSABLE:
+               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+               ++vcpu->stat.cop_unusable_exits;
+               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+               /* XXXKYMA: Might need to return to user space */
+               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
+                       ret = RESUME_HOST;
+               break;
+
+       case T_TLB_MOD:
+               ++vcpu->stat.tlbmod_exits;
+               trace_kvm_exit(vcpu, TLBMOD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+               break;
+
+       case T_TLB_ST_MISS:
+               kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+                         cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+                         badvaddr);
+
+               ++vcpu->stat.tlbmiss_st_exits;
+               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+               break;
+
+       case T_TLB_LD_MISS:
+               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+
+               ++vcpu->stat.tlbmiss_ld_exits;
+               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+               break;
+
+       case T_ADDR_ERR_ST:
+               ++vcpu->stat.addrerr_st_exits;
+               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+               break;
+
+       case T_ADDR_ERR_LD:
+               ++vcpu->stat.addrerr_ld_exits;
+               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+               break;
+
+       case T_SYSCALL:
+               ++vcpu->stat.syscall_exits;
+               trace_kvm_exit(vcpu, SYSCALL_EXITS);
+               ret = kvm_mips_callbacks->handle_syscall(vcpu);
+               break;
+
+       case T_RES_INST:
+               ++vcpu->stat.resvd_inst_exits;
+               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+               break;
+
+       case T_BREAK:
+               ++vcpu->stat.break_inst_exits;
+               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
+
+       default:
+               kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                       exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+                       kvm_read_c0_guest_status(vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       }
+
+skip_emul:
+       local_irq_disable();
+
+       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+               kvm_mips_deliver_interrupts(vcpu, cause);
+
+       if (!(ret & RESUME_HOST)) {
+               /* Only check for signals if not already exiting to userspace */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
+                       ret = (-EINTR << 2) | RESUME_HOST;
+                       ++vcpu->stat.signal_exits;
+                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
+               }
+       }
+
+       return ret;
+}
+
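+/*
+ * Sketch of unpacking the (errcode << 2 | RESUME_FLAG_*) encoding noted
+ * above kvm_mips_handle_exit() (RESUME_FLAG_HOST/RESUME_FLAG_NV come from
+ * the MIPS kvm_host.h):
+ *
+ *     ret = kvm_mips_handle_exit(run, vcpu);
+ *     if (ret & RESUME_FLAG_HOST)
+ *             err = ret >> 2;         (e.g. -EINTR from the signal path)
+ *     else
+ *             re-enter guest execution
+ */
+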
+int __init kvm_mips_init(void)
+{
+       int ret;
+
+       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+       if (ret)
+               return ret;
+
+       /*
+        * On MIPS, kernel modules are executed from "mapped space", which
+        * requires TLBs. The TLB handling code is statically linked with
+        * the rest of the kernel (tlb.c) to avoid the possibility of
+        * double faulting. The issue is that the TLB code references
+        * routines that are part of the KVM module, which are only
+        * available once the module is loaded.
+        */
+       kvm_mips_gfn_to_pfn = gfn_to_pfn;
+       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+       kvm_mips_is_error_pfn = is_error_pfn;
+
+       pr_info("KVM/MIPS Initialized\n");
+       return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+       kvm_exit();
+
+       kvm_mips_gfn_to_pfn = NULL;
+       kvm_mips_release_pfn_clean = NULL;
+       kvm_mips_is_error_pfn = NULL;
+
+       pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h
new file mode 100644 (file)
index 0000000..03a6ae8
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/* Define opcode values not defined in <asm/inst.h> */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define mfmcz_op       0x0b    /* 01011 */
+#define wrpgpr_op      0x0e    /* 01110 */
+
+/* COP0 opcodes (only if COP0 and CO=1): */
+#define wait_op                0x20    /* 100000 */
+
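+/*
+ * Illustrative decode of a WAIT instruction using the values above (a
+ * sketch; field positions per the MIPS32 encoding):
+ *
+ *     (inst >> 26) == 0x10            opcode field is COP0
+ *     (inst & (1 << 25)) != 0         CO bit is set
+ *     (inst & 0x3f) == wait_op        function field is 100000
+ */
+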
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
new file mode 100644 (file)
index 0000000..a74d602
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: COP0 access histogram
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+       "WAIT",
+       "CACHE",
+       "Signal",
+       "Interrupt",
+       "COP0/1 Unusable",
+       "TLB Mod",
+       "TLB Miss (LD)",
+       "TLB Miss (ST)",
+       "Address Err (ST)",
+       "Address Error (LD)",
+       "System Call",
+       "Reserved Inst",
+       "Break Inst",
+       "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+       "Index",
+       "Random",
+       "EntryLo0",
+       "EntryLo1",
+       "Context",
+       "PG Mask",
+       "Wired",
+       "HWREna",
+       "BadVAddr",
+       "Count",
+       "EntryHI",
+       "Compare",
+       "Status",
+       "Cause",
+       "EXC PC",
+       "PRID",
+       "Config",
+       "LLAddr",
+       "Watch Lo",
+       "Watch Hi",
+       "X Context",
+       "Reserved",
+       "Impl Dep",
+       "Debug",
+       "DEPC",
+       "PerfCnt",
+       "ErrCtl",
+       "CacheErr",
+       "TagLo",
+       "TagHi",
+       "ErrorEPC",
+       "DESAVE"
+};
+
+void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       int i, j;
+
+       kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+                       if (vcpu->arch.cop0->stat[i][j])
+                               kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+                                        vcpu->arch.cop0->stat[i][j]);
+               }
+       }
+#endif
+}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
new file mode 100644 (file)
index 0000000..bbcd822
--- /dev/null
@@ -0,0 +1,809 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS TLB handling; this file is part of the Linux host kernel so that
+ * TLB handlers run from KSEG0
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlb.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+}
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+}
+
+inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->kvm->arch.commpage_tlb;
+}
+
+/* Structure defining a TLB entry data set. */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       struct kvm_mips_tlb tlb;
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       kvm_info("HOST TLBs:\n");
+       kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+
+       for (i = 0; i < current_cpu_data.tlbsize; i++) {
+               write_c0_index(i);
+               mtc0_tlbw_hazard();
+
+               tlb_read();
+               tlbw_use_hazard();
+
+               tlb.tlb_hi = read_c0_entryhi();
+               tlb.tlb_lo0 = read_c0_entrylo0();
+               tlb.tlb_lo1 = read_c0_entrylo1();
+               tlb.tlb_mask = read_c0_pagemask();
+
+               kvm_info("TLB%c%3d Hi 0x%08lx ",
+                        (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                        i, tlb.tlb_hi);
+               kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                        (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo0 >> 3) & 7);
+               kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                        (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb tlb;
+       int i;
+
+       kvm_info("Guest TLBs:\n");
+       kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.guest_tlb[i];
+               kvm_info("TLB%c%3d Hi 0x%08lx ",
+                        (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                        i, tlb.tlb_hi);
+               kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                        (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo0 >> 3) & 7);
+               kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                        (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+       int srcu_idx, err = 0;
+       pfn_t pfn;
+
+       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+               return 0;
+
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+       if (kvm_mips_is_error_pfn(pfn)) {
+               kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+               err = -EFAULT;
+               goto out;
+       }
+
+       kvm->arch.guest_pmap[gfn] = pfn;
+out:
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+       return err;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+                                                   unsigned long gva)
+{
+       gfn_t gfn;
+       uint32_t offset = gva & ~PAGE_MASK;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+                       __builtin_return_address(0), gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+                       gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return KVM_INVALID_ADDR;
+
+       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+
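+/*
+ * Worked example (a sketch assuming 4 KiB pages and the 0x40000000 guest
+ * KSEG0 base used by KVM): for gva = 0x40012345,
+ *
+ *     offset = gva & ~PAGE_MASK                       = 0x345
+ *     gfn    = KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT = 0x12
+ *     hpa    = (guest_pmap[0x12] << PAGE_SHIFT) + offset
+ */
+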
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
+int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+                           unsigned long entrylo0, unsigned long entrylo1,
+                           int flush_dcache_mask)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       int idx;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       write_c0_entryhi(entryhi);
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize) {
+               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+               kvm_mips_dump_host_tlbs();
+               local_irq_restore(flags);
+               return -1;
+       }
+
+       write_c0_entrylo0(entrylo0);
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+
+       if (idx < 0)
+               tlb_write_random();
+       else
+               tlb_write_indexed();
+       tlbw_use_hazard();
+
+       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                 vcpu->arch.pc, idx, read_c0_entryhi(),
+                 read_c0_entrylo0(), read_c0_entrylo1());
+
+       /* Flush D-cache */
+       if (flush_dcache_mask) {
+               if (entrylo0 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page((entryhi & VPN2_MASK) &
+                                             ~flush_dcache_mask);
+               }
+               if (entrylo1 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page(((entryhi & VPN2_MASK) &
+                                              ~flush_dcache_mask) |
+                                             (0x1 << PAGE_SHIFT));
+               }
+       }
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+       return 0;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+                                   struct kvm_vcpu *vcpu)
+{
+       gfn_t gfn;
+       pfn_t pfn0, pfn1;
+       unsigned long vaddr = 0;
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       int even;
+       struct kvm *kvm = vcpu->kvm;
+       const int flush_dcache_mask = 0;
+
+       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+                       gfn, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+       even = !(gfn & 0x1);
+       vaddr = badvaddr & (PAGE_MASK << 1);
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return -1;
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+               return -1;
+
+       if (even) {
+               pfn0 = kvm->arch.guest_pmap[gfn];
+               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+       } else {
+               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+               pfn1 = kvm->arch.guest_pmap[gfn];
+       }
+
+       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                  (1 << 2) | (0x1 << 1);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                  (1 << 2) | (0x1 << 1);
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      flush_dcache_mask);
+}
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       pfn_t pfn0, pfn1;
+       unsigned long flags, old_entryhi = 0, vaddr = 0;
+       unsigned long entrylo0 = 0, entrylo1 = 0;
+
+       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+       pfn1 = 0;
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                  (1 << 2) | (0x1 << 1);
+       entrylo1 = 0;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       vaddr = badvaddr & (PAGE_MASK << 1);
+       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+       mtc0_tlbw_hazard();
+       write_c0_entrylo0(entrylo0);
+       mtc0_tlbw_hazard();
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+                 read_c0_entrylo0(), read_c0_entrylo1());
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+                                        struct kvm_mips_tlb *tlb,
+                                        unsigned long *hpa0,
+                                        unsigned long *hpa1)
+{
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
+
+       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+               pfn0 = 0;
+               pfn1 = 0;
+       } else {
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+                                          >> PAGE_SHIFT) < 0)
+                       return -1;
+
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+                                          >> PAGE_SHIFT) < 0)
+                       return -1;
+
+               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+                                           >> PAGE_SHIFT];
+               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+                                           >> PAGE_SHIFT];
+       }
+
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
+
+       if (hpa1)
+               *hpa1 = pfn1 << PAGE_SHIFT;
+
+       /* Get attributes from the Guest TLB */
+       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+                                              kvm_mips_get_kernel_asid(vcpu) :
+                                              kvm_mips_get_user_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                  (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                  (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+                 tlb->tlb_lo0, tlb->tlb_lo1);
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      tlb->tlb_mask);
+}
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+       int i;
+       int index = -1;
+       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
+                   TLB_HI_ASID_HIT(tlb[i], entryhi)) {
+                       index = i;
+                       break;
+               }
+       }
+
+       kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+                 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+
+       return index;
+}
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+       unsigned long old_entryhi, flags;
+       int idx;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu))
+               write_c0_entryhi((vaddr & VPN2_MASK) |
+                                kvm_mips_get_kernel_asid(vcpu));
+       else {
+               write_c0_entryhi((vaddr & VPN2_MASK) |
+                                kvm_mips_get_user_asid(vcpu));
+       }
+
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+
+       return idx;
+}
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+       int idx;
+       unsigned long flags, old_entryhi;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize)
+               BUG();
+
+       if (idx >= 0) {
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       if (idx >= 0)
+               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+                         (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
+
+       return 0;
+}
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+
+/* XXXKYMA: Fix: Guest USER/KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+       unsigned long flags, old_entryhi;
+
+       if (index >= current_cpu_data.tlbsize)
+               BUG();
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi(UNIQUE_ENTRYHI(index));
+       mtc0_tlbw_hazard();
+
+       write_c0_index(index);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo0(0);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+       unsigned long flags;
+       unsigned long old_entryhi, entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int maxentry = current_cpu_data.tlbsize;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       /* Blast 'em all away. */
+       for (entry = 0; entry < maxentry; entry++) {
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               if (skip_kseg0) {
+                       tlb_read();
+                       tlbw_use_hazard();
+
+                       entryhi = read_c0_entryhi();
+
+                       /* Don't blow away guest kernel entries */
+                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
+                               continue;
+               }
+
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
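Both flush paths park invalidated slots at UNIQUE_ENTRYHI(entry) so that every entry holds a distinct VPN2 and duplicate-entry machine checks cannot occur. A sketch of that arithmetic, assuming the usual definition of CKSEG0 + (idx << (PAGE_SHIFT + 1)) with 4K pages:

    #include <stdio.h>

    #define CKSEG0     0x80000000UL /* unmapped cached segment (32-bit) */
    #define PAGE_SHIFT 12           /* assumed 4K pages */

    /*
     * One distinct EntryHi per TLB slot: each lands on a different even/odd
     * page pair inside KSEG0, which is never TLB-mapped, so the parked
     * entries can neither match nor conflict.
     */
    static unsigned long unique_entryhi(int idx)
    {
            return CKSEG0 + ((unsigned long)idx << (PAGE_SHIFT + 1));
    }

    int main(void)
    {
            int entry;

            for (entry = 0; entry < 4; entry++)
                    printf("slot %d -> entryhi %#lx\n", entry,
                           unique_entryhi(entry));
            return 0;
    }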
+
+void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                            struct kvm_vcpu *vcpu)
+{
+       unsigned long asid = asid_cache(cpu);
+
+       asid += ASID_INC;
+       if (!(asid & ASID_MASK)) {
+               if (cpu_has_vtag_icache)
+                       flush_icache_all();
+
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+               if (!asid)      /* fix version if needed */
+                       asid = ASID_FIRST_VERSION;
+       }
+
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
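kvm_get_new_mmu_context() bumps the per-CPU ASID cache and starts a new cycle when the hardware ASID field wraps. The same arithmetic in a standalone sketch, assuming an 8-bit ASID field (the real masks depend on the CPU configuration):

    #include <stdio.h>

    #define ASID_MASK          0xffUL       /* assumed 8-bit hardware ASID */
    #define ASID_INC           1UL
    #define ASID_FIRST_VERSION 0x100UL      /* first value past the ASID field */

    /* Next ASID for this CPU; *new_cycle is set when the field wraps */
    static unsigned long next_asid(unsigned long cache, int *new_cycle)
    {
            unsigned long asid = cache + ASID_INC;

            *new_cycle = 0;
            if (!(asid & ASID_MASK)) {      /* hardware ASID field wrapped */
                    *new_cycle = 1;         /* kernel flushes TLB/icache here */
                    if (!asid)              /* the whole counter wrapped */
                            asid = ASID_FIRST_VERSION;
            }
            return asid;
    }

    int main(void)
    {
            int flush;
            unsigned long asid = next_asid(0x1ffUL, &flush);

            printf("asid %#lx, new cycle: %d\n", asid, flush);  /* 0x200, 1 */
            return 0;
    }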
+
+void kvm_local_flush_tlb_all(void)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry = 0;
+
+       local_irq_save(flags);
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+
+       /* Blast 'em all away. */
+       while (entry < current_cpu_data.tlbsize) {
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_write_indexed();
+               entry++;
+       }
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu:      Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+       if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+               hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       unsigned long flags;
+       int newasid = 0;
+
+       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+
+       /* Allocate new kernel and user ASIDs if needed */
+
+       local_irq_save(flags);
+
+       if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+           ASID_VERSION_MASK) {
+               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+               vcpu->arch.guest_kernel_asid[cpu] =
+                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+               vcpu->arch.guest_user_asid[cpu] =
+                   vcpu->arch.guest_user_mm.context.asid[cpu];
+               newasid++;
+
+               kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+                         cpu_context(cpu, current->mm));
+               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
+               kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+                         vcpu->arch.guest_user_asid[cpu]);
+       }
+
+       if (vcpu->arch.last_sched_cpu != cpu) {
+               kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+               /*
+                * Migrate the timer interrupt to the current CPU so that it
+                * always interrupts the guest and synchronously triggers a
+                * guest timer interrupt.
+                */
+               kvm_mips_migrate_count(vcpu);
+       }
+
+       if (!newasid) {
+               /*
+                * If we were preempted while the guest was executing, reload
+                * the preempted ASID.
+                */
+               if (current->flags & PF_VCPU) {
+                       write_c0_entryhi(vcpu->arch.preempt_entryhi &
+                                        ASID_MASK);
+                       ehb();
+               }
+       } else {
+               /* New ASIDs were allocated for the VM */
+
+               /*
+                * Were we in guest context? If so, the preempted ASID is no
+                * longer valid; set it according to the current guest mode
+                * (kernel or user).
+                */
+               if (current->flags & PF_VCPU) {
+                       if (KVM_GUEST_KERNEL_MODE(vcpu))
+                               write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
+                                                ASID_MASK);
+                       else
+                               write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
+                                                ASID_MASK);
+                       ehb();
+               }
+       }
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       uint32_t cpu;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+
+       vcpu->arch.preempt_entryhi = read_c0_entryhi();
+       vcpu->arch.last_sched_cpu = cpu;
+
+       if ((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+           ASID_VERSION_MASK) {
+               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+                         cpu_context(cpu, current->mm));
+               drop_mmu_context(current->mm, cpu);
+       }
+       write_c0_entryhi(cpu_asid(cpu, current->mm));
+       ehb();
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
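Both vcpu_load and vcpu_put hinge on the same staleness test: an ASID is reusable only while its upper "version" bits match the current per-CPU cache. A sketch of that test, again assuming an 8-bit ASID field:

    #include <stdio.h>

    #define ASID_MASK         0xffUL
    #define ASID_VERSION_MASK ((unsigned long)~ASID_MASK)

    /*
     * An ASID is only trusted if it was allocated in the current
     * generation: the counter's upper "version" bits must match.
     */
    static int asid_is_stale(unsigned long ctx, unsigned long cache)
    {
            return ((ctx ^ cache) & ASID_VERSION_MASK) != 0;
    }

    int main(void)
    {
            /* same version 0x100, different ASIDs: still valid */
            printf("%d\n", asid_is_stale(0x105, 0x1fe));        /* 0 */
            /* version rolled over to 0x200: stale, must reallocate */
            printf("%d\n", asid_is_stale(0x105, 0x203));        /* 1 */
            return 0;
    }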
+
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long paddr, flags, vpn2, asid;
+       uint32_t inst;
+       int index;
+
+       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+               if (index >= 0) {
+                       inst = *(opc);
+               } else {
+                       vpn2 = (unsigned long) opc & VPN2_MASK;
+                       asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
+                       index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
+                       if (index < 0) {
+                               kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+                                       __func__, opc, vcpu, read_c0_entryhi());
+                               kvm_mips_dump_host_tlbs();
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                       &vcpu->arch.guest_tlb[index],
+                                       NULL, NULL);
+                       inst = *(opc);
+               }
+               local_irq_restore(flags);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
+               paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+                                                       (unsigned long) opc);
+               inst = *(uint32_t *) CKSEG0ADDR(paddr);
+       } else {
+               kvm_err("%s: illegal address: %p\n", __func__, opc);
+               return KVM_INVALID_INST;
+       }
+
+       return inst;
+}
+EXPORT_SYMBOL(kvm_get_inst);
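kvm_get_inst() dispatches on which guest segment the faulting PC falls in. A sketch of that classification, assuming the trap-and-emulate guest layout where guest KSEG0 is remapped to 0x40000000 (the real constants live in asm/kvm_host.h):

    #include <stdio.h>

    /* Assumed guest segment bases; see asm/kvm_host.h for the real values */
    #define KVM_GUEST_KSEG0    0x40000000UL
    #define KVM_GUEST_KSEG23   0x60000000UL
    #define KVM_GUEST_KSEGX(a) ((a) & 0x60000000UL)

    /* Mirrors the fetch-path dispatch in kvm_get_inst() */
    static const char *fetch_path(unsigned long pc)
    {
            if (KVM_GUEST_KSEGX(pc) < KVM_GUEST_KSEG0 ||
                KVM_GUEST_KSEGX(pc) == KVM_GUEST_KSEG23)
                    return "mapped: probe host TLB, fall back to guest TLB";
            if (KVM_GUEST_KSEGX(pc) == KVM_GUEST_KSEG0)
                    return "unmapped: translate KSEG0 GVA straight to HPA";
            return "illegal";
    }

    int main(void)
    {
            printf("%s\n", fetch_path(0x00400000UL));   /* guest user */
            printf("%s\n", fetch_path(0x40001000UL));   /* guest kseg0 */
            return 0;
    }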
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
new file mode 100644 (file)
index 0000000..fd7257b
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "opcode.h"
+#include "interrupt.h"
+
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+       gpa_t gpa;
+       uint32_t kseg = KSEGX(gva);
+
+       if (kseg == CKSEG0 || kseg == CKSEG1) {
+               gpa = CPHYSADDR(gva);
+       } else {
+               kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+               kvm_mips_dump_host_tlbs();
+               gpa = KVM_INVALID_ADDR;
+       }
+
+       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+
+       return gpa;
+}
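For the unmapped CKSEG0/CKSEG1 segments the GVA-to-GPA translation is pure masking. A standalone illustration using the standard 32-bit MIPS segment macros:

    #include <stdio.h>

    /* 32-bit MIPS fixed segments */
    #define CKSEG0 0x80000000UL             /* cached, unmapped */
    #define CKSEG1 0xa0000000UL             /* uncached, unmapped */
    #define KSEGX(a)     ((a) & 0xe0000000UL)
    #define CPHYSADDR(a) ((a) & 0x1fffffffUL)

    int main(void)
    {
            unsigned long gva = 0x80001234UL;

            if (KSEGX(gva) == CKSEG0 || KSEGX(gva) == CKSEG1)
                    printf("gpa = %#lx\n", CPHYSADDR(gva)); /* 0x1234 */
            return 0;
    }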
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
+               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+       else
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+       switch (er) {
+       case EMULATE_DONE:
+               ret = RESUME_GUEST;
+               break;
+
+       case EMULATE_FAIL:
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       case EMULATE_WAIT:
+               run->exit_reason = KVM_EXIT_INTR;
+               ret = RESUME_HOST;
+               break;
+
+       default:
+               BUG();
+       }
+       return ret;
+}
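The handler keys off the Cause.CE field (bits 29:28), which names the coprocessor that raised the fault; CE == 1 means the FPU. A standalone illustration of the field extraction:

    #include <stdio.h>

    #define CAUSEB_CE 28                    /* Cause.CE shift */
    #define CAUSEF_CE (3UL << CAUSEB_CE)    /* Cause bits 29:28 */

    int main(void)
    {
            unsigned long cause = 1UL << CAUSEB_CE; /* CE == 1 */
            unsigned long ce = (cause & CAUSEF_CE) >> CAUSEB_CE;

            /* CE == 1: the FPU (CP1) faulted; anything else is emulated */
            printf("faulting coprocessor: %lu\n", ce);
            return 0;
    }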
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 ||
+           KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+               kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+               if (er == EMULATE_DONE) {
+                       ret = RESUME_GUEST;
+               } else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /*
+                * XXXKYMA: The guest kernel does not expect to get this fault
+                * when we are not using HIGHMEM. Need to address this in a
+                * HIGHMEM kernel
+                */
+               kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       } else {
+               kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) &&
+           KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 ||
+                  KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+               kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE) {
+                       ret = RESUME_GUEST;
+               } else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /*
+                * All KSEG0 faults are handled by KVM, as the guest kernel does
+                * not expect to ever get them
+                */
+               if (kvm_mips_handle_kseg0_tlb_fault(vcpu->arch.host_cp0_badvaddr,
+                                                   vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) &&
+           KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 ||
+                  KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+               kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
+                         vcpu->arch.pc, badvaddr);
+
+               /*
+                * User Address (UA) fault. This can happen if:
+                * (1) the TLB entry is not present/valid in both the guest and
+                *     shadow host TLBs; in this case we pass the fault on to
+                *     the guest kernel and let it handle it;
+                * (2) the TLB entry is present in the guest TLB but not in the
+                *     shadow host TLB; in this case we inject the entry from
+                *     the guest TLB into the shadow host TLB.
+                */
+
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE) {
+                       ret = RESUME_GUEST;
+               } else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_handle_kseg0_tlb_fault(vcpu->arch.host_cp0_badvaddr,
+                                                   vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu) &&
+           (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+               kvm_debug("Emulate Store to MMIO space\n");
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       kvm_err("Emulate Store to MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       kvm_err("Emulate Load from MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE) {
+               ret = RESUME_GUEST;
+       } else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE) {
+               ret = RESUME_GUEST;
+       } else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE) {
+               ret = RESUME_GUEST;
+       } else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t config1;
+       int vcpu_id = vcpu->vcpu_id;
+
+       /*
+        * Arch specific stuff: set up the config registers properly so that
+        * the guest will come up as expected. For now we simulate a MIPS 24Kc.
+        */
+       kvm_write_c0_guest_prid(cop0, 0x00019300);
+       kvm_write_c0_guest_config(cop0,
+                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+                                 (MMU_TYPE_R4000 << CP0C0_MT));
+
+       /* Read the cache characteristics from the host Config1 Register */
+       config1 = (read_c0_config1() & ~0x7f);
+
+       /* Set up MMU size */
+       config1 &= ~(0x3f << 25);
+       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+       /* We unset some bits that we aren't emulating */
+       config1 &=
+           ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+             (1 << CP0C1_WR) | (1 << CP0C1_CA));
+       kvm_write_c0_guest_config1(cop0, config1);
+
+       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+       kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
+                                        (1 << CP0C3_ULRI));
+
+       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+       /*
+        * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5)
+        */
+       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+       /* Use the vcpu id as the CPUNum in the EBase register to handle SMP guests */
+       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+       return 0;
+}
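The Config1 MMU-size field (bits 30:25) encodes "entries minus one", which is why the setup writes KVM_MIPS_GUEST_TLB_SIZE - 1. A sketch of the encode/decode round trip, assuming a 64-entry guest TLB:

    #include <stdio.h>

    #define MMUSIZE_SHIFT  25       /* Config1[30:25] = MMU size - 1 */
    #define GUEST_TLB_SIZE 64       /* assumed KVM_MIPS_GUEST_TLB_SIZE */

    int main(void)
    {
            unsigned long config1 = 0;

            /* Encode, exactly as vcpu_setup does */
            config1 &= ~(0x3fUL << MMUSIZE_SHIFT);
            config1 |= (unsigned long)(GUEST_TLB_SIZE - 1) << MMUSIZE_SHIFT;

            /* The guest decodes it back the architecturally defined way */
            printf("guest sees %lu TLB entries\n",
                   ((config1 >> MMUSIZE_SHIFT) & 0x3f) + 1);
            return 0;
    }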
+
+static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
+                                    const struct kvm_one_reg *reg,
+                                    s64 *v)
+{
+       switch (reg->id) {
+       case KVM_REG_MIPS_CP0_COUNT:
+               *v = kvm_mips_read_count(vcpu);
+               break;
+       case KVM_REG_MIPS_COUNT_CTL:
+               *v = vcpu->arch.count_ctl;
+               break;
+       case KVM_REG_MIPS_COUNT_RESUME:
+               *v = ktime_to_ns(vcpu->arch.count_resume);
+               break;
+       case KVM_REG_MIPS_COUNT_HZ:
+               *v = vcpu->arch.count_hz;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
+                                    const struct kvm_one_reg *reg,
+                                    s64 v)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int ret = 0;
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_CP0_COUNT:
+               kvm_mips_write_count(vcpu, v);
+               break;
+       case KVM_REG_MIPS_CP0_COMPARE:
+               kvm_mips_write_compare(vcpu, v);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               /*
+                * If the timer is stopped or started (DC bit) it must look
+                * atomic with changes to the interrupt pending bits (TI, IRQ5).
+                * A timer interrupt should not happen in between.
+                */
+               if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
+                       if (v & CAUSEF_DC) {
+                               /* disable timer first */
+                               kvm_mips_count_disable_cause(vcpu);
+                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+                       } else {
+                               /* enable timer last */
+                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+                               kvm_mips_count_enable_cause(vcpu);
+                       }
+               } else {
+                       kvm_write_c0_guest_cause(cop0, v);
+               }
+               break;
+       case KVM_REG_MIPS_COUNT_CTL:
+               ret = kvm_mips_set_count_ctl(vcpu, v);
+               break;
+       case KVM_REG_MIPS_COUNT_RESUME:
+               ret = kvm_mips_set_count_resume(vcpu, v);
+               break;
+       case KVM_REG_MIPS_COUNT_HZ:
+               ret = kvm_mips_set_count_hz(vcpu, v);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return ret;
+}
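The KVM_REG_MIPS_CP0_CAUSE case only takes the stop/start path when the DC bit (bit 27) actually changes, which the XOR test detects. A standalone illustration:

    #include <stdio.h>

    #define CAUSEF_DC (1UL << 27)   /* Cause.DC: disable the Count register */

    int main(void)
    {
            unsigned long old = 0;          /* current guest Cause */
            unsigned long val = CAUSEF_DC;  /* value userspace wants to set */

            /* Only take the stop/start path when DC actually flips */
            if ((old ^ val) & CAUSEF_DC)
                    printf("DC flipped: %s timer around the Cause write\n",
                           (val & CAUSEF_DC) ? "disable" : "enable");
            else
                    printf("DC unchanged: plain Cause write\n");
            return 0;
    }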
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+       /* exit handlers */
+       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+       .handle_syscall = kvm_trap_emul_handle_syscall,
+       .handle_res_inst = kvm_trap_emul_handle_res_inst,
+       .handle_break = kvm_trap_emul_handle_break,
+
+       .vm_init = kvm_trap_emul_vm_init,
+       .vcpu_init = kvm_trap_emul_vcpu_init,
+       .vcpu_setup = kvm_trap_emul_vcpu_setup,
+       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+       .queue_timer_int = kvm_mips_queue_timer_int_cb,
+       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+       .queue_io_int = kvm_mips_queue_io_int_cb,
+       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+       .irq_deliver = kvm_mips_irq_deliver_cb,
+       .irq_clear = kvm_mips_irq_clear_cb,
+       .get_one_reg = kvm_trap_emul_get_one_reg,
+       .set_one_reg = kvm_trap_emul_set_one_reg,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+       *install_callbacks = &kvm_trap_emul_callbacks;
+       return 0;
+}
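The whole file reduces to filling in one ops table and handing it to the core via kvm_mips_emulation_init(). A stripped-down sketch of that install pattern (the names here are illustrative, not the kernel's):

    #include <stdio.h>

    /* A stand-in for the kvm_mips_callbacks ops table */
    struct demo_callbacks {
            const char *(*name)(void);
            int (*handle_break)(int code);
    };

    static const char *demo_name(void) { return "trap_emul"; }
    static int demo_handle_break(int code) { return code + 1; }

    static struct demo_callbacks demo_impl = {
            .name = demo_name,
            .handle_break = demo_handle_break,
    };

    /* Mirrors kvm_mips_emulation_init(): hand the core a pointer to install */
    static int demo_emulation_init(struct demo_callbacks **install)
    {
            *install = &demo_impl;
            return 0;
    }

    int main(void)
    {
            struct demo_callbacks *cb;

            if (!demo_emulation_init(&cb))
                    printf("%s: %d\n", cb->name(), cb->handle_break(41));
            return 0;
    }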