/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);
	gva_t gkseg = KVM_GUEST_KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else if (gkseg == KVM_GUEST_KSEG0)
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}
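/*
 * Illustrative note (added for clarity, not from the original source):
 * KSEG0/KSEG1 GVAs map linearly to physical addresses, so for example
 * CPHYSADDR(0x80001000) yields GPA 0x00001000, while a guest KSEG0 GVA
 * such as 0x40001000 (guest KSEG0 being remapped to 0x40000000 in trap &
 * emulate mode) yields the same GPA via KVM_GUEST_CPHYSADDR(). Anything
 * else would need a TLB lookup and is treated as an error here.
 */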
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	case EMULATE_HYPERCALL:
		ret = kvm_mips_handle_hypcall(vcpu);
		break;

	default:
		BUG();
	}
	return ret;
}
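/*
 * Note (added for clarity, a sketch of the intended flow): a coprocessor
 * unusable exception with CE=1 means the guest touched the FPU. If the
 * guest is allowed an FPU and has CU1 set, kvm_own_fpu() lazily restores
 * the real FPU state; otherwise the exception is reflected back into the
 * guest. CE!=1 means a trapped privileged instruction, which is decoded
 * and emulated by kvm_mips_emulate_inst().
 */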
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
			     struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* A code fetch fault doesn't count as an MMIO */
	if (kvm_is_ifetch_fault(&vcpu->arch)) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the load */
	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate load from MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}
static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
			      struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the store */
	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate store to MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}
static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
			       struct kvm_vcpu *vcpu, bool store)
{
	if (store)
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	else
		return kvm_mips_bad_load(cause, opc, run, vcpu);
}
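/*
 * Usage note (added for clarity): kvm_mips_bad_access() is a convenience
 * wrapper used by the TLB miss path below, where whether the faulting
 * access was a load or a store is only known from the "store" flag, e.g.
 *
 *	ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
 */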
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	}
}
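/*
 * Note (added for clarity): a TLB modified exception fires on a store
 * through a TLB entry whose dirty (D) bit is clear. If the matching
 * *guest* TLB entry is clean, the exception logically belongs to the
 * guest and is reflected into it; if the guest entry is dirty, the host
 * mapping was merely stale and is refilled, falling back to MMIO
 * emulation only when the page is genuinely not writable.
 */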
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
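/*
 * Summary (added for clarity) of the address ranges handled above: the
 * commpage (guest kernel mode only), guest-mapped segments (useg and
 * kseg2/3, faulted into the shadow TLB or passed on to the guest), guest
 * KSEG0 (always handled by KVM), and host KSeg0/KSeg1 addresses reached
 * via EVA (treated as MMIO). Everything else is an internal error.
 */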
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
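/*
 * Note (added for clarity): the two address error handlers above are
 * symmetric; KSeg0/KSeg1 accesses are assumed to be guest MMIO and are
 * emulated, while anything else indicates a bug and exits to userland
 * with KVM_EXIT_INTERNAL_ERROR.
 */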
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_TE:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}
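/*
 * Note (added for clarity): the two PGDs allocated above root the
 * guest-kernel-mode and guest-user-mode GVA->HPA page tables; they are
 * torn down again by kvm_mips_emul_free_gva_pt() from
 * kvm_trap_emul_vcpu_uninit() below.
 */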
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
		}
	}
	pgd_free(NULL, pgd);
}
static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Status */
	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}
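/*
 * Note (added for clarity, assuming the standard CP0 PRId layout): the
 * PRId values written above place the company ID in bits 23:16 and the
 * processor ID in bits 15:8, so 0x00019300 reads back as a MIPS 24Kc and
 * 0x00010000 as a generic MIPS Technologies CPU.
 */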
static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}
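/*
 * Illustrative userspace usage (hypothetical, not from the original
 * source): the register list exposed above is consumed via the standard
 * KVM_GET_REG_LIST ioctl, after which individual registers can be read
 * with KVM_GET_ONE_REG, e.g.
 *
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_CP0_STATUS,
 *		.addr = (__u64)&status,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */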
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = kvm_read_c0_guest_entrylo0(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = kvm_read_c0_guest_entrylo1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = (long)kvm_read_c0_guest_intctl(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		kvm_write_c0_guest_entrylo0(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		kvm_write_c0_guest_entrylo1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		/* No VInt, so no VS, read-only for now */
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
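/*
 * Note (added for clarity): writable Config fields are updated with a
 * read-modify-write pattern so that only bits allowed by the
 * per-register write mask can change: change = (cur ^ v) & wrmask picks
 * out the permitted differences, and v = cur ^ change folds them back
 * into the current value before writing.
 */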
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		kvm_mips_suspend_mm(cpu);
		ehb();
	}

	return 0;
}
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
		     asid_version_mask(cpu)))
			get_new_mmu_context(current->mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, current->mm));
		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
		kvm_mips_resume_mm(cpu);
		ehb();
	}

	return 0;
}
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	if (likely(!vcpu->requests))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			cpu_context(i, kern_mm) = 0;
			cpu_context(i, user_mm) = 0;
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm, cpu);
			htw_stop();
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
			htw_start();
		}
	}
}
/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * a flush IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}
/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a
 * matching call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to
	 * send an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, it's safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}
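/*
 * Illustrative usage sketch (hypothetical caller, not from the original
 * source): a GVA space access outside of guest mode is bracketed as
 *
 *	kvm_trap_emul_gva_lockless_begin(vcpu);
 *	err = get_user(val, (u32 __user *)gva);
 *	kvm_trap_emul_gva_lockless_end(vcpu);
 *
 * so that a concurrent TLB flush request either completes before the
 * access begins or its IPI is delayed (IRQs are off) until it ends.
 */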
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				cpu_context(i, user_mm) = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
}
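/*
 * Note (added for clarity): guest user mode invalidation is lazy; GVA
 * mappings and host ASIDs for the user MM are only flushed when the
 * guest's EntryHi ASID is observed to have changed since the last
 * user-mode execution, rather than on every guest ASID write.
 */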
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.check_extension = kvm_trap_emul_check_extension,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}