arch/mips/kvm/trap_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

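/**
 * kvm_trap_emul_gva_to_gpa_cb() - Convert a guest virtual address to a
 * guest physical address.
 * @gva: Guest virtual address.
 *
 * Host CKSEG0/CKSEG1 addresses map directly to their physical counterpart,
 * and guest KSeg0 addresses map via the guest KSeg0 physical offset. Any
 * other address is an error and yields KVM_INVALID_ADDR.
 */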
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);
        gva_t gkseg = KVM_GUEST_KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else if (gkseg == KVM_GUEST_KSEG0)
                gpa = KVM_GUEST_CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}

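/**
 * kvm_trap_emul_no_handler() - Fail on an exception with no handler.
 * @vcpu: Virtual CPU context.
 *
 * Log the unhandled exception along with the faulting instruction and VCPU
 * state, then fail the KVM_RUN ioctl with KVM_EXIT_INTERNAL_ERROR.
 */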
static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 inst = 0;

        /*
         * Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                exccode, opc, inst, badvaddr,
                kvm_read_c0_guest_status(vcpu->arch.cop0));
        kvm_arch_vcpu_dump_regs(vcpu);
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return RESUME_HOST;
}

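/**
 * kvm_trap_emul_handle_cop_unusable() - Handle a Coprocessor Unusable
 * exception.
 * @vcpu: Virtual CPU context.
 *
 * If the guest is permitted to use the FPU and has it enabled, restore the
 * FPU state for it; otherwise deliver a COP1 Unusable exception to the
 * guest. Any other coprocessor unusable exception is handled by emulating
 * the trapping instruction.
 */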
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        case EMULATE_HYPERCALL:
                ret = kvm_mips_handle_hypcall(vcpu);
                break;

        default:
                BUG();
        }
        return ret;
}

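/**
 * kvm_mips_bad_load() - Emulate a load from an unhandled address as MMIO.
 * @cause: CP0 Cause register value at the time of the fault.
 * @opc: Guest PC of the faulting instruction.
 * @run: KVM run structure for the MMIO exit.
 * @vcpu: Virtual CPU context.
 *
 * Fetch the faulting instruction and emulate it as an MMIO load, exiting to
 * userspace with KVM_EXIT_MMIO to complete the access.
 */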
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* A code fetch fault doesn't count as an MMIO */
        if (kvm_is_ifetch_fault(&vcpu->arch)) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the load */
        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate load from MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

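/**
 * kvm_mips_bad_store() - Emulate a store to an unhandled address as MMIO.
 * @cause: CP0 Cause register value at the time of the fault.
 * @opc: Guest PC of the faulting instruction.
 * @run: KVM run structure for the MMIO exit.
 * @vcpu: Virtual CPU context.
 *
 * Fetch the faulting instruction and emulate it as an MMIO store, exiting to
 * userspace with KVM_EXIT_MMIO to complete the access.
 */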
static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
                              struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the store */
        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate store to MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
                               struct kvm_vcpu *vcpu, bool store)
{
        if (store)
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        else
                return kvm_mips_bad_load(cause, opc, run, vcpu);
}

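/**
 * kvm_trap_emul_handle_tlb_mod() - Handle a TLB Modified exception.
 * @vcpu: Virtual CPU context.
 *
 * For guest mapped segments, consult the guest TLB: if the matching entry
 * is not dirty, relay the TLB Modified exception to the guest; otherwise
 * update the shadow host TLB, falling back to MMIO store emulation for
 * pages that are not writable. Guest KSeg0 faults refill the shadow TLB
 * directly, and anything else is treated as an MMIO store.
 */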
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        struct kvm_mips_tlb *tlb;
        unsigned long entryhi;
        int index;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                /*
                 * First find the mapping in the guest TLB. If the failure to
                 * write was due to the guest TLB, it should be up to the guest
                 * to handle it.
                 */
                entryhi = (badvaddr & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
                index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

                /*
                 * These should never happen.
                 * They would indicate stale host TLB entries.
                 */
                if (unlikely(index < 0)) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                tlb = vcpu->arch.guest_tlb + index;
                if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /*
                 * Guest entry not dirty? That would explain the TLB modified
                 * exception. Relay that on to the guest so it can handle it.
                 */
                if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
                        kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
                        return RESUME_GUEST;
                }

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
                                                         true))
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else {
                /* host kernel addresses are all handled as MMIO */
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        }
}

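/**
 * kvm_trap_emul_handle_tlb_miss() - Handle a TLB load/store miss.
 * @vcpu: Virtual CPU context.
 * @store: Whether the faulting access was a store.
 *
 * Dispatch on the faulting address: commpage faults in guest kernel mode
 * are fixed up directly, guest user address faults are passed to the guest
 * or used to refill the shadow host TLB, guest KSeg0 faults are handled
 * entirely by KVM, and KSeg0/KSeg1 faults in guest kernel mode (possible
 * with EVA) are treated as MMIO.
 */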
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault, this could happen if
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs, in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow, in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB
                 */

                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
                        ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else {
                kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

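/**
 * kvm_trap_emul_handle_addr_err_st() - Handle an Address Error (store)
 * exception.
 * @vcpu: Virtual CPU context.
 *
 * Stores to KSeg0/KSeg1 in guest kernel mode are MMIO accesses; anything
 * else is an internal error.
 */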
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                ret = kvm_mips_bad_store(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

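/**
 * kvm_trap_emul_handle_addr_err_ld() - Handle an Address Error (load)
 * exception.
 * @vcpu: Virtual CPU context.
 *
 * Loads from KSeg0/KSeg1 are MMIO accesses; anything else is an internal
 * error.
 */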
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                ret = kvm_mips_bad_load(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

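/*
 * The handlers below simply deliver the corresponding exception to the
 * guest's context and resume guest execution, failing the KVM_RUN ioctl
 * with an internal error if emulation fails.
 */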
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
                er = EMULATE_DONE;
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

static int kvm_trap_emul_hardware_enable(void)
{
        return 0;
}

static void kvm_trap_emul_hardware_disable(void)
{
}

static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_MIPS_TE:
                r = 1;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}

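/**
 * kvm_trap_emul_vcpu_init() - Allocate the GVA page tables for a VCPU.
 * @vcpu: Virtual CPU context.
 *
 * Allocate separate GVA -> HPA page tables for guest kernel mode and guest
 * user mode.
 */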
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

        /*
         * Allocate GVA -> HPA page tables.
         * MIPS doesn't use the mm_struct pointer argument.
         */
        kern_mm->pgd = pgd_alloc(kern_mm);
        if (!kern_mm->pgd)
                return -ENOMEM;

        user_mm->pgd = pgd_alloc(user_mm);
        if (!user_mm->pgd) {
                pgd_free(kern_mm, kern_mm->pgd);
                return -ENOMEM;
        }

        return 0;
}

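/**
 * kvm_mips_emul_free_gva_pt() - Free a GVA page table and its leaf tables.
 * @pgd: Page global directory to tear down.
 *
 * Walk and free everything mapped below CKSEG0 (0x80000000). Entries at or
 * above that boundary are host kernel page tables copied from init_mm.pgd
 * and must be left alone.
 */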
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
        /* Don't free host kernel page tables copied from init_mm.pgd */
        const unsigned long end = 0x80000000;
        unsigned long pgd_va, pud_va, pmd_va;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;

        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
                if (pgd_none(pgd[i]))
                        continue;

                pgd_va = (unsigned long)i << PGDIR_SHIFT;
                if (pgd_va >= end)
                        break;
                pud = pud_offset(pgd + i, 0);
                for (j = 0; j < PTRS_PER_PUD; j++) {
                        if (pud_none(pud[j]))
                                continue;

                        pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
                        if (pud_va >= end)
                                break;
                        pmd = pmd_offset(pud + j, 0);
                        for (k = 0; k < PTRS_PER_PMD; k++) {
                                if (pmd_none(pmd[k]))
                                        continue;

                                pmd_va = pud_va | (k << PMD_SHIFT);
                                if (pmd_va >= end)
                                        break;
                                pte = pte_offset(pmd + k, 0);
                                pte_free_kernel(NULL, pte);
                        }
                        pmd_free(NULL, pmd);
                }
                pud_free(NULL, pud);
        }
        pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

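/**
 * kvm_trap_emul_vcpu_setup() - Initialise the guest CP0 state.
 * @vcpu: Virtual CPU context.
 *
 * Set up the emulated CP0 registers for first run: timer frequency, PRId,
 * the Config register hierarchy (largely derived from the host), Status,
 * IntCtl, EBase, and the PC at the guest reset vector.
 */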
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /* Start off the timer at 100 MHz */
        kvm_mips_init_count(vcpu, 100*1000*1000);

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* DCache line size not correctly reported in Config1 on Octeon CPUs */
        if (cpu_dcache_line_size()) {
                config1 &= ~MIPS_CONF1_DL;
                config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
                            MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
        }

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /* Status */
        kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        /* Put PC at guest reset vector */
        vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

        return 0;
}

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
        /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
        kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *slot)
{
        kvm_trap_emul_flush_shadow_all(kvm);
}

static u64 kvm_trap_emul_get_one_regs[] = {
        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_ENTRYLO0,
        KVM_REG_MIPS_CP0_ENTRYLO1,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_USERLOCAL,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_INTCTL,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_PRID,
        KVM_REG_MIPS_CP0_EBASE,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG4,
        KVM_REG_MIPS_CP0_CONFIG5,
        KVM_REG_MIPS_CP0_CONFIG7,
        KVM_REG_MIPS_CP0_ERROREPC,
        KVM_REG_MIPS_CP0_KSCRATCH1,
        KVM_REG_MIPS_CP0_KSCRATCH2,
        KVM_REG_MIPS_CP0_KSCRATCH3,
        KVM_REG_MIPS_CP0_KSCRATCH4,
        KVM_REG_MIPS_CP0_KSCRATCH5,
        KVM_REG_MIPS_CP0_KSCRATCH6,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
                         sizeof(kvm_trap_emul_get_one_regs)))
                return -EFAULT;
        indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

        return 0;
}

static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                *v = (long)kvm_read_c0_guest_index(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                *v = kvm_read_c0_guest_entrylo0(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                *v = kvm_read_c0_guest_entrylo1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                *v = (long)kvm_read_c0_guest_context(cop0);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                *v = (long)kvm_read_c0_guest_userlocal(cop0);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                *v = (long)kvm_read_c0_guest_pagemask(cop0);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                *v = (long)kvm_read_c0_guest_wired(cop0);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                *v = (long)kvm_read_c0_guest_hwrena(cop0);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                *v = (long)kvm_read_c0_guest_badvaddr(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                *v = (long)kvm_read_c0_guest_entryhi(cop0);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                *v = (long)kvm_read_c0_guest_compare(cop0);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                *v = (long)kvm_read_c0_guest_status(cop0);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                *v = (long)kvm_read_c0_guest_intctl(cop0);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                *v = (long)kvm_read_c0_guest_cause(cop0);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                *v = (long)kvm_read_c0_guest_epc(cop0);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                *v = (long)kvm_read_c0_guest_prid(cop0);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                *v = (long)kvm_read_c0_guest_ebase(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                *v = (long)kvm_read_c0_guest_config(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                *v = (long)kvm_read_c0_guest_config1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                *v = (long)kvm_read_c0_guest_config2(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                *v = (long)kvm_read_c0_guest_config3(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                *v = (long)kvm_read_c0_guest_config4(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                *v = (long)kvm_read_c0_guest_config5(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                *v = (long)kvm_read_c0_guest_config7(cop0);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                *v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                *v = (long)kvm_read_c0_guest_kscratch1(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                *v = (long)kvm_read_c0_guest_kscratch2(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                *v = (long)kvm_read_c0_guest_kscratch3(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                *v = (long)kvm_read_c0_guest_kscratch4(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                *v = (long)kvm_read_c0_guest_kscratch5(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                *v = (long)kvm_read_c0_guest_kscratch6(cop0);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                kvm_write_c0_guest_index(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                kvm_write_c0_guest_entrylo0(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                kvm_write_c0_guest_entrylo1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                kvm_write_c0_guest_context(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                kvm_write_c0_guest_userlocal(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                kvm_write_c0_guest_pagemask(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                kvm_write_c0_guest_wired(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                kvm_write_c0_guest_hwrena(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                kvm_write_c0_guest_badvaddr(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                kvm_write_c0_guest_entryhi(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                kvm_write_c0_guest_status(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                /* No VInt, so no VS, read-only for now */
                break;
        case KVM_REG_MIPS_CP0_EPC:
                kvm_write_c0_guest_epc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                kvm_write_c0_guest_prid(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                /*
                 * Allow core number to be written, but the exception base must
                 * remain in guest KSeg0.
                 */
                kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
                                          v);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                /* writes ignored */
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                kvm_write_c0_guest_kscratch1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                kvm_write_c0_guest_kscratch2(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                kvm_write_c0_guest_kscratch3(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                kvm_write_c0_guest_kscratch4(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                kvm_write_c0_guest_kscratch5(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                kvm_write_c0_guest_kscratch6(cop0, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}

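/**
 * kvm_trap_emul_vcpu_load() - Load the VCPU context onto a physical CPU.
 * @vcpu: Virtual CPU context.
 * @cpu: CPU being loaded onto.
 *
 * If we were preempted while in guest context (PF_VCPU), reload the GVA
 * ASID for the current guest mode and suspend use of the host process
 * address space.
 */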
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;

        /*
         * Were we in guest context? If so, restore the appropriate ASID based
         * on the mode of the Guest (Kernel/User).
         */
        if (current->flags & PF_VCPU) {
                mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
                    asid_version_mask(cpu))
                        get_new_mmu_context(mm, cpu);
                write_c0_entryhi(cpu_asid(cpu, mm));
                TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                kvm_mips_suspend_mm(cpu);
                ehb();
        }

        return 0;
}

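/**
 * kvm_trap_emul_vcpu_put() - Unload the VCPU context from a physical CPU.
 * @vcpu: Virtual CPU context.
 * @cpu: CPU being unloaded from.
 *
 * Release the FPU/MSA state and, if we were preempted in guest context,
 * restore the normal Linux process memory map.
 */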
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_lose_fpu(vcpu);

        if (current->flags & PF_VCPU) {
                /* Restore normal Linux process memory map */
                if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
                     asid_version_mask(cpu)))
                        get_new_mmu_context(current->mm, cpu);
                write_c0_entryhi(cpu_asid(cpu, current->mm));
                TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
                kvm_mips_resume_mm(cpu);
                ehb();
        }

        return 0;
}

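/**
 * kvm_trap_emul_check_requests() - Handle pending VCPU requests.
 * @vcpu: Virtual CPU context.
 * @cpu: Current CPU.
 * @reload_asid: Whether to reload the host ASID after a TLB flush.
 *
 * On a KVM_REQ_TLB_FLUSH request, flush both the kernel and user GVA page
 * tables and invalidate the GVA ASIDs on all CPUs, optionally regenerating
 * and reloading the ASID for the current guest mode.
 */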
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
                                         bool reload_asid)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        int i;

        if (likely(!vcpu->requests))
                return;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                /*
                 * Both kernel & user GVA mappings must be invalidated. The
                 * caller is just about to check whether the ASID is stale
                 * anyway so no need to reload it here.
                 */
                kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
                kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
                for_each_possible_cpu(i) {
                        cpu_context(i, kern_mm) = 0;
                        cpu_context(i, user_mm) = 0;
                }

                /* Generate new ASID for current mode */
                if (reload_asid) {
                        mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                        get_new_mmu_context(mm, cpu);
                        htw_stop();
                        write_c0_entryhi(cpu_asid(cpu, mm));
                        TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                        htw_start();
                }
        }
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu: VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
        /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
        WARN_ON_ONCE(irqs_disabled());

        /*
         * The caller is about to access the GVA space, so we set the mode to
         * force TLB flush requests to send an IPI, and also disable IRQs to
         * delay IPI handling until kvm_trap_emul_gva_lockless_end().
         */
        local_irq_disable();

        /*
         * Make sure the read of VCPU requests is not reordered ahead of the
         * write to vcpu->mode, or we could miss a TLB flush request while
         * the requester sees the VCPU as outside of guest mode and not needing
         * an IPI.
         */
        smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

        /*
         * If a TLB flush has been requested (potentially while
         * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
         * before accessing the GVA space, and be sure to reload the ASID if
         * necessary as it'll be immediately used.
         *
         * TLB flush requests after this check will trigger an IPI due to the
         * mode change above, which will be delayed due to IRQs disabled.
         */
        kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu: VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a
 * matching call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
        /*
         * Make sure the write to vcpu->mode is not reordered in front of GVA
         * accesses, or a TLB flush requester may not think it necessary to send
         * an IPI.
         */
        smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

        /*
         * Now that the access to GVA space is complete, it's safe for pending
         * TLB flush request IPIs to be handled (which indicates completion).
         */
        local_irq_enable();
}

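/**
 * kvm_trap_emul_vcpu_reenter() - Prepare to re-enter the guest.
 * @run: KVM run structure.
 * @vcpu: Virtual CPU context.
 *
 * Handle pending VCPU requests, lazily flush stale user GVA mappings when
 * the guest ASID has changed, and regenerate the host ASID for the current
 * guest mode if it has become stale.
 */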
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int i, cpu = smp_processor_id();
        unsigned int gasid;

        /*
         * No need to reload ASID, IRQs are disabled already so there's no rush,
         * and we'll check if we need to regenerate below anyway before
         * re-entering the guest.
         */
        kvm_trap_emul_check_requests(vcpu, cpu, false);

        if (KVM_GUEST_KERNEL_MODE(vcpu)) {
                mm = kern_mm;
        } else {
                mm = user_mm;

                /*
                 * Lazy host ASID regeneration / PT flush for guest user mode.
                 * If the guest ASID has changed since the last guest usermode
                 * execution, invalidate the stale TLB entries and flush GVA PT
                 * entries too.
                 */
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
                        for_each_possible_cpu(i)
                                cpu_context(i, user_mm) = 0;
                        vcpu->arch.last_user_gasid = gasid;
                }
        }

        /*
         * Check if ASID is stale. This may happen due to a TLB flush request or
         * a lazy user MM invalidation.
         */
        if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
            asid_version_mask(cpu))
                get_new_mmu_context(mm, cpu);
}

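/**
 * kvm_trap_emul_vcpu_run() - Run the guest until it exits.
 * @run: KVM run structure.
 * @vcpu: Virtual CPU context.
 *
 * Deliver any pending interrupts, switch into the guest address space with
 * page faulting and the hardware page table walker disabled, run the guest,
 * and restore the normal Linux process memory map on the (possibly
 * different) CPU we return on.
 */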
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();
        int r;

        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_trap_emul_vcpu_reenter(run, vcpu);

        /*
         * We use user accessors to access guest memory, but we don't want to
         * invoke Linux page faulting.
         */
        pagefault_disable();

        /* Disable hardware page table walking while in guest */
        htw_stop();

        /*
         * While in guest context we're in the guest's address space, not the
         * host process address space, so we need to be careful not to confuse
         * e.g. cache management IPIs.
         */
        kvm_mips_suspend_mm(cpu);

        r = vcpu->arch.vcpu_run(run, vcpu);

        /* We may have migrated while handling guest exits */
        cpu = smp_processor_id();

        /* Restore normal Linux process memory map */
        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             asid_version_mask(cpu)))
                get_new_mmu_context(current->mm, cpu);
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
        kvm_mips_resume_mm(cpu);

        htw_start();

        pagefault_enable();

        return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
        .handle_guest_exit = kvm_trap_emul_no_handler,

        .hardware_enable = kvm_trap_emul_hardware_enable,
        .hardware_disable = kvm_trap_emul_hardware_disable,
        .check_extension = kvm_trap_emul_check_extension,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
        .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_load = kvm_trap_emul_vcpu_load,
        .vcpu_put = kvm_trap_emul_vcpu_put,
        .vcpu_run = kvm_trap_emul_vcpu_run,
        .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}