KVM: MIPS: Drop other CPU ASIDs on guest MMU changes
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch/mips/kvm/kvm_mips_emul.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: Instruction/Exception emulation
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/kvm_host.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
17 #include <linux/fs.h>
18 #include <linux/bootmem.h>
19 #include <linux/random.h>
20 #include <asm/page.h>
21 #include <asm/cacheflush.h>
22 #include <asm/cpu-info.h>
23 #include <asm/mmu_context.h>
24 #include <asm/tlbflush.h>
25 #include <asm/inst.h>
26
27 #undef CONFIG_MIPS_MT
28 #include <asm/r4kcache.h>
29 #define CONFIG_MIPS_MT
30
31 #include "kvm_mips_opcode.h"
32 #include "kvm_mips_int.h"
33 #include "kvm_mips_comm.h"
34
35 #include "trace.h"
36
37 /*
38 * Compute the return address and emulate the branch, if required.
39 * This function should only be called when the instruction being emulated is in a branch delay slot.
40 */
41 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
42 unsigned long instpc)
43 {
44 unsigned int dspcontrol;
45 union mips_instruction insn;
46 struct kvm_vcpu_arch *arch = &vcpu->arch;
47 long epc = instpc;
48 long nextpc = KVM_INVALID_INST;
49
50 if (epc & 3)
51 goto unaligned;
52
53 /*
54 * Read the instruction
55 */
56 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
57
58 if (insn.word == KVM_INVALID_INST)
59 return KVM_INVALID_INST;
60
61 switch (insn.i_format.opcode) {
62 /*
63 * jr and jalr are in r_format format.
64 */
65 case spec_op:
66 switch (insn.r_format.func) {
67 case jalr_op:
68 arch->gprs[insn.r_format.rd] = epc + 8;
69 /* Fall through */
70 case jr_op:
71 nextpc = arch->gprs[insn.r_format.rs];
72 break;
73 }
74 break;
75
76 /*
77 * This group contains:
78 * bltz_op, bgez_op, bltzl_op, bgezl_op,
79 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
80 */
81 case bcond_op:
82 switch (insn.i_format.rt) {
83 case bltz_op:
84 case bltzl_op:
85 if ((long)arch->gprs[insn.i_format.rs] < 0)
86 epc = epc + 4 + (insn.i_format.simmediate << 2);
87 else
88 epc += 8;
89 nextpc = epc;
90 break;
91
92 case bgez_op:
93 case bgezl_op:
94 if ((long)arch->gprs[insn.i_format.rs] >= 0)
95 epc = epc + 4 + (insn.i_format.simmediate << 2);
96 else
97 epc += 8;
98 nextpc = epc;
99 break;
100
101 case bltzal_op:
102 case bltzall_op:
103 arch->gprs[31] = epc + 8;
104 if ((long)arch->gprs[insn.i_format.rs] < 0)
105 epc = epc + 4 + (insn.i_format.simmediate << 2);
106 else
107 epc += 8;
108 nextpc = epc;
109 break;
110
111 case bgezal_op:
112 case bgezall_op:
113 arch->gprs[31] = epc + 8;
114 if ((long)arch->gprs[insn.i_format.rs] >= 0)
115 epc = epc + 4 + (insn.i_format.simmediate << 2);
116 else
117 epc += 8;
118 nextpc = epc;
119 break;
120 case bposge32_op:
121 if (!cpu_has_dsp)
122 goto sigill;
123
124 dspcontrol = rddsp(0x01);
125
126 if (dspcontrol >= 32) {
127 epc = epc + 4 + (insn.i_format.simmediate << 2);
128 } else
129 epc += 8;
130 nextpc = epc;
131 break;
132 }
133 break;
134
135 /*
136 * These are unconditional and in j_format.
137 */
138 case jal_op:
139 arch->gprs[31] = instpc + 8;	/* Fall through */
140 case j_op:
141 epc += 4;
142 epc >>= 28;
143 epc <<= 28;
144 epc |= (insn.j_format.target << 2);
145 nextpc = epc;
146 break;
147
148 /*
149 * These are conditional and in i_format.
150 */
151 case beq_op:
152 case beql_op:
153 if (arch->gprs[insn.i_format.rs] ==
154 arch->gprs[insn.i_format.rt])
155 epc = epc + 4 + (insn.i_format.simmediate << 2);
156 else
157 epc += 8;
158 nextpc = epc;
159 break;
160
161 case bne_op:
162 case bnel_op:
163 if (arch->gprs[insn.i_format.rs] !=
164 arch->gprs[insn.i_format.rt])
165 epc = epc + 4 + (insn.i_format.simmediate << 2);
166 else
167 epc += 8;
168 nextpc = epc;
169 break;
170
171 case blez_op: /* not really i_format */
172 case blezl_op:
173 /* rt field assumed to be zero */
174 if ((long)arch->gprs[insn.i_format.rs] <= 0)
175 epc = epc + 4 + (insn.i_format.simmediate << 2);
176 else
177 epc += 8;
178 nextpc = epc;
179 break;
180
181 case bgtz_op:
182 case bgtzl_op:
183 /* rt field assumed to be zero */
184 if ((long)arch->gprs[insn.i_format.rs] > 0)
185 epc = epc + 4 + (insn.i_format.simmediate << 2);
186 else
187 epc += 8;
188 nextpc = epc;
189 break;
190
191 /*
192 * And now the FPA/cp1 branch instructions.
193 */
194 case cop1_op:
195 printk("%s: unsupported cop1_op\n", __func__);
196 break;
197 }
198
199 return nextpc;
200
201 unaligned:
202 printk("%s: unaligned epc\n", __func__);
203 return nextpc;
204
205 sigill:
206 printk("%s: DSP branch but not DSP ASE\n", __func__);
207 return nextpc;
208 }
209
210 enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
211 {
212 unsigned long branch_pc;
213 enum emulation_result er = EMULATE_DONE;
214
215 if (cause & CAUSEF_BD) {
216 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
217 if (branch_pc == KVM_INVALID_INST) {
218 er = EMULATE_FAIL;
219 } else {
220 vcpu->arch.pc = branch_pc;
221 kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
222 }
223 } else
224 vcpu->arch.pc += 4;
225
226 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
227
228 return er;
229 }
230
231 /* Every time the compare register is written to, we need to decide when to fire
232 * the timer that represents timer ticks to the GUEST.
233 *
234 */
235 enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
236 {
237 struct mips_coproc *cop0 = vcpu->arch.cop0;
238 enum emulation_result er = EMULATE_DONE;
239
240 /* If COUNT is enabled */
241 if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
242 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
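		/* Restart the 10 ms tick used to raise timer interrupts to the guest */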
243 hrtimer_start(&vcpu->arch.comparecount_timer,
244 ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
245 } else {
246 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
247 }
248
249 return er;
250 }
251
252 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
253 {
254 struct mips_coproc *cop0 = vcpu->arch.cop0;
255 enum emulation_result er = EMULATE_DONE;
256
257 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
258 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
259 kvm_read_c0_guest_epc(cop0));
260 kvm_clear_c0_guest_status(cop0, ST0_EXL);
261 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
262
263 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
264 kvm_clear_c0_guest_status(cop0, ST0_ERL);
265 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
266 } else {
267 printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
268 vcpu->arch.pc);
269 er = EMULATE_FAIL;
270 }
271
272 return er;
273 }
274
275 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
276 {
277 enum emulation_result er = EMULATE_DONE;
278
279 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
280 vcpu->arch.pending_exceptions);
281
282 ++vcpu->stat.wait_exits;
283 trace_kvm_exit(vcpu, WAIT_EXITS);
284 if (!vcpu->arch.pending_exceptions) {
285 vcpu->arch.wait = 1;
286 kvm_vcpu_block(vcpu);
287
288 /* If we are runnable, then definitely go off to user space to check if any
289 * I/O interrupts are pending.
290 */
291 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
292 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
293 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
294 }
295 }
296
297 return er;
298 }
299
300 /* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so that we can catch
301  * this if things ever change
302 */
303 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
304 {
305 struct mips_coproc *cop0 = vcpu->arch.cop0;
306 enum emulation_result er = EMULATE_FAIL;
307 uint32_t pc = vcpu->arch.pc;
308
309 printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
310 return er;
311 }
312
313 /**
314 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
315 * @vcpu: VCPU with changed mappings.
316 * @tlb: TLB entry being removed.
317 *
318 * This is called to indicate a single change in guest MMU mappings, so that we
319 * can arrange TLB flushes on this and other CPUs.
320 */
321 static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
322 struct kvm_mips_tlb *tlb)
323 {
324 int cpu, i;
325 bool user;
326
327 /* No need to flush for entries which are already invalid */
328 if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
329 return;
330 /* User address space doesn't need flushing for KSeg2/3 changes */
331 user = tlb->tlb_hi < KVM_GUEST_KSEG0;
332
333 preempt_disable();
334
335 /*
336 * Probe the shadow host TLB for the entry being overwritten, if one
337 * matches, invalidate it
338 */
339 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
340
341 /* Invalidate the whole ASID on other CPUs */
342 cpu = smp_processor_id();
343 for_each_possible_cpu(i) {
344 if (i == cpu)
345 continue;
346 if (user)
347 vcpu->arch.guest_user_asid[i] = 0;
348 vcpu->arch.guest_kernel_asid[i] = 0;
349 }
350
351 preempt_enable();
352 }
353
354 /* Write Guest TLB Entry @ Index */
355 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
356 {
357 struct mips_coproc *cop0 = vcpu->arch.cop0;
358 int index = kvm_read_c0_guest_index(cop0);
359 enum emulation_result er = EMULATE_DONE;
360 struct kvm_mips_tlb *tlb = NULL;
361 uint32_t pc = vcpu->arch.pc;
362
363 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
364 printk("%s: illegal index: %d\n", __func__, index);
365 printk
366 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
367 pc, index, kvm_read_c0_guest_entryhi(cop0),
368 kvm_read_c0_guest_entrylo0(cop0),
369 kvm_read_c0_guest_entrylo1(cop0),
370 kvm_read_c0_guest_pagemask(cop0));
371 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
372 }
373
374 tlb = &vcpu->arch.guest_tlb[index];
375
376 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
377
378 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
379 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
380 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
381 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
382
383 kvm_debug
384 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
385 pc, index, kvm_read_c0_guest_entryhi(cop0),
386 kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
387 kvm_read_c0_guest_pagemask(cop0));
388
389 return er;
390 }
391
392 /* Write Guest TLB Entry @ Random Index */
393 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
394 {
395 struct mips_coproc *cop0 = vcpu->arch.cop0;
396 enum emulation_result er = EMULATE_DONE;
397 struct kvm_mips_tlb *tlb = NULL;
398 uint32_t pc = vcpu->arch.pc;
399 int index;
400
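	/* Pick a (pseudo-)random replacement index within the guest TLB */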
401 #if 1
402 get_random_bytes(&index, sizeof(index));
403 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
404 #else
405 index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
406 #endif
407
408 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
409 printk("%s: illegal index: %d\n", __func__, index);
410 return EMULATE_FAIL;
411 }
412
413 tlb = &vcpu->arch.guest_tlb[index];
414
415 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
416
417 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
418 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
419 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
420 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
421
422 kvm_debug
423 ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
424 pc, index, kvm_read_c0_guest_entryhi(cop0),
425 kvm_read_c0_guest_entrylo0(cop0),
426 kvm_read_c0_guest_entrylo1(cop0));
427
428 return er;
429 }
430
431 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
432 {
433 struct mips_coproc *cop0 = vcpu->arch.cop0;
434 long entryhi = kvm_read_c0_guest_entryhi(cop0);
435 enum emulation_result er = EMULATE_DONE;
436 uint32_t pc = vcpu->arch.pc;
437 int index = -1;
438
439 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
440
441 kvm_write_c0_guest_index(cop0, index);
442
443 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
444 index);
445
446 return er;
447 }
448
449 enum emulation_result
450 kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
451 struct kvm_run *run, struct kvm_vcpu *vcpu)
452 {
453 struct mips_coproc *cop0 = vcpu->arch.cop0;
454 enum emulation_result er = EMULATE_DONE;
455 int32_t rt, rd, copz, sel, co_bit, op;
456 uint32_t pc = vcpu->arch.pc;
457 unsigned long curr_pc;
458 int cpu, i;
459
460 /*
461 * Update PC and hold onto current PC in case there is
462 * an error and we want to rollback the PC
463 */
464 curr_pc = vcpu->arch.pc;
465 er = update_pc(vcpu, cause);
466 if (er == EMULATE_FAIL) {
467 return er;
468 }
469
470 copz = (inst >> 21) & 0x1f;
471 rt = (inst >> 16) & 0x1f;
472 rd = (inst >> 11) & 0x1f;
473 sel = inst & 0x7;
474 co_bit = (inst >> 25) & 1;
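	/* copz selects the CP0 sub-op (MFC0/MTC0/...); rd/sel pick the CP0 register, rt the GPR */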
475
476 /* Verify that the register is valid */
477 if (rd > MIPS_CP0_DESAVE) {
478 printk("Invalid rd: %d\n", rd);
479 er = EMULATE_FAIL;
480 goto done;
481 }
482
483 if (co_bit) {
484 op = (inst) & 0xff;
485
486 switch (op) {
487 case tlbr_op: /* Read indexed TLB entry */
488 er = kvm_mips_emul_tlbr(vcpu);
489 break;
490 case tlbwi_op: /* Write indexed */
491 er = kvm_mips_emul_tlbwi(vcpu);
492 break;
493 case tlbwr_op: /* Write random */
494 er = kvm_mips_emul_tlbwr(vcpu);
495 break;
496 case tlbp_op: /* TLB Probe */
497 er = kvm_mips_emul_tlbp(vcpu);
498 break;
499 case rfe_op:
500 printk("!!!COP0_RFE!!!\n");
501 break;
502 case eret_op:
503 er = kvm_mips_emul_eret(vcpu);
504 goto dont_update_pc;
505 break;
506 case wait_op:
507 er = kvm_mips_emul_wait(vcpu);
508 break;
509 }
510 } else {
511 switch (copz) {
512 case mfc_op:
513 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
514 cop0->stat[rd][sel]++;
515 #endif
516 /* Get reg */
517 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
518 /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
519 vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
520 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
521 vcpu->arch.gprs[rt] = 0x0;
522 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
523 kvm_mips_trans_mfc0(inst, opc, vcpu);
524 #endif
525 }
526 else {
527 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
528
529 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
530 kvm_mips_trans_mfc0(inst, opc, vcpu);
531 #endif
532 }
533
534 kvm_debug
535 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
536 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
537
538 break;
539
540 case dmfc_op:
541 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
542 break;
543
544 case mtc_op:
545 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
546 cop0->stat[rd][sel]++;
547 #endif
548 if ((rd == MIPS_CP0_TLB_INDEX)
549 && (vcpu->arch.gprs[rt] >=
550 KVM_MIPS_GUEST_TLB_SIZE)) {
551 printk("Invalid TLB Index: %ld",
552 vcpu->arch.gprs[rt]);
553 er = EMULATE_FAIL;
554 break;
555 }
556 #define C0_EBASE_CORE_MASK 0xff
557 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
558 /* Preserve CORE number */
559 kvm_change_c0_guest_ebase(cop0,
560 ~(C0_EBASE_CORE_MASK),
561 vcpu->arch.gprs[rt]);
562 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
563 kvm_read_c0_guest_ebase(cop0));
564 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
565 uint32_t nasid =
566 vcpu->arch.gprs[rt] & ASID_MASK;
567 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
568 &&
569 ((kvm_read_c0_guest_entryhi(cop0) &
570 ASID_MASK) != nasid)) {
571
572 kvm_debug
573 ("MTCz, change ASID from %#lx to %#lx\n",
574 kvm_read_c0_guest_entryhi(cop0) &
575 ASID_MASK,
576 vcpu->arch.gprs[rt] & ASID_MASK);
577
578 preempt_disable();
579 /* Blow away the shadow host TLBs */
580 kvm_mips_flush_host_tlb(1);
581 cpu = smp_processor_id();
582 for_each_possible_cpu(i)
583 if (i != cpu) {
584 vcpu->arch.guest_user_asid[i] = 0;
585 vcpu->arch.guest_kernel_asid[i] = 0;
586 }
587 preempt_enable();
588 }
589 kvm_write_c0_guest_entryhi(cop0,
590 vcpu->arch.gprs[rt]);
591 }
592 /* Are we writing to COUNT */
593 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
594 /* Linux doesn't seem to write into COUNT; for now simply ignore
595  * the write rather than flagging an error
596 */
597 /*er = EMULATE_FAIL; */
598 goto done;
599 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
600 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
601 pc, kvm_read_c0_guest_compare(cop0),
602 vcpu->arch.gprs[rt]);
603
604 /* If we are writing to COMPARE */
605 /* Clear pending timer interrupt, if any */
606 kvm_mips_callbacks->dequeue_timer_int(vcpu);
607 kvm_write_c0_guest_compare(cop0,
608 vcpu->arch.gprs[rt]);
609 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
610 kvm_write_c0_guest_status(cop0,
611 vcpu->arch.gprs[rt]);
612 /* Make sure that CU1 and NMI bits are never set */
613 kvm_clear_c0_guest_status(cop0,
614 (ST0_CU1 | ST0_NMI));
615
616 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
617 kvm_mips_trans_mtc0(inst, opc, vcpu);
618 #endif
619 } else {
620 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
621 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
622 kvm_mips_trans_mtc0(inst, opc, vcpu);
623 #endif
624 }
625
626 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
627 rd, sel, cop0->reg[rd][sel]);
628 break;
629
630 case dmtc_op:
631 printk
632 ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
633 vcpu->arch.pc, rt, rd, sel);
634 er = EMULATE_FAIL;
635 break;
636
637 case mfmcz_op:
638 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
639 cop0->stat[MIPS_CP0_STATUS][0]++;
640 #endif
641 if (rt != 0) {
642 vcpu->arch.gprs[rt] =
643 kvm_read_c0_guest_status(cop0);
644 }
645 /* EI */
646 if (inst & 0x20) {
647 kvm_debug("[%#lx] mfmcz_op: EI\n",
648 vcpu->arch.pc);
649 kvm_set_c0_guest_status(cop0, ST0_IE);
650 } else {
651 kvm_debug("[%#lx] mfmcz_op: DI\n",
652 vcpu->arch.pc);
653 kvm_clear_c0_guest_status(cop0, ST0_IE);
654 }
655
656 break;
657
658 case wrpgpr_op:
659 {
660 uint32_t css =
661 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
662 uint32_t pss =
663 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
664 /* We don't support any shadow register sets, so SRSCtl[PSS] and SRSCtl[CSS] must both be 0 */
665 if (css || pss) {
666 er = EMULATE_FAIL;
667 break;
668 }
669 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
670 vcpu->arch.gprs[rt]);
671 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
672 }
673 break;
674 default:
675 printk
676 ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
677 vcpu->arch.pc, copz);
678 er = EMULATE_FAIL;
679 break;
680 }
681 }
682
683 done:
684 /*
685 * Rollback PC only if emulation was unsuccessful
686 */
687 if (er == EMULATE_FAIL) {
688 vcpu->arch.pc = curr_pc;
689 }
690
691 dont_update_pc:
692 /*
693 * This is for special instructions whose emulation
694 * updates the PC, so do not overwrite the PC under
695 * any circumstances
696 */
697
698 return er;
699 }
700
701 enum emulation_result
702 kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
703 struct kvm_run *run, struct kvm_vcpu *vcpu)
704 {
705 enum emulation_result er = EMULATE_DO_MMIO;
706 int32_t op, base, rt, offset;
707 uint32_t bytes;
708 void *data = run->mmio.data;
709 unsigned long curr_pc;
710
711 /*
712 * Update PC and hold onto current PC in case there is
713 * an error and we want to rollback the PC
714 */
715 curr_pc = vcpu->arch.pc;
716 er = update_pc(vcpu, cause);
717 if (er == EMULATE_FAIL)
718 return er;
719
720 rt = (inst >> 16) & 0x1f;
721 base = (inst >> 21) & 0x1f;
722 offset = inst & 0xffff;
723 op = (inst >> 26) & 0x3f;
724
725 switch (op) {
726 case sb_op:
727 bytes = 1;
728 if (bytes > sizeof(run->mmio.data)) {
729 kvm_err("%s: bad MMIO length: %d\n", __func__,
730 run->mmio.len);
731 }
732 run->mmio.phys_addr =
733 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
734 host_cp0_badvaddr);
735 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
736 er = EMULATE_FAIL;
737 break;
738 }
739 run->mmio.len = bytes;
740 run->mmio.is_write = 1;
741 vcpu->mmio_needed = 1;
742 vcpu->mmio_is_write = 1;
743 *(u8 *) data = vcpu->arch.gprs[rt];
744 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
745 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
746 *(uint8_t *) data);
747
748 break;
749
750 case sw_op:
751 bytes = 4;
752 if (bytes > sizeof(run->mmio.data)) {
753 kvm_err("%s: bad MMIO length: %d\n", __func__,
754 run->mmio.len);
755 }
756 run->mmio.phys_addr =
757 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
758 host_cp0_badvaddr);
759 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
760 er = EMULATE_FAIL;
761 break;
762 }
763
764 run->mmio.len = bytes;
765 run->mmio.is_write = 1;
766 vcpu->mmio_needed = 1;
767 vcpu->mmio_is_write = 1;
768 *(uint32_t *) data = vcpu->arch.gprs[rt];
769
770 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
771 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
772 vcpu->arch.gprs[rt], *(uint32_t *) data);
773 break;
774
775 case sh_op:
776 bytes = 2;
777 if (bytes > sizeof(run->mmio.data)) {
778 kvm_err("%s: bad MMIO length: %d\n", __func__,
779 run->mmio.len);
780 }
781 run->mmio.phys_addr =
782 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
783 host_cp0_badvaddr);
784 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
785 er = EMULATE_FAIL;
786 break;
787 }
788
789 run->mmio.len = bytes;
790 run->mmio.is_write = 1;
791 vcpu->mmio_needed = 1;
792 vcpu->mmio_is_write = 1;
793 *(uint16_t *) data = vcpu->arch.gprs[rt];
794
795 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
796 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
797 vcpu->arch.gprs[rt], *(uint32_t *) data);
798 break;
799
800 default:
801 printk("Store not yet supported");
802 er = EMULATE_FAIL;
803 break;
804 }
805
806 /*
807 * Rollback PC if emulation was unsuccessful
808 */
809 if (er == EMULATE_FAIL) {
810 vcpu->arch.pc = curr_pc;
811 }
812
813 return er;
814 }
815
816 enum emulation_result
817 kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
818 struct kvm_run *run, struct kvm_vcpu *vcpu)
819 {
820 enum emulation_result er = EMULATE_DO_MMIO;
821 unsigned long curr_pc;
822 int32_t op, base, rt, offset;
823 uint32_t bytes;
824
825 rt = (inst >> 16) & 0x1f;
826 base = (inst >> 21) & 0x1f;
827 offset = inst & 0xffff;
828 op = (inst >> 26) & 0x3f;
829
830 /*
831 * Find the resume PC now while we have safe and easy access to the
832 * prior branch instruction, and save it for
833 * kvm_mips_complete_mmio_load() to restore later.
834 */
835 curr_pc = vcpu->arch.pc;
836 er = update_pc(vcpu, cause);
837 if (er == EMULATE_FAIL)
838 return er;
839 vcpu->arch.io_pc = vcpu->arch.pc;
840 vcpu->arch.pc = curr_pc;
841
842 vcpu->arch.io_gpr = rt;
843
844 switch (op) {
845 case lw_op:
846 bytes = 4;
847 if (bytes > sizeof(run->mmio.data)) {
848 kvm_err("%s: bad MMIO length: %d\n", __func__,
849 run->mmio.len);
850 er = EMULATE_FAIL;
851 break;
852 }
853 run->mmio.phys_addr =
854 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
855 host_cp0_badvaddr);
856 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
857 er = EMULATE_FAIL;
858 break;
859 }
860
861 run->mmio.len = bytes;
862 run->mmio.is_write = 0;
863 vcpu->mmio_needed = 1;
864 vcpu->mmio_is_write = 0;
865 break;
866
867 case lh_op:
868 case lhu_op:
869 bytes = 2;
870 if (bytes > sizeof(run->mmio.data)) {
871 kvm_err("%s: bad MMIO length: %d\n", __func__,
872 run->mmio.len);
873 er = EMULATE_FAIL;
874 break;
875 }
876 run->mmio.phys_addr =
877 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
878 host_cp0_badvaddr);
879 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
880 er = EMULATE_FAIL;
881 break;
882 }
883
884 run->mmio.len = bytes;
885 run->mmio.is_write = 0;
886 vcpu->mmio_needed = 1;
887 vcpu->mmio_is_write = 0;
888
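		/* mmio_needed == 2 tells kvm_mips_complete_mmio_load() to sign-extend the result */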
889 if (op == lh_op)
890 vcpu->mmio_needed = 2;
891 else
892 vcpu->mmio_needed = 1;
893
894 break;
895
896 case lbu_op:
897 case lb_op:
898 bytes = 1;
899 if (bytes > sizeof(run->mmio.data)) {
900 kvm_err("%s: bad MMIO length: %d\n", __func__,
901 run->mmio.len);
902 er = EMULATE_FAIL;
903 break;
904 }
905 run->mmio.phys_addr =
906 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
907 host_cp0_badvaddr);
908 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
909 er = EMULATE_FAIL;
910 break;
911 }
912
913 run->mmio.len = bytes;
914 run->mmio.is_write = 0;
915 vcpu->mmio_is_write = 0;
916
917 if (op == lb_op)
918 vcpu->mmio_needed = 2;
919 else
920 vcpu->mmio_needed = 1;
921
922 break;
923
924 default:
925 printk("Load not yet supported");
926 er = EMULATE_FAIL;
927 break;
928 }
929
930 return er;
931 }
932
933 int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
934 {
935 unsigned long offset = (va & ~PAGE_MASK);
936 struct kvm *kvm = vcpu->kvm;
937 unsigned long pa;
938 gfn_t gfn;
939 pfn_t pfn;
940
941 gfn = va >> PAGE_SHIFT;
942
943 if (gfn >= kvm->arch.guest_pmap_npages) {
944 printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
945 kvm_mips_dump_host_tlbs();
946 kvm_arch_vcpu_dump_regs(vcpu);
947 return -1;
948 }
949 pfn = kvm->arch.guest_pmap[gfn];
950 pa = (pfn << PAGE_SHIFT) | offset;
951
952 printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
953
954 mips32_SyncICache(CKSEG0ADDR(pa), 32);
955 return 0;
956 }
957
958 #define MIPS_CACHE_OP_INDEX_INV 0x0
959 #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
960 #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
961 #define MIPS_CACHE_OP_IMP 0x3
962 #define MIPS_CACHE_OP_HIT_INV 0x4
963 #define MIPS_CACHE_OP_FILL_WB_INV 0x5
964 #define MIPS_CACHE_OP_HIT_HB 0x6
965 #define MIPS_CACHE_OP_FETCH_LOCK 0x7
966
967 #define MIPS_CACHE_ICACHE 0x0
968 #define MIPS_CACHE_DCACHE 0x1
969 #define MIPS_CACHE_SEC 0x3
970
971 enum emulation_result
972 kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
973 struct kvm_run *run, struct kvm_vcpu *vcpu)
974 {
975 struct mips_coproc *cop0 = vcpu->arch.cop0;
976 extern void (*r4k_blast_dcache) (void);
977 extern void (*r4k_blast_icache) (void);
978 enum emulation_result er = EMULATE_DONE;
979 int32_t offset, cache, op_inst, op, base;
980 struct kvm_vcpu_arch *arch = &vcpu->arch;
981 unsigned long va;
982 unsigned long curr_pc;
983
984 /*
985 * Update PC and hold onto current PC in case there is
986 * an error and we want to rollback the PC
987 */
988 curr_pc = vcpu->arch.pc;
989 er = update_pc(vcpu, cause);
990 if (er == EMULATE_FAIL)
991 return er;
992
993 base = (inst >> 21) & 0x1f;
994 op_inst = (inst >> 16) & 0x1f;
995 offset = (int16_t)inst;
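	/* cache and op are subfields of the rt field: bits 17:16 select the cache, bits 20:18 the operation */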
996 cache = (inst >> 16) & 0x3;
997 op = (inst >> 18) & 0x7;
998
999 va = arch->gprs[base] + offset;
1000
1001 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1002 cache, op, base, arch->gprs[base], offset);
1003
1004 /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate
1005 * the caches entirely by stepping through all the ways/indexes
1006 */
1007 if (op == MIPS_CACHE_OP_INDEX_INV) {
1008 kvm_debug
1009 ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1010 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1011 arch->gprs[base], offset);
1012
1013 if (cache == MIPS_CACHE_DCACHE)
1014 r4k_blast_dcache();
1015 else if (cache == MIPS_CACHE_ICACHE)
1016 r4k_blast_icache();
1017 else {
1018 printk("%s: unsupported CACHE INDEX operation\n",
1019 __func__);
1020 return EMULATE_FAIL;
1021 }
1022
1023 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1024 kvm_mips_trans_cache_index(inst, opc, vcpu);
1025 #endif
1026 goto done;
1027 }
1028
1029 preempt_disable();
1030 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1031
1032 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1033 kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1034 kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1035 __func__, va, vcpu, read_c0_entryhi());
1036 er = EMULATE_FAIL;
1037 preempt_enable();
1038 goto done;
1039 }
1040 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1041 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1042 int index;
1043
1044 /* If an entry already exists then skip */
1045 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
1046 goto skip_fault;
1047 }
1048
1049 /* If address not in the guest TLB, then give the guest a fault; the
1050  * resulting handler will do the right thing
1051 */
1052 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1053 (kvm_read_c0_guest_entryhi
1054 (cop0) & ASID_MASK));
1055
1056 if (index < 0) {
1057 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1058 vcpu->arch.host_cp0_badvaddr = va;
1059 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1060 vcpu);
1061 preempt_enable();
1062 goto dont_update_pc;
1063 } else {
1064 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1065 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1066 if (!TLB_IS_VALID(*tlb, va)) {
1067 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1068 run, vcpu);
1069 preempt_enable();
1070 goto dont_update_pc;
1071 }
1072 /* We fault an entry from the guest tlb to the shadow host TLB */
1073 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1074 NULL, NULL)) {
1075 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1076 __func__, va, index, vcpu,
1077 read_c0_entryhi());
1078 er = EMULATE_FAIL;
1079 preempt_enable();
1080 goto done;
1081 }
1082 }
1083 } else {
1084 printk
1085 ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1086 cache, op, base, arch->gprs[base], offset);
1087 er = EMULATE_FAIL;
1088 preempt_enable();
1089 goto dont_update_pc;
1090
1091 }
1092
1093 skip_fault:
1094 /* XXXKYMA: Only the subset of cache ops used by Linux is supported */
1095 if (cache == MIPS_CACHE_DCACHE
1096 && (op == MIPS_CACHE_OP_FILL_WB_INV
1097 || op == MIPS_CACHE_OP_HIT_INV)) {
1098 flush_dcache_line(va);
1099
1100 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1101 /* Replace the CACHE instruction with a SYNCI; not the same, but it avoids a trap */
1102 kvm_mips_trans_cache_va(inst, opc, vcpu);
1103 #endif
1104 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1105 flush_dcache_line(va);
1106 flush_icache_line(va);
1107
1108 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1109 /* Replace the CACHE instruction, with a SYNCI */
1110 kvm_mips_trans_cache_va(inst, opc, vcpu);
1111 #endif
1112 } else {
1113 printk
1114 ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1115 cache, op, base, arch->gprs[base], offset);
1116 er = EMULATE_FAIL;
1117 preempt_enable();
1118 goto dont_update_pc;
1119 }
1120
1121 preempt_enable();
1122
1123 dont_update_pc:
1124 /*
1125 * Rollback PC
1126 */
1127 vcpu->arch.pc = curr_pc;
1128 done:
1129 return er;
1130 }
1131
1132 enum emulation_result
1133 kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1134 struct kvm_run *run, struct kvm_vcpu *vcpu)
1135 {
1136 enum emulation_result er = EMULATE_DONE;
1137 uint32_t inst;
1138
1139 /*
1140 * Fetch the instruction.
1141 */
1142 if (cause & CAUSEF_BD) {
1143 opc += 1;
1144 }
1145
1146 inst = kvm_get_inst(opc, vcpu);
1147
1148 switch (((union mips_instruction)inst).r_format.opcode) {
1149 case cop0_op:
1150 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1151 break;
1152 case sb_op:
1153 case sh_op:
1154 case sw_op:
1155 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1156 break;
1157 case lb_op:
1158 case lbu_op:
1159 case lhu_op:
1160 case lh_op:
1161 case lw_op:
1162 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1163 break;
1164
1165 case cache_op:
1166 ++vcpu->stat.cache_exits;
1167 trace_kvm_exit(vcpu, CACHE_EXITS);
1168 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1169 break;
1170
1171 default:
1172 printk("Instruction emulation not supported (%p/%#x)\n", opc,
1173 inst);
1174 kvm_arch_vcpu_dump_regs(vcpu);
1175 er = EMULATE_FAIL;
1176 break;
1177 }
1178
1179 return er;
1180 }
1181
1182 enum emulation_result
1183 kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1184 struct kvm_run *run, struct kvm_vcpu *vcpu)
1185 {
1186 struct mips_coproc *cop0 = vcpu->arch.cop0;
1187 struct kvm_vcpu_arch *arch = &vcpu->arch;
1188 enum emulation_result er = EMULATE_DONE;
1189
1190 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1191 /* save old pc */
1192 kvm_write_c0_guest_epc(cop0, arch->pc);
1193 kvm_set_c0_guest_status(cop0, ST0_EXL);
1194
1195 if (cause & CAUSEF_BD)
1196 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1197 else
1198 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1199
1200 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1201
1202 kvm_change_c0_guest_cause(cop0, (0xff),
1203 (T_SYSCALL << CAUSEB_EXCCODE));
1204
1205 /* Set PC to the exception entry point */
1206 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1207
1208 } else {
1209 printk("Trying to deliver SYSCALL when EXL is already set\n");
1210 er = EMULATE_FAIL;
1211 }
1212
1213 return er;
1214 }
1215
1216 enum emulation_result
1217 kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1218 struct kvm_run *run, struct kvm_vcpu *vcpu)
1219 {
1220 struct mips_coproc *cop0 = vcpu->arch.cop0;
1221 struct kvm_vcpu_arch *arch = &vcpu->arch;
1222 enum emulation_result er = EMULATE_DONE;
1223 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1224 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1225
1226 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1227 /* save old pc */
1228 kvm_write_c0_guest_epc(cop0, arch->pc);
1229 kvm_set_c0_guest_status(cop0, ST0_EXL);
1230
1231 if (cause & CAUSEF_BD)
1232 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1233 else
1234 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1235
1236 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1237 arch->pc);
1238
1239 /* set pc to the exception entry point */
1240 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1241
1242 } else {
1243 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1244 arch->pc);
1245
1246 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1247 }
1248
1249 kvm_change_c0_guest_cause(cop0, (0xff),
1250 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1251
1252 /* setup badvaddr, context and entryhi registers for the guest */
1253 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1254 /* XXXKYMA: is the context register used by linux??? */
1255 kvm_write_c0_guest_entryhi(cop0, entryhi);
1256 /* Blow away the shadow host TLBs */
1257 kvm_mips_flush_host_tlb(1);
1258
1259 return er;
1260 }
1261
1262 enum emulation_result
1263 kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1264 struct kvm_run *run, struct kvm_vcpu *vcpu)
1265 {
1266 struct mips_coproc *cop0 = vcpu->arch.cop0;
1267 struct kvm_vcpu_arch *arch = &vcpu->arch;
1268 enum emulation_result er = EMULATE_DONE;
1269 unsigned long entryhi =
1270 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1271 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1272
1273 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1274 /* save old pc */
1275 kvm_write_c0_guest_epc(cop0, arch->pc);
1276 kvm_set_c0_guest_status(cop0, ST0_EXL);
1277
1278 if (cause & CAUSEF_BD)
1279 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1280 else
1281 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1282
1283 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1284 arch->pc);
1285
1286 /* set pc to the exception entry point */
1287 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1288
1289 } else {
1290 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1291 arch->pc);
1292 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1293 }
1294
1295 kvm_change_c0_guest_cause(cop0, (0xff),
1296 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1297
1298 /* setup badvaddr, context and entryhi registers for the guest */
1299 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1300 /* XXXKYMA: is the context register used by linux??? */
1301 kvm_write_c0_guest_entryhi(cop0, entryhi);
1302 /* Blow away the shadow host TLBs */
1303 kvm_mips_flush_host_tlb(1);
1304
1305 return er;
1306 }
1307
1308 enum emulation_result
1309 kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1310 struct kvm_run *run, struct kvm_vcpu *vcpu)
1311 {
1312 struct mips_coproc *cop0 = vcpu->arch.cop0;
1313 struct kvm_vcpu_arch *arch = &vcpu->arch;
1314 enum emulation_result er = EMULATE_DONE;
1315 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1316 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1317
1318 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1319 /* save old pc */
1320 kvm_write_c0_guest_epc(cop0, arch->pc);
1321 kvm_set_c0_guest_status(cop0, ST0_EXL);
1322
1323 if (cause & CAUSEF_BD)
1324 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1325 else
1326 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1327
1328 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1329 arch->pc);
1330
1331 /* Set PC to the exception entry point */
1332 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1333 } else {
1334 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1335 arch->pc);
1336 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1337 }
1338
1339 kvm_change_c0_guest_cause(cop0, (0xff),
1340 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1341
1342 /* setup badvaddr, context and entryhi registers for the guest */
1343 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1344 /* XXXKYMA: is the context register used by linux??? */
1345 kvm_write_c0_guest_entryhi(cop0, entryhi);
1346 /* Blow away the shadow host TLBs */
1347 kvm_mips_flush_host_tlb(1);
1348
1349 return er;
1350 }
1351
1352 enum emulation_result
1353 kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1354 struct kvm_run *run, struct kvm_vcpu *vcpu)
1355 {
1356 struct mips_coproc *cop0 = vcpu->arch.cop0;
1357 struct kvm_vcpu_arch *arch = &vcpu->arch;
1358 enum emulation_result er = EMULATE_DONE;
1359 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1360 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1361
1362 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1363 /* save old pc */
1364 kvm_write_c0_guest_epc(cop0, arch->pc);
1365 kvm_set_c0_guest_status(cop0, ST0_EXL);
1366
1367 if (cause & CAUSEF_BD)
1368 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1369 else
1370 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1371
1372 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1373 arch->pc);
1374
1375 /* Set PC to the exception entry point */
1376 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1377 } else {
1378 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1379 arch->pc);
1380 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1381 }
1382
1383 kvm_change_c0_guest_cause(cop0, (0xff),
1384 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1385
1386 /* setup badvaddr, context and entryhi registers for the guest */
1387 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1388 /* XXXKYMA: is the context register used by linux??? */
1389 kvm_write_c0_guest_entryhi(cop0, entryhi);
1390 /* Blow away the shadow host TLBs */
1391 kvm_mips_flush_host_tlb(1);
1392
1393 return er;
1394 }
1395
1396 /* TLBMOD: store into address matching TLB with Dirty bit off */
1397 enum emulation_result
1398 kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1399 struct kvm_run *run, struct kvm_vcpu *vcpu)
1400 {
1401 enum emulation_result er = EMULATE_DONE;
1402
1403 #ifdef DEBUG
1404 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1405 	(kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK);
1406 /* If address not in the guest TLB, then we are in trouble */
1407 int index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1408 if (index < 0) {
1409 /* XXXKYMA Invalidate and retry */
1410 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1411 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1412 __func__, entryhi);
1413 kvm_mips_dump_guest_tlbs(vcpu);
1414 kvm_mips_dump_host_tlbs();
1415 return EMULATE_FAIL;
1416 }
1417 #endif
1418
1419 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1420 return er;
1421 }
1422
1423 enum emulation_result
1424 kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1425 struct kvm_run *run, struct kvm_vcpu *vcpu)
1426 {
1427 struct mips_coproc *cop0 = vcpu->arch.cop0;
1428 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1429 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1430 struct kvm_vcpu_arch *arch = &vcpu->arch;
1431 enum emulation_result er = EMULATE_DONE;
1432
1433 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1434 /* save old pc */
1435 kvm_write_c0_guest_epc(cop0, arch->pc);
1436 kvm_set_c0_guest_status(cop0, ST0_EXL);
1437
1438 if (cause & CAUSEF_BD)
1439 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1440 else
1441 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1442
1443 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1444 arch->pc);
1445
1446 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1447 } else {
1448 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1449 arch->pc);
1450 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1451 }
1452
1453 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1454
1455 /* setup badvaddr, context and entryhi registers for the guest */
1456 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1457 /* XXXKYMA: is the context register used by linux??? */
1458 kvm_write_c0_guest_entryhi(cop0, entryhi);
1459 /* Blow away the shadow host TLBs */
1460 kvm_mips_flush_host_tlb(1);
1461
1462 return er;
1463 }
1464
1465 enum emulation_result
1466 kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1467 struct kvm_run *run, struct kvm_vcpu *vcpu)
1468 {
1469 struct mips_coproc *cop0 = vcpu->arch.cop0;
1470 struct kvm_vcpu_arch *arch = &vcpu->arch;
1471 enum emulation_result er = EMULATE_DONE;
1472
1473 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1474 /* save old pc */
1475 kvm_write_c0_guest_epc(cop0, arch->pc);
1476 kvm_set_c0_guest_status(cop0, ST0_EXL);
1477
1478 if (cause & CAUSEF_BD)
1479 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1480 else
1481 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1482
1483 }
1484
1485 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1486
1487 kvm_change_c0_guest_cause(cop0, (0xff),
1488 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1489 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1490
1491 return er;
1492 }
1493
1494 enum emulation_result
1495 kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1496 struct kvm_run *run, struct kvm_vcpu *vcpu)
1497 {
1498 struct mips_coproc *cop0 = vcpu->arch.cop0;
1499 struct kvm_vcpu_arch *arch = &vcpu->arch;
1500 enum emulation_result er = EMULATE_DONE;
1501
1502 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1503 /* save old pc */
1504 kvm_write_c0_guest_epc(cop0, arch->pc);
1505 kvm_set_c0_guest_status(cop0, ST0_EXL);
1506
1507 if (cause & CAUSEF_BD)
1508 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1509 else
1510 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1511
1512 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1513
1514 kvm_change_c0_guest_cause(cop0, (0xff),
1515 (T_RES_INST << CAUSEB_EXCCODE));
1516
1517 /* Set PC to the exception entry point */
1518 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1519
1520 } else {
1521 kvm_err("Trying to deliver RI when EXL is already set\n");
1522 er = EMULATE_FAIL;
1523 }
1524
1525 return er;
1526 }
1527
1528 enum emulation_result
1529 kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1530 struct kvm_run *run, struct kvm_vcpu *vcpu)
1531 {
1532 struct mips_coproc *cop0 = vcpu->arch.cop0;
1533 struct kvm_vcpu_arch *arch = &vcpu->arch;
1534 enum emulation_result er = EMULATE_DONE;
1535
1536 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1537 /* save old pc */
1538 kvm_write_c0_guest_epc(cop0, arch->pc);
1539 kvm_set_c0_guest_status(cop0, ST0_EXL);
1540
1541 if (cause & CAUSEF_BD)
1542 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1543 else
1544 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1545
1546 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1547
1548 kvm_change_c0_guest_cause(cop0, (0xff),
1549 (T_BREAK << CAUSEB_EXCCODE));
1550
1551 /* Set PC to the exception entry point */
1552 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1553
1554 } else {
1555 printk("Trying to deliver BP when EXL is already set\n");
1556 er = EMULATE_FAIL;
1557 }
1558
1559 return er;
1560 }
1561
1562 /*
1563 * ll/sc, rdhwr, sync emulation
1564 */
1565
1566 #define OPCODE 0xfc000000
1567 #define BASE 0x03e00000
1568 #define RT 0x001f0000
1569 #define OFFSET 0x0000ffff
1570 #define LL 0xc0000000
1571 #define SC 0xe0000000
1572 #define SPEC0 0x00000000
1573 #define SPEC3 0x7c000000
1574 #define RD 0x0000f800
1575 #define FUNC 0x0000003f
1576 #define SYNC 0x0000000f
1577 #define RDHWR 0x0000003b
1578
1579 enum emulation_result
1580 kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1581 struct kvm_run *run, struct kvm_vcpu *vcpu)
1582 {
1583 struct mips_coproc *cop0 = vcpu->arch.cop0;
1584 struct kvm_vcpu_arch *arch = &vcpu->arch;
1585 enum emulation_result er = EMULATE_DONE;
1586 unsigned long curr_pc;
1587 uint32_t inst;
1588
1589 /*
1590 * Update PC and hold onto current PC in case there is
1591 * an error and we want to rollback the PC
1592 */
1593 curr_pc = vcpu->arch.pc;
1594 er = update_pc(vcpu, cause);
1595 if (er == EMULATE_FAIL)
1596 return er;
1597
1598 /*
1599 * Fetch the instruction.
1600 */
1601 if (cause & CAUSEF_BD)
1602 opc += 1;
1603
1604 inst = kvm_get_inst(opc, vcpu);
1605
1606 if (inst == KVM_INVALID_INST) {
1607 printk("%s: Cannot get inst @ %p\n", __func__, opc);
1608 return EMULATE_FAIL;
1609 }
1610
1611 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1612 int rd = (inst & RD) >> 11;
1613 int rt = (inst & RT) >> 16;
1614 switch (rd) {
1615 case 0: /* CPU number */
1616 arch->gprs[rt] = 0;
1617 break;
1618 case 1: /* SYNCI length */
1619 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1620 current_cpu_data.icache.linesz);
1621 break;
1622 case 2: /* Read count register */
1623 printk("RDHWR: Count register\n");
1624 arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1625 break;
1626 case 3: /* Count register resolution */
1627 switch (current_cpu_data.cputype) {
1628 case CPU_20KC:
1629 case CPU_25KF:
1630 arch->gprs[rt] = 1;
1631 break;
1632 default:
1633 arch->gprs[rt] = 2;
1634 }
1635 break;
1636 case 29:
1637 #if 1
1638 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1639 #else
1640 /* UserLocal not implemented */
1641 er = EMULATE_FAIL;
1642 #endif
1643 break;
1644
1645 default:
1646 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
1647 er = EMULATE_FAIL;
1648 break;
1649 }
1650 } else {
1651 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
1652 er = EMULATE_FAIL;
1653 }
1654
1655 /*
1656 * Rollback PC only if emulation was unsuccessful
1657 */
1658 if (er == EMULATE_FAIL) {
1659 vcpu->arch.pc = curr_pc;
1660 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1661 }
1662 return er;
1663 }
1664
1665 enum emulation_result
1666 kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1667 {
1668 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1669 enum emulation_result er = EMULATE_DONE;
1670
1671 if (run->mmio.len > sizeof(*gpr)) {
1672 printk("Bad MMIO length: %d", run->mmio.len);
1673 er = EMULATE_FAIL;
1674 goto done;
1675 }
1676
1677 /* Restore saved resume PC */
1678 vcpu->arch.pc = vcpu->arch.io_pc;
1679
1680 switch (run->mmio.len) {
1681 case 4:
1682 *gpr = *(int32_t *) run->mmio.data;
1683 break;
1684
1685 case 2:
1686 if (vcpu->mmio_needed == 2)
1687 *gpr = *(int16_t *) run->mmio.data;
1688 else
1689 *gpr = *(uint16_t *)run->mmio.data;
1690
1691 break;
1692 case 1:
1693 if (vcpu->mmio_needed == 2)
1694 *gpr = *(int8_t *) run->mmio.data;
1695 else
1696 *gpr = *(u8 *) run->mmio.data;
1697 break;
1698 }
1699
1700 done:
1701 return er;
1702 }
1703
1704 static enum emulation_result
1705 kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1706 struct kvm_run *run, struct kvm_vcpu *vcpu)
1707 {
1708 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1709 struct mips_coproc *cop0 = vcpu->arch.cop0;
1710 struct kvm_vcpu_arch *arch = &vcpu->arch;
1711 enum emulation_result er = EMULATE_DONE;
1712
1713 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1714 /* save old pc */
1715 kvm_write_c0_guest_epc(cop0, arch->pc);
1716 kvm_set_c0_guest_status(cop0, ST0_EXL);
1717
1718 if (cause & CAUSEF_BD)
1719 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1720 else
1721 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1722
1723 kvm_change_c0_guest_cause(cop0, (0xff),
1724 (exccode << CAUSEB_EXCCODE));
1725
1726 /* Set PC to the exception entry point */
1727 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1728 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1729
1730 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1731 exccode, kvm_read_c0_guest_epc(cop0),
1732 kvm_read_c0_guest_badvaddr(cop0));
1733 } else {
1734 printk("Trying to deliver EXC when EXL is already set\n");
1735 er = EMULATE_FAIL;
1736 }
1737
1738 return er;
1739 }
1740
1741 enum emulation_result
1742 kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
1743 struct kvm_run *run, struct kvm_vcpu *vcpu)
1744 {
1745 enum emulation_result er = EMULATE_DONE;
1746 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1747 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1748
1749 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1750
1751 if (usermode) {
1752 switch (exccode) {
1753 case T_INT:
1754 case T_SYSCALL:
1755 case T_BREAK:
1756 case T_RES_INST:
1757 break;
1758
1759 case T_COP_UNUSABLE:
1760 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
1761 er = EMULATE_PRIV_FAIL;
1762 break;
1763
1764 case T_TLB_MOD:
1765 break;
1766
1767 case T_TLB_LD_MISS:
1768 /* If we are accessing Guest kernel space, then send an address error exception to the guest */
1769 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1770 printk("%s: LD MISS @ %#lx\n", __func__,
1771 badvaddr);
1772 cause &= ~0xff;
1773 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
1774 er = EMULATE_PRIV_FAIL;
1775 }
1776 break;
1777
1778 case T_TLB_ST_MISS:
1779 /* If we are accessing Guest kernel space, then send an address error exception to the guest */
1780 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1781 printk("%s: ST MISS @ %#lx\n", __func__,
1782 badvaddr);
1783 cause &= ~0xff;
1784 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
1785 er = EMULATE_PRIV_FAIL;
1786 }
1787 break;
1788
1789 case T_ADDR_ERR_ST:
1790 printk("%s: address error ST @ %#lx\n", __func__,
1791 badvaddr);
1792 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1793 cause &= ~0xff;
1794 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
1795 }
1796 er = EMULATE_PRIV_FAIL;
1797 break;
1798 case T_ADDR_ERR_LD:
1799 printk("%s: address error LD @ %#lx\n", __func__,
1800 badvaddr);
1801 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1802 cause &= ~0xff;
1803 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
1804 }
1805 er = EMULATE_PRIV_FAIL;
1806 break;
1807 default:
1808 er = EMULATE_PRIV_FAIL;
1809 break;
1810 }
1811 }
1812
1813 if (er == EMULATE_PRIV_FAIL) {
1814 kvm_mips_emulate_exc(cause, opc, run, vcpu);
1815 }
1816 return er;
1817 }
1818
1819 /* User Address (UA) fault; this could happen if
1820 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
1821 * case we pass on the fault to the guest kernel and let it handle it.
1822 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
1823 * case we inject the TLB from the Guest TLB into the shadow host TLB
1824 */
1825 enum emulation_result
1826 kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1827 struct kvm_run *run, struct kvm_vcpu *vcpu)
1828 {
1829 enum emulation_result er = EMULATE_DONE;
1830 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1831 unsigned long va = vcpu->arch.host_cp0_badvaddr;
1832 int index;
1833
1834 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1835 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1836
1837 /* KVM would not have got the exception if this entry was valid in the shadow host TLB.
1838  * Check the Guest TLB; if the entry is not there then send the guest an
1839 * exception. The guest exc handler should then inject an entry into the
1840 * guest TLB
1841 */
1842 index = kvm_mips_guest_tlb_lookup(vcpu,
1843 (va & VPN2_MASK) |
1844 (kvm_read_c0_guest_entryhi
1845 (vcpu->arch.cop0) & ASID_MASK));
1846 if (index < 0) {
1847 if (exccode == T_TLB_LD_MISS) {
1848 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1849 } else if (exccode == T_TLB_ST_MISS) {
1850 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1851 } else {
1852 printk("%s: invalid exc code: %d\n", __func__, exccode);
1853 er = EMULATE_FAIL;
1854 }
1855 } else {
1856 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1857
1858 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1859 if (!TLB_IS_VALID(*tlb, va)) {
1860 if (exccode == T_TLB_LD_MISS) {
1861 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1862 vcpu);
1863 } else if (exccode == T_TLB_ST_MISS) {
1864 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1865 vcpu);
1866 } else {
1867 printk("%s: invalid exc code: %d\n", __func__,
1868 exccode);
1869 er = EMULATE_FAIL;
1870 }
1871 } else {
1872 #ifdef DEBUG
1873 kvm_debug
1874 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
1875 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1876 #endif
1877 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1878 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1879 NULL, NULL)) {
1880 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1881 __func__, va, index, vcpu,
1882 read_c0_entryhi());
1883 er = EMULATE_FAIL;
1884 }
1885 }
1886 }
1887
1888 return er;
1889 }