1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/vmalloc.h>
16 #include <linux/fs.h>
17 #include <linux/bootmem.h>
18 #include <asm/page.h>
19 #include <asm/cacheflush.h>
20 #include <asm/mmu_context.h>
21
22 #include <linux/kvm_host.h>
23
24 #include "kvm_mips_int.h"
25 #include "kvm_mips_comm.h"
26
27 #define CREATE_TRACE_POINTS
28 #include "trace.h"
29
30 #ifndef VECTORSPACING
31 #define VECTORSPACING 0x100 /* for EI/VI mode */
32 #endif
33
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35 struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { "wait", VCPU_STAT(wait_exits) },
37 { "cache", VCPU_STAT(cache_exits) },
38 { "signal", VCPU_STAT(signal_exits) },
39 { "interrupt", VCPU_STAT(int_exits) },
40 { "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
41 { "tlbmod", VCPU_STAT(tlbmod_exits) },
42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
44 { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
46 { "syscall", VCPU_STAT(syscall_exits) },
47 { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
48 { "break_inst", VCPU_STAT(break_inst_exits) },
49 { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
50 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
51 {NULL}
52 };
53
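/*
 * Reset a vcpu by forgetting the guest kernel/user ASIDs cached for every
 * possible CPU; fresh ASIDs are then allocated on the next guest entry.
 * Used below to implement the KVM_NMI ioctl as a CPU reset.
 */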
54 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55 {
56 int i;
57 for_each_possible_cpu(i) {
58 vcpu->arch.guest_kernel_asid[i] = 0;
59 vcpu->arch.guest_user_asid[i] = 0;
60 }
61 return 0;
62 }
63
64 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
65 {
66 return gfn;
67 }
68
69 /* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
70 * are "runnable" if interrupts are pending
71 */
72 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
73 {
74 return !!(vcpu->arch.pending_exceptions);
75 }
76
77 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
78 {
79 return 1;
80 }
81
82 int kvm_arch_hardware_enable(void *garbage)
83 {
84 return 0;
85 }
86
87 void kvm_arch_hardware_disable(void *garbage)
88 {
89 }
90
91 int kvm_arch_hardware_setup(void)
92 {
93 return 0;
94 }
95
96 void kvm_arch_hardware_unsetup(void)
97 {
98 }
99
100 void kvm_arch_check_processor_compat(void *rtn)
101 {
102 int *r = (int *)rtn;
103 *r = 0;
104 return;
105 }
106
107 static void kvm_mips_init_tlbs(struct kvm *kvm)
108 {
109 unsigned long wired;
110
111 /* Add a wired entry to the TLB; it is used to map the commpage to the Guest kernel */
112 wired = read_c0_wired();
113 write_c0_wired(wired + 1);
114 mtc0_tlbw_hazard();
115 kvm->arch.commpage_tlb = wired;
116
117 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
118 kvm->arch.commpage_tlb);
119 }
120
121 static void kvm_mips_init_vm_percpu(void *arg)
122 {
123 struct kvm *kvm = (struct kvm *)arg;
124
125 kvm_mips_init_tlbs(kvm);
126 kvm_mips_callbacks->vm_init(kvm);
127
128 }
129
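/*
 * kvm_mips_instance counts live KVM instances.  The first VM created runs
 * kvm_mips_init_vm_percpu() on every CPU to reserve the wired commpage TLB
 * entry and let the emulation backend do its per-VM setup.
 */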
130 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
131 {
132 if (atomic_inc_return(&kvm_mips_instance) == 1) {
133 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
134 __func__);
135 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
136 }
137
138
139 return 0;
140 }
141
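/*
 * Tear down everything allocated on behalf of the guest: release the pages
 * referenced by the guest physical map (guest_pmap), free the map itself,
 * then free each vcpu and clear the online vcpu array under kvm->lock.
 */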
142 void kvm_mips_free_vcpus(struct kvm *kvm)
143 {
144 unsigned int i;
145 struct kvm_vcpu *vcpu;
146
147 /* Put the pages we reserved for the guest pmap */
148 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
150 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
151 }
152
153 if (kvm->arch.guest_pmap)
154 kfree(kvm->arch.guest_pmap);
155
156 kvm_for_each_vcpu(i, vcpu, kvm) {
157 kvm_arch_vcpu_free(vcpu);
158 }
159
160 mutex_lock(&kvm->lock);
161
162 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
163 kvm->vcpus[i] = NULL;
164
165 atomic_set(&kvm->online_vcpus, 0);
166
167 mutex_unlock(&kvm->lock);
168 }
169
170 void kvm_arch_sync_events(struct kvm *kvm)
171 {
172 }
173
174 static void kvm_mips_uninit_tlbs(void *arg)
175 {
176 /* Restore wired count */
177 write_c0_wired(0);
178 mtc0_tlbw_hazard();
179 /* Clear out all the TLBs */
180 kvm_local_flush_tlb_all();
181 }
182
183 void kvm_arch_destroy_vm(struct kvm *kvm)
184 {
185 kvm_mips_free_vcpus(kvm);
186
187 /* If this is the last instance, restore wired count */
188 if (atomic_dec_return(&kvm_mips_instance) == 0) {
189 kvm_info("%s: last KVM instance, restoring TLB parameters\n",
190 __func__);
191 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
192 }
193 }
194
195 long
196 kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197 {
198 return -EINVAL;
199 }
200
201 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
202 struct kvm_memory_slot *dont)
203 {
204 }
205
206 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
207 {
208 return 0;
209 }
210
211 int kvm_arch_prepare_memory_region(struct kvm *kvm,
212 struct kvm_memory_slot *memslot,
213 struct kvm_userspace_memory_region *mem,
214 enum kvm_mr_change change)
215 {
216 return 0;
217 }
218
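/*
 * The guest physical map (guest_pmap) is allocated lazily here, when the
 * first memory slot (slot 0) is committed: one entry per guest page, each
 * initialised to KVM_INVALID_PAGE until a host page is actually mapped.
 */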
219 void kvm_arch_commit_memory_region(struct kvm *kvm,
220 struct kvm_userspace_memory_region *mem,
221 const struct kvm_memory_slot *old,
222 enum kvm_mr_change change)
223 {
224 unsigned long npages = 0;
225 int i, err = 0;
226
227 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
228 __func__, kvm, mem->slot, mem->guest_phys_addr,
229 mem->memory_size, mem->userspace_addr);
230
231 /* Setup Guest PMAP table */
232 if (!kvm->arch.guest_pmap) {
233 if (mem->slot == 0)
234 npages = mem->memory_size >> PAGE_SHIFT;
235
236 if (npages) {
237 kvm->arch.guest_pmap_npages = npages;
238 kvm->arch.guest_pmap =
239 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
240
241 if (!kvm->arch.guest_pmap) {
242 kvm_err("Failed to allocate guest PMAP");
243 err = -ENOMEM;
244 goto out;
245 }
246
247 kvm_info
248 ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
249 npages, kvm->arch.guest_pmap);
250
251 /* Now setup the page table */
252 for (i = 0; i < npages; i++) {
253 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
254 }
255 }
256 }
257 out:
258 return;
259 }
260
261 void kvm_arch_flush_shadow_all(struct kvm *kvm)
262 {
263 }
264
265 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
266 struct kvm_memory_slot *slot)
267 {
268 }
269
270 void kvm_arch_flush_shadow(struct kvm *kvm)
271 {
272 }
273
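/*
 * Per-vcpu exception base (gebase) layout built below:
 *
 *   gebase + 0x000                     TLB refill handler (EXL = 0)
 *   gebase + 0x180                     general exception entry point
 *   gebase + 0x200 + i*VECTORSPACING   vectored interrupt entries, i = 0..7
 *   gebase + 0x2000                    relocated guest exit handler
 *                                      (mips32_GuestException)
 *
 * Every entry except the guest exit handler is a copy of mips32_exception;
 * the icache is flushed over the allocated range once the copies are done.
 */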
274 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
275 {
276 extern char mips32_exception[], mips32_exceptionEnd[];
277 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
278 int err, size, offset;
279 void *gebase;
280 int i;
281
282 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
283
284 if (!vcpu) {
285 err = -ENOMEM;
286 goto out;
287 }
288
289 err = kvm_vcpu_init(vcpu, kvm, id);
290
291 if (err)
292 goto out_free_cpu;
293
294 kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
295
296 /* Allocate space for host mode exception handlers that handle
297 * guest mode exits
298 */
299 if (cpu_has_veic || cpu_has_vint) {
300 size = 0x200 + VECTORSPACING * 64;
301 } else {
302 size = 0x200;
303 }
304
305 /* Save Linux EBASE */
306 vcpu->arch.host_ebase = (void *)read_c0_ebase();
307
308 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
309
310 if (!gebase) {
311 err = -ENOMEM;
312 goto out_free_cpu;
313 }
314 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
315 ALIGN(size, PAGE_SIZE), gebase);
316
317 /* Save new ebase */
318 vcpu->arch.guest_ebase = gebase;
319
320 /* Copy L1 Guest Exception handler to correct offset */
321
322 /* TLB Refill, EXL = 0 */
323 memcpy(gebase, mips32_exception,
324 mips32_exceptionEnd - mips32_exception);
325
326 /* General Exception Entry point */
327 memcpy(gebase + 0x180, mips32_exception,
328 mips32_exceptionEnd - mips32_exception);
329
330 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
331 for (i = 0; i < 8; i++) {
332 kvm_debug("L1 Vectored handler @ %p\n",
333 gebase + 0x200 + (i * VECTORSPACING));
334 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
335 mips32_exceptionEnd - mips32_exception);
336 }
337
338 /* General handler, relocate to unmapped space for sanity's sake */
339 offset = 0x2000;
340 kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
341 gebase + offset,
342 mips32_GuestExceptionEnd - mips32_GuestException);
343
344 memcpy(gebase + offset, mips32_GuestException,
345 mips32_GuestExceptionEnd - mips32_GuestException);
346
347 /* Invalidate the icache for these ranges */
348 mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
349
350 /* Allocate comm page for guest kernel; a TLB entry will be reserved for mapping GVA @ 0xFFFF8000 to this page */
351 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
352
353 if (!vcpu->arch.kseg0_commpage) {
354 err = -ENOMEM;
355 goto out_free_gebase;
356 }
357
358 kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
359 kvm_mips_commpage_init(vcpu);
360
361 /* Init */
362 vcpu->arch.last_sched_cpu = -1;
363
364 /* Start off the timer */
365 kvm_mips_emulate_count(vcpu);
366
367 return vcpu;
368
369 out_free_gebase:
370 kfree(gebase);
371
372 out_free_cpu:
373 kfree(vcpu);
374
375 out:
376 return ERR_PTR(err);
377 }
378
379 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
380 {
381 hrtimer_cancel(&vcpu->arch.comparecount_timer);
382
383 kvm_vcpu_uninit(vcpu);
384
385 kvm_mips_dump_stats(vcpu);
386
387 if (vcpu->arch.guest_ebase)
388 kfree(vcpu->arch.guest_ebase);
389
390 if (vcpu->arch.kseg0_commpage)
391 kfree(vcpu->arch.kseg0_commpage);
392
393 }
394
395 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
396 {
397 kvm_arch_vcpu_free(vcpu);
398 }
399
400 int
401 kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402 struct kvm_guest_debug *dbg)
403 {
404 return -EINVAL;
405 }
406
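/*
 * Main vcpu run entry point: apply the guest signal mask, complete any MMIO
 * load left pending by a previous exit, deliver pending guest interrupts,
 * then enter the guest with interrupts disabled via __kvm_mips_vcpu_run().
 */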
407 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
408 {
409 int r = 0;
410 sigset_t sigsaved;
411
412 if (vcpu->sigset_active)
413 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
414
415 if (vcpu->mmio_needed) {
416 if (!vcpu->mmio_is_write)
417 kvm_mips_complete_mmio_load(vcpu, run);
418 vcpu->mmio_needed = 0;
419 }
420
421 /* Check if we have any exceptions/interrupts pending */
422 kvm_mips_deliver_interrupts(vcpu,
423 kvm_read_c0_guest_cause(vcpu->arch.cop0));
424
425 local_irq_disable();
426 kvm_guest_enter();
427
428 r = __kvm_mips_vcpu_run(run, vcpu);
429
430 kvm_guest_exit();
431 local_irq_enable();
432
433 if (vcpu->sigset_active)
434 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
435
436 return r;
437 }
438
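/*
 * Back end of the KVM_INTERRUPT ioctl.  irq->irq selects the line: 2, 3 or 4
 * raise IP2/IP3/IP4 on the target vcpu, while -2, -3 or -4 clear them;
 * irq->cpu picks the target vcpu, with -1 meaning the vcpu the ioctl was
 * issued on.  A minimal, hypothetical userspace sketch (vcpu_fd is assumed
 * to be an already-open vcpu file descriptor):
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	(asserts IP2 on this vcpu)
 */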
439 int
440 kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
441 {
442 int intr = (int)irq->irq;
443 struct kvm_vcpu *dvcpu = NULL;
444
445 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
446 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
447 (int)intr);
448
449 if (irq->cpu == -1)
450 dvcpu = vcpu;
451 else
452 dvcpu = vcpu->kvm->vcpus[irq->cpu];
453
454 if (intr == 2 || intr == 3 || intr == 4) {
455 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
456
457 } else if (intr == -2 || intr == -3 || intr == -4) {
458 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
459 } else {
460 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
461 irq->cpu, irq->irq);
462 return -EINVAL;
463 }
464
465 dvcpu->arch.wait = 0;
466
467 if (waitqueue_active(&dvcpu->wq)) {
468 wake_up_interruptible(&dvcpu->wq);
469 }
470
471 return 0;
472 }
473
474 int
475 kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476 struct kvm_mp_state *mp_state)
477 {
478 return -EINVAL;
479 }
480
481 int
482 kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483 struct kvm_mp_state *mp_state)
484 {
485 return -EINVAL;
486 }
487
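/*
 * Arch-specific vcpu ioctls: only KVM_NMI (treated as a CPU reset) and
 * KVM_INTERRUPT are handled here; anything else returns -EINVAL.
 */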
488 long
489 kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
490 {
491 struct kvm_vcpu *vcpu = filp->private_data;
492 void __user *argp = (void __user *)arg;
493 long r;
494 int intr;
495
496 switch (ioctl) {
497 case KVM_NMI:
498 /* Treat the NMI as a CPU reset */
499 r = kvm_mips_reset_vcpu(vcpu);
500 break;
501 case KVM_INTERRUPT:
502 {
503 struct kvm_mips_interrupt irq;
504 r = -EFAULT;
505 if (copy_from_user(&irq, argp, sizeof(irq)))
506 goto out;
507
508 intr = (int)irq.irq;
509
510 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
511 irq.irq);
512
513 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
514 break;
515 }
516 default:
517 r = -EINVAL;
518 }
519
520 out:
521 return r;
522 }
523
524 /*
525 * Get (and clear) the dirty memory log for a memory slot.
526 */
527 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
528 {
529 struct kvm_memory_slot *memslot;
530 unsigned long ga, ga_end;
531 int is_dirty = 0;
532 int r;
533 unsigned long n;
534
535 mutex_lock(&kvm->slots_lock);
536
537 r = kvm_get_dirty_log(kvm, log, &is_dirty);
538 if (r)
539 goto out;
540
541 /* If nothing is dirty, don't bother messing with page tables. */
542 if (is_dirty) {
543 memslot = &kvm->memslots->memslots[log->slot];
544
545 ga = memslot->base_gfn << PAGE_SHIFT;
546 ga_end = ga + (memslot->npages << PAGE_SHIFT);
547
548 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
549 ga_end);
550
551 n = kvm_dirty_bitmap_bytes(memslot);
552 memset(memslot->dirty_bitmap, 0, n);
553 }
554
555 r = 0;
556 out:
557 mutex_unlock(&kvm->slots_lock);
558 return r;
559
560 }
561
562 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
563 {
564 long r;
565
566 switch (ioctl) {
567 default:
568 r = -EINVAL;
569 }
570
571 return r;
572 }
573
574 int kvm_arch_init(void *opaque)
575 {
576 int ret;
577
578 if (kvm_mips_callbacks) {
579 kvm_err("kvm: module already exists\n");
580 return -EEXIST;
581 }
582
583 ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
584
585 return ret;
586 }
587
588 void kvm_arch_exit(void)
589 {
590 kvm_mips_callbacks = NULL;
591 }
592
593 int
594 kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
595 {
596 return -ENOTSUPP;
597 }
598
599 int
600 kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
601 {
602 return -ENOTSUPP;
603 }
604
605 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
606 {
607 return 0;
608 }
609
610 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
611 {
612 return -ENOTSUPP;
613 }
614
615 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
616 {
617 return -ENOTSUPP;
618 }
619
620 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
621 {
622 return VM_FAULT_SIGBUS;
623 }
624
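/*
 * The only optional capability advertised so far is coalesced MMIO; its
 * return value is KVM_COALESCED_MMIO_PAGE_OFFSET, the page offset userspace
 * maps to reach the coalesced MMIO ring.
 */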
625 int kvm_dev_ioctl_check_extension(long ext)
626 {
627 int r;
628
629 switch (ext) {
630 case KVM_CAP_COALESCED_MMIO:
631 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
632 break;
633 default:
634 r = 0;
635 break;
636 }
637 return r;
638
639 }
640
641 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
642 {
643 return kvm_mips_pending_timer(vcpu);
644 }
645
646 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
647 {
648 int i;
649 struct mips_coproc *cop0;
650
651 if (!vcpu)
652 return -1;
653
654 printk("VCPU Register Dump:\n");
655 printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
656 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
657
658 for (i = 0; i < 32; i += 4) {
659 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
660 vcpu->arch.gprs[i],
661 vcpu->arch.gprs[i + 1],
662 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
663 }
664 printk("\thi: 0x%08lx\n", vcpu->arch.hi);
665 printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
666
667 cop0 = vcpu->arch.cop0;
668 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
669 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
670
671 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
672
673 return 0;
674 }
675
676 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
677 {
678 int i;
679
680 for (i = 0; i < 32; i++)
681 vcpu->arch.gprs[i] = regs->gprs[i];
682
683 vcpu->arch.hi = regs->hi;
684 vcpu->arch.lo = regs->lo;
685 vcpu->arch.pc = regs->pc;
686
687 return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
688 }
689
690 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
691 {
692 int i;
693
694 for (i = 0; i < 32; i++)
695 regs->gprs[i] = vcpu->arch.gprs[i];
696
697 regs->hi = vcpu->arch.hi;
698 regs->lo = vcpu->arch.lo;
699 regs->pc = vcpu->arch.pc;
700
701 return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
702 }
703
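/*
 * Count/Compare timer emulation: the comparecount hrtimer fires every 10 ms
 * (re-armed in kvm_mips_comparecount_wakeup() below), queues a guest timer
 * interrupt and wakes the vcpu if it is sitting in a wait state.
 */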
704 void kvm_mips_comparecount_func(unsigned long data)
705 {
706 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
707
708 kvm_mips_callbacks->queue_timer_int(vcpu);
709
710 vcpu->arch.wait = 0;
711 if (waitqueue_active(&vcpu->wq)) {
712 wake_up_interruptible(&vcpu->wq);
713 }
714 }
715
716 /*
717 * low level hrtimer wake routine.
718 */
719 enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
720 {
721 struct kvm_vcpu *vcpu;
722
723 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
724 kvm_mips_comparecount_func((unsigned long) vcpu);
725 hrtimer_forward_now(&vcpu->arch.comparecount_timer,
726 ktime_set(0, MS_TO_NS(10)));
727 return HRTIMER_RESTART;
728 }
729
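/*
 * Per-vcpu init: let the emulation backend initialise its state, set up the
 * comparecount hrtimer used for Count/Compare emulation and initialise the
 * vcpu's shadow TLB.
 */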
730 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
731 {
732 kvm_mips_callbacks->vcpu_init(vcpu);
733 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
734 HRTIMER_MODE_REL);
735 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
736 kvm_mips_init_shadow_tlb(vcpu);
737 return 0;
738 }
739
740 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
741 {
742 return;
743 }
744
745 int
746 kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
747 {
748 return 0;
749 }
750
751 /* Initial guest state */
752 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
753 {
754 return kvm_mips_callbacks->vcpu_setup(vcpu);
755 }
756
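/*
 * Restore coprocessor enables in the host Status register on guest exit:
 * CU1 when the host has an FPU and MX when it has the DSP ASE, matching the
 * "set status bits before we hit the scheduler" step in handle_exit below.
 */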
757 static
758 void kvm_mips_set_c0_status(void)
759 {
760 uint32_t status = read_c0_status();
761
762 if (cpu_has_fpu)
763 status |= (ST0_CU1);
764
765 if (cpu_has_dsp)
766 status |= (ST0_MX);
767
768 write_c0_status(status);
769 ehb();
770 }
771
772 /*
773 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
774 */
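/*
 * High level flow: restore host Status bits and re-enable interrupts, run a
 * privilege check on the faulting instruction, dispatch on the exception
 * code (ExcCode) to the per-exception handlers, then either resume the
 * guest (after re-delivering pending interrupts and checking for signals)
 * or bounce out to userspace with RESUME_HOST.
 */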
775 int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
776 {
777 uint32_t cause = vcpu->arch.host_cp0_cause;
778 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
779 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
780 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
781 enum emulation_result er = EMULATE_DONE;
782 int ret = RESUME_GUEST;
783
784 /* Set a default exit reason */
785 run->exit_reason = KVM_EXIT_UNKNOWN;
786 run->ready_for_interrupt_injection = 1;
787
788 /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
789 kvm_mips_set_c0_status();
790
791 local_irq_enable();
792
793 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
794 cause, opc, run, vcpu);
795
796 /* Do a privilege check; if in UM most of these exit conditions end up
797 * causing an exception to be delivered to the Guest Kernel
798 */
799 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
800 if (er == EMULATE_PRIV_FAIL) {
801 goto skip_emul;
802 } else if (er == EMULATE_FAIL) {
803 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
804 ret = RESUME_HOST;
805 goto skip_emul;
806 }
807
808 switch (exccode) {
809 case T_INT:
810 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
811
812 ++vcpu->stat.int_exits;
813 trace_kvm_exit(vcpu, INT_EXITS);
814
815 if (need_resched()) {
816 cond_resched();
817 }
818
819 ret = RESUME_GUEST;
820 break;
821
822 case T_COP_UNUSABLE:
823 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
824
825 ++vcpu->stat.cop_unusable_exits;
826 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
827 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
828 /* XXXKYMA: Might need to return to user space */
829 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
830 ret = RESUME_HOST;
831 }
832 break;
833
834 case T_TLB_MOD:
835 ++vcpu->stat.tlbmod_exits;
836 trace_kvm_exit(vcpu, TLBMOD_EXITS);
837 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
838 break;
839
840 case T_TLB_ST_MISS:
841 kvm_debug
842 ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
843 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
844 badvaddr);
845
846 ++vcpu->stat.tlbmiss_st_exits;
847 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
848 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
849 break;
850
851 case T_TLB_LD_MISS:
852 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
853 cause, opc, badvaddr);
854
855 ++vcpu->stat.tlbmiss_ld_exits;
856 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
857 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
858 break;
859
860 case T_ADDR_ERR_ST:
861 ++vcpu->stat.addrerr_st_exits;
862 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
863 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
864 break;
865
866 case T_ADDR_ERR_LD:
867 ++vcpu->stat.addrerr_ld_exits;
868 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
869 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
870 break;
871
872 case T_SYSCALL:
873 ++vcpu->stat.syscall_exits;
874 trace_kvm_exit(vcpu, SYSCALL_EXITS);
875 ret = kvm_mips_callbacks->handle_syscall(vcpu);
876 break;
877
878 case T_RES_INST:
879 ++vcpu->stat.resvd_inst_exits;
880 trace_kvm_exit(vcpu, RESVD_INST_EXITS);
881 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
882 break;
883
884 case T_BREAK:
885 ++vcpu->stat.break_inst_exits;
886 trace_kvm_exit(vcpu, BREAK_INST_EXITS);
887 ret = kvm_mips_callbacks->handle_break(vcpu);
888 break;
889
890 default:
891 kvm_err
892 ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
893 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
894 kvm_read_c0_guest_status(vcpu->arch.cop0));
895 kvm_arch_vcpu_dump_regs(vcpu);
896 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
897 ret = RESUME_HOST;
898 break;
899
900 }
901
902 skip_emul:
903 local_irq_disable();
904
905 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
906 kvm_mips_deliver_interrupts(vcpu, cause);
907
908 if (!(ret & RESUME_HOST)) {
909 /* Only check for signals if not already exiting to userspace */
910 if (signal_pending(current)) {
911 run->exit_reason = KVM_EXIT_INTR;
912 ret = (-EINTR << 2) | RESUME_HOST;
913 ++vcpu->stat.signal_exits;
914 trace_kvm_exit(vcpu, SIGNAL_EXITS);
915 }
916 }
917
918 return ret;
919 }
920
921 int __init kvm_mips_init(void)
922 {
923 int ret;
924
925 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
926
927 if (ret)
928 return ret;
929
930 /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
931 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
932 * to avoid the possibility of double faulting. The issue is that the TLB code
933 * references routines that are part of the KVM module,
934 * which are only available once the module is loaded.
935 */
936 kvm_mips_gfn_to_pfn = gfn_to_pfn;
937 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
938 kvm_mips_is_error_pfn = is_error_pfn;
939
940 pr_info("KVM/MIPS Initialized\n");
941 return 0;
942 }
943
944 void __exit kvm_mips_exit(void)
945 {
946 kvm_exit();
947
948 kvm_mips_gfn_to_pfn = NULL;
949 kvm_mips_release_pfn_clean = NULL;
950 kvm_mips_is_error_pfn = NULL;
951
952 pr_info("KVM/MIPS unloaded\n");
953 }
954
955 module_init(kvm_mips_init);
956 module_exit(kvm_mips_exit);
957
958 EXPORT_TRACEPOINT_SYMBOL(kvm_exit);