KVM: Add hypercall host support for svm
drivers/kvm/kvm_main.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <asm/msr.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <linux/reboot.h>
#include <asm/io.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <asm/desc.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>

#include "x86_emulate.h"
#include "segment_descriptor.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

struct kvm_arch_ops *kvm_arch_ops;
struct kvm_stat kvm_stat;
EXPORT_SYMBOL_GPL(kvm_stat);

static struct kvm_stats_debugfs_item {
	const char *name;
	u32 *data;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", &kvm_stat.pf_fixed },
	{ "pf_guest", &kvm_stat.pf_guest },
	{ "tlb_flush", &kvm_stat.tlb_flush },
	{ "invlpg", &kvm_stat.invlpg },
	{ "exits", &kvm_stat.exits },
	{ "io_exits", &kvm_stat.io_exits },
	{ "mmio_exits", &kvm_stat.mmio_exits },
	{ "signal_exits", &kvm_stat.signal_exits },
	{ "irq_window", &kvm_stat.irq_window_exits },
	{ "halt_exits", &kvm_stat.halt_exits },
	{ "request_irq", &kvm_stat.request_irq_exits },
	{ "irq_exits", &kvm_stat.irq_exits },
	{ NULL, NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256

#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
#define LMSW_GUEST_MASK 0x0eULL
#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
#define CR8_RESEVED_BITS (~0x0fULL)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT.  16 bytes. */
struct segment_descriptor_64 {
	struct segment_descriptor s;
	u32 base_higher;
	u32 pad_zero;
};

#endif
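
/*
 * Decode the base address of a segment from its descriptor.  Bit 2 of
 * the selector says which table to look in; for an LDT selector the
 * LDT itself is located first by recursing on the selector read with
 * "sldt".  On 64-bit hosts, system descriptors (LDT/TSS) are 16 bytes
 * and carry an extra base_higher word.
 */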
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm ("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm ("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
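
/*
 * kvm_read_guest()/kvm_write_guest() copy between a host buffer and
 * guest virtual memory one page at a time: translate gva->hpa,
 * kmap_atomic() the page, memcpy the in-page chunk, and stop early at
 * the first unmapped address.  The return value is the number of
 * bytes actually copied, which may be short.
 */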
int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		   void *dest)
{
	unsigned char *host_buf = dest;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		guest_buf = (hva_t)kmap_atomic(
					pfn_to_page(paddr >> PAGE_SHIFT),
					KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy(host_buf, (void*)guest_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		    void *data)
{
	unsigned char *host_buf = data;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		guest_buf = (hva_t)kmap_atomic(
				pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy((void*)guest_buf, host_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

static int vcpu_slot(struct kvm_vcpu *vcpu)
{
	return vcpu - vcpu->kvm->vcpus;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
{
	struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];

	mutex_lock(&vcpu->mutex);
	if (unlikely(!vcpu->vmcs)) {
		mutex_unlock(&vcpu->mutex);
		return NULL;
	}
	return kvm_arch_ops->vcpu_load(vcpu);
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->vcpu_put(vcpu);
	mutex_unlock(&vcpu->mutex);
}
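
/*
 * Each open of /dev/kvm creates a vm with KVM_MAX_VCPUS vcpu slots;
 * the vcpus themselves only come to life via KVM_CREATE_VCPU.
 */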
static int kvm_dev_open(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	int i;

	if (!kvm)
		return -ENOMEM;

	spin_lock_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu = &kvm->vcpus[i];

		mutex_init(&vcpu->mutex);
		vcpu->cpu = -1;
		vcpu->kvm = kvm;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		INIT_LIST_HEAD(&vcpu->free_pages);
	}
	/* Add the vm to the global list once, not once per vcpu. */
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	filp->private_data = kvm;
	return 0;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->phys_mem != dont->phys_mem)
		if (free->phys_mem) {
			for (i = 0; i < free->npages; ++i)
				if (free->phys_mem[i])
					__free_page(free->phys_mem[i]);
			vfree(free->phys_mem);
		}

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->phys_mem = NULL;
	free->npages = 0;
	free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
		return;

	kvm_mmu_destroy(vcpu);
	vcpu_put(vcpu);
	kvm_arch_ops->vcpu_free(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		kvm_free_vcpu(&kvm->vcpus[i]);
}

static int kvm_dev_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
	return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->inject_gp(vcpu, 0);
}
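
/*
 * In PAE mode, cr3 points at a 32-byte-aligned table of four pdptes.
 * The offset computation below turns bits 5..11 of cr3 into a u64
 * index: e.g. cr3 = 0x1020 yields offset ((0x20 >> 5) << 2) = 4, i.e.
 * the second group of four 8-byte entries in the page.
 */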
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	u64 pdpte;
	u64 *pdpt;
	int ret;
	struct kvm_memory_slot *memslot;

	spin_lock(&vcpu->kvm->lock);
	memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
	/* FIXME: !memslot - emulate? 0xff? */
	pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);

	ret = 1;
	for (i = 0; i < 4; ++i) {
		pdpte = pdpt[offset + i];
		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}

	for (i = 0; i < 4; ++i)
		vcpu->pdptrs[i] = pdpt[offset + i];

out:
	kunmap_atomic(pdpt, KM_USER0);
	spin_unlock(&vcpu->kvm->lock);

	return ret;
}
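
/*
 * set_cr0() enforces the architectural #GP conditions before handing
 * the new value to the arch backend: reserved bits, NW without CD,
 * PG without PE, and the long-mode paging-entry rules (PAE set,
 * CS.L clear).
 */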
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}

	}

	kvm_arch_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & CR4_PAE_MASK)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & CR4_VMXE_MASK) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_arch_ops->set_cr4(vcpu, cr4);
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (cr3 & CR3_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
		if (is_paging(vcpu) && is_pae(vcpu) &&
		    !load_pdptrs(vcpu, cr3)) {
			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	vcpu->cr3 = cr3;
	spin_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else
		vcpu->mmu.new_cr3(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

void fx_init(struct kvm_vcpu *vcpu)
{
	struct __attribute__ ((__packed__)) fx_image_s {
		u16 control;	/* fcw */
		u16 status;	/* fsw */
		u16 tag;	/* ftw */
		u16 opcode;	/* fop */
		u64 ip;		/* fpu ip */
		u64 operand;	/* fpu dp */
		u32 mxcsr;
		u32 mxcsr_mask;

	} *fx_image;

	fx_save(vcpu->host_fx_image);
	fpu_init();
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
	fx_image->mxcsr = 0x1f80;
	memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
	       0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	r = -EINVAL;
	if (!valid_vcpu(n))
		goto out;

	vcpu = &kvm->vcpus[n];

	mutex_lock(&vcpu->mutex);

	if (vcpu->vmcs) {
		mutex_unlock(&vcpu->mutex);
		return -EEXIST;
	}

	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
					   FX_IMAGE_ALIGN);
	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;

	r = kvm_arch_ops->vcpu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	kvm_arch_ops->vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);
	if (r >= 0)
		r = kvm_arch_ops->vcpu_setup(vcpu);
	vcpu_put(vcpu);

	if (r < 0)
		goto out_free_vcpus;

	return 0;

out_free_vcpus:
	kvm_free_vcpu(vcpu);
	mutex_unlock(&vcpu->mutex);
out:
	return r;
}
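
/*
 * Slot setup below is a lock/allocate/retry protocol: sanity-check,
 * snapshot memory_config_version under the lock, drop the lock for the
 * large allocations, then retake it and start over at "raced:" if the
 * configuration changed in the meantime.
 */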
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
					   struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	int memory_config_version;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
	spin_lock(&kvm->lock);

	memory_config_version = kvm->memory_config_version;
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}
	/*
	 * Do memory allocations outside lock.  memory_config_version will
	 * detect any races.
	 */
	spin_unlock(&kvm->lock);

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));

		if (!new.phys_mem)
			goto out_free;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_free;
			set_page_private(new.phys_mem[i], 0);
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	spin_lock(&kvm->lock);

	if (memory_config_version != kvm->memory_config_version) {
		spin_unlock(&kvm->lock);
		kvm_free_physmem_slot(&new, &old);
		goto raced;
	}

	r = -EAGAIN;
	if (kvm->busy)
		goto out_unlock;

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	++kvm->memory_config_version;

	spin_unlock(&kvm->lock);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu;

		vcpu = vcpu_load(kvm, i);
		if (!vcpu)
			continue;
		kvm_mmu_reset_context(vcpu);
		vcpu_put(vcpu);
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	spin_unlock(&kvm->lock);
out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}

static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_slot_remove_write_access(vcpu, slot);
	spin_unlock(&vcpu->kvm->lock);
}
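
/*
 * Dirty logging protocol: the bitmap is copied out to userspace, and
 * iff any bit was set, write access is removed from the slot and every
 * vcpu's tlb is flushed so that subsequent writes fault and redirty.
 */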
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
				       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	int cleared;
	unsigned long any = 0;

	spin_lock(&kvm->lock);

	/*
	 * Prevent changes to guest memory configuration even while the lock
	 * is not taken.
	 */
	++kvm->busy;
	spin_unlock(&kvm->lock);
	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, 8) / 8;

	for (i = 0; !any && i < n; ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any) {
		cleared = 0;
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			struct kvm_vcpu *vcpu = vcpu_load(kvm, i);

			if (!vcpu)
				continue;
			if (!cleared) {
				do_remove_write_access(vcpu, log->slot);
				memset(memslot->dirty_bitmap, 0, n);
				cleared = 1;
			}
			kvm_arch_ops->tlb_flush(vcpu);
			vcpu_put(vcpu);
		}
	}

	r = 0;

out:
	spin_lock(&kvm->lock);
	--kvm->busy;
	spin_unlock(&kvm->lock);
	return r;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memory_slot *memslot = NULL;
	unsigned long rel_gfn;

	for (i = 0; i < kvm->nmemslots; ++i) {
		memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages) {

			if (!memslot->dirty_bitmap)
				return;

			rel_gfn = gfn - memslot->base_gfn;

			/* avoid RMW */
			if (!test_bit(rel_gfn, memslot->dirty_bitmap))
				set_bit(rel_gfn, memslot->dirty_bitmap);
			return;
		}
	}
}
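
/*
 * The emulator_*() callbacks below back x86_emulate_memop().  "std"
 * accesses go straight to guest RAM; "emulated" accesses fall back to
 * MMIO by filling in vcpu->mmio_* and letting userspace complete the
 * access on the next KVM_RUN.
 */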
static int emulator_read_std(unsigned long addr,
			     unsigned long *val,
			     unsigned int bytes,
			     struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long pfn;
		struct kvm_memory_slot *memslot;
		void *page;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		pfn = gpa >> PAGE_SHIFT;
		memslot = gfn_to_memslot(vcpu->kvm, pfn);
		if (!memslot)
			return X86EMUL_UNHANDLEABLE;
		page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);

		memcpy(data, page + offset, tocopy);

		kunmap_atomic(page, KM_USER0);

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}

static int emulator_write_std(unsigned long addr,
			      unsigned long val,
			      unsigned int bytes,
			      struct x86_emulate_ctxt *ctxt)
{
	printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
	       addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

static int emulator_read_emulated(unsigned long addr,
				  unsigned long *val,
				  unsigned int bytes,
				  struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, ctxt)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	else {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		vcpu->mmio_needed = 1;
		vcpu->mmio_phys_addr = gpa;
		vcpu->mmio_size = bytes;
		vcpu->mmio_is_write = 0;

		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       unsigned long val, int bytes)
{
	struct kvm_memory_slot *m;
	struct page *page;
	void *virt;

	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
		return 0;
	m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!m)
		return 0;
	page = gfn_to_page(m, gpa >> PAGE_SHIFT);
	kvm_mmu_pre_write(vcpu, gpa, bytes);
	virt = kmap_atomic(page, KM_USER0);
	memcpy(virt + offset_in_page(gpa), &val, bytes);
	kunmap_atomic(virt, KM_USER0);
	kvm_mmu_post_write(vcpu, gpa, bytes);
	return 1;
}

static int emulator_write_emulated(unsigned long addr,
				   unsigned long val,
				   unsigned int bytes,
				   struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, &val, bytes);

	return X86EMUL_CONTINUE;
}

static int emulator_cmpxchg_emulated(unsigned long addr,
				     unsigned long old,
				     unsigned long new,
				     unsigned int bytes,
				     struct x86_emulate_ctxt *ctxt)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, ctxt);
}

#ifdef CONFIG_X86_32

static int emulator_cmpxchg8b_emulated(unsigned long addr,
				       unsigned long old_lo,
				       unsigned long old_hi,
				       unsigned long new_lo,
				       unsigned long new_hi,
				       struct x86_emulate_ctxt *ctxt)
{
	static int reported;
	int r;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
	}
	r = emulator_write_emulated(addr, new_lo, 4, ctxt);
	if (r != X86EMUL_CONTINUE)
		return r;
	return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
}

#endif

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_arch_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	unsigned long cr0;

	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	cr0 = vcpu->cr0 & ~CR0_TS_MASK;
	kvm_arch_ops->set_cr0(vcpu, cr0);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_arch_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = ctxt->vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);

	printk(KERN_ERR "emulation failed but !mmio_needed?"
	       " rip %lx %02x %02x %02x %02x\n",
	       rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}

struct x86_emulate_ops emulate_ops = {
	.read_std         = emulator_read_std,
	.write_std        = emulator_write_std,
	.read_emulated    = emulator_read_emulated,
	.write_emulated   = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
#ifdef CONFIG_X86_32
	.cmpxchg8b_emulated = emulator_cmpxchg8b_emulated,
#endif
};
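
/*
 * emulate_instruction() returns EMULATE_DONE when the instruction was
 * fully handled in the kernel, EMULATE_DO_MMIO when userspace must
 * complete an MMIO access, and EMULATE_FAIL when the emulator could
 * not decode or execute the instruction.
 */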
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code)
{
	struct x86_emulate_ctxt emulate_ctxt;
	int r;
	int cs_db, cs_l;

	kvm_arch_ops->cache_regs(vcpu);

	kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	emulate_ctxt.vcpu = vcpu;
	emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
	emulate_ctxt.cr2 = cr2;
	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_REAL : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
		emulate_ctxt.cs_base = 0;
		emulate_ctxt.ds_base = 0;
		emulate_ctxt.es_base = 0;
		emulate_ctxt.ss_base = 0;
	} else {
		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
	}

	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

	vcpu->mmio_is_write = 0;
	r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

	if ((r || vcpu->mmio_is_write) && run) {
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			report_emulation_failure(&emulate_ctxt);
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

	if (vcpu->mmio_is_write)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_arch_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_arch_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}
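
/*
 * Paravirtual hypercall setup.  The guest announces itself by writing
 * the guest-physical address of a kvm_vcpu_para_state page to the
 * MSR_KVM_API_MAGIC MSR (see kvm_set_msr_common() below); the host
 * then patches the hypercall sequence for the running architecture
 * (vmcall on VMX, vmmcall on SVM) into the page named by
 * para_state->hypercall_gpa via kvm_arch_ops->patch_hypercall().
 *
 * A rough guest-side sketch of the registration, assuming the guest
 * has set up the para_state page itself (illustrative only, not taken
 * from this file):
 *
 *	para_state->guest_version = KVM_PARA_API_VERSION;
 *	para_state->hypercall_gpa = __pa(hypercall_page);
 *	wrmsrl(MSR_KVM_API_MAGIC, __pa(para_state));
 *	if (para_state->ret < 0)
 *		... host rejected the registration ...
 */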
/*
 * Register the para guest with the host:
 */
static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
{
	struct kvm_vcpu_para_state *para_state;
	hpa_t para_state_hpa, hypercall_hpa;
	struct page *para_state_page;
	unsigned char *hypercall;
	gpa_t hypercall_gpa;

	printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
	printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);

	/*
	 * Needs to be page aligned:
	 */
	if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
		goto err_gp;

	para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
	printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
	if (is_error_hpa(para_state_hpa))
		goto err_gp;

	para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
	para_state = kmap_atomic(para_state_page, KM_USER0);

	printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
	printk(KERN_DEBUG ".... size: %d\n", para_state->size);

	para_state->host_version = KVM_PARA_API_VERSION;
	/*
	 * We cannot support guests that try to register themselves
	 * with a newer API version than the host supports:
	 */
	if (para_state->guest_version > KVM_PARA_API_VERSION) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	hypercall_gpa = para_state->hypercall_gpa;
	hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
	printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
	if (is_error_hpa(hypercall_hpa)) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
	vcpu->para_state_page = para_state_page;
	vcpu->para_state_gpa = para_state_gpa;
	vcpu->hypercall_gpa = hypercall_gpa;

	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
	kvm_arch_ops->patch_hypercall(vcpu, hypercall);
	kunmap_atomic(hypercall, KM_USER1);

	para_state->ret = 0;
err_kunmap_skip:
	kunmap_atomic(para_state, KM_USER0);
	return 0;
err_gp:
	return 1;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = vcpu->apic_base;
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}

#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_arch_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif
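
/*
 * Note the MSR_KVM_API_MAGIC case below: a wrmsr to this synthetic MSR
 * is how a guest probes for (and registers with) a KVM host, which is
 * what makes the hypercall page patching work on both vmx and svm.
 */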
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		vcpu->apic_base = data;
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	/*
	 * This is the 'probe whether the host is KVM' logic:
	 */
	case MSR_KVM_API_MAGIC:
		return vcpu_register_para(vcpu, data);

	default:
		printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1395
6aa8b732
AK
1396/*
1397 * Writes msr value into into the appropriate "register".
1398 * Returns 0 on success, non-0 otherwise.
1399 * Assumes vcpu_load() was already called.
1400 */
1401static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1402{
1403 return kvm_arch_ops->set_msr(vcpu, msr_index, data);
1404}
1405
1406void kvm_resched(struct kvm_vcpu *vcpu)
1407{
1408 vcpu_put(vcpu);
1409 cond_resched();
1410 /* Cannot fail - no vcpu unplug yet. */
1411 vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
1412}
1413EXPORT_SYMBOL_GPL(kvm_resched);
1414
1415void load_msrs(struct vmx_msr_entry *e, int n)
1416{
1417 int i;
1418
1419 for (i = 0; i < n; ++i)
1420 wrmsrl(e[i].index, e[i].data);
1421}
1422EXPORT_SYMBOL_GPL(load_msrs);
1423
1424void save_msrs(struct vmx_msr_entry *e, int n)
1425{
1426 int i;
1427
1428 for (i = 0; i < n; ++i)
1429 rdmsrl(e[i].index, e[i].data);
1430}
1431EXPORT_SYMBOL_GPL(save_msrs);
1432
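
/*
 * One KVM_RUN round trip: userspace passes back completion state
 * (mmio data, the pending tpr in cr8, an "emulated" instruction to
 * skip) before the vcpu is re-entered via kvm_arch_ops->run().
 */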
static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
{
	struct kvm_vcpu *vcpu;
	int r;

	if (!valid_vcpu(kvm_run->vcpu))
		return -EINVAL;

	vcpu = vcpu_load(kvm, kvm_run->vcpu);
	if (!vcpu)
		return -ENOENT;

	/* re-sync apic's tpr */
	vcpu->cr8 = kvm_run->cr8;

	if (kvm_run->emulated) {
		kvm_arch_ops->skip_emulated_instruction(vcpu);
		kvm_run->emulated = 0;
	}

	if (kvm_run->mmio_completed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
	}

	vcpu->mmio_needed = 0;

	r = kvm_arch_ops->run(vcpu, kvm_run);

	vcpu_put(vcpu);
	return r;
}

static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
{
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(regs->vcpu))
		return -EINVAL;

	vcpu = vcpu_load(kvm, regs->vcpu);
	if (!vcpu)
		return -ENOENT;

	kvm_arch_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_arch_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
{
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(regs->vcpu))
		return -EINVAL;

	vcpu = vcpu_load(kvm, regs->vcpu);
	if (!vcpu)
		return -ENOENT;

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_arch_ops->set_rflags(vcpu, regs->rflags);

	kvm_arch_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_arch_ops->get_segment(vcpu, var, seg);
}

static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
	struct kvm_vcpu *vcpu;
	struct descriptor_table dt;

	if (!valid_vcpu(sregs->vcpu))
		return -EINVAL;
	vcpu = vcpu_load(kvm, sregs->vcpu);
	if (!vcpu)
		return -ENOENT;

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_arch_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_arch_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
	sregs->cr8 = vcpu->cr8;
	sregs->efer = vcpu->shadow_efer;
	sregs->apic_base = vcpu->apic_base;

	memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
	       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_arch_ops->set_segment(vcpu, var, seg);
}

static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
	struct kvm_vcpu *vcpu;
	int mmu_reset_needed = 0;
	int i;
	struct descriptor_table dt;

	if (!valid_vcpu(sregs->vcpu))
		return -EINVAL;
	vcpu = vcpu_load(kvm, sregs->vcpu);
	if (!vcpu)
		return -ENOENT;

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_arch_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_arch_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	vcpu->cr8 = sregs->cr8;

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
	vcpu->apic_base = sregs->apic_base;

	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
	       sizeof vcpu->irq_pending);
	vcpu->irq_summary = 0;
	for (i = 0; i < NR_IRQ_WORDS; ++i)
		if (vcpu->irq_pending[i])
			__set_bit(i, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};
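
/*
 * Probe each candidate MSR with rdmsr_safe() and compact msrs_to_save
 * in place, so userspace is only ever offered MSRs this host can
 * actually read.
 */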
static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!valid_vcpu(msrs->vcpu))
		return -EINVAL;

	vcpu = vcpu_load(kvm, msrs->vcpu);
	if (!vcpu)
		return -ENOENT;

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(kvm, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	struct kvm_vcpu *vcpu;
	gpa_t gpa;

	vcpu = vcpu_load(kvm, tr->vcpu);
	if (!vcpu)
		return -ENOENT;
	spin_lock(&kvm->lock);
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	spin_unlock(&kvm->lock);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
{
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(irq->vcpu))
		return -EINVAL;
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	vcpu = vcpu_load(kvm, irq->vcpu);
	if (!vcpu)
		return -ENOENT;

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
				     struct kvm_debug_guest *dbg)
{
	struct kvm_vcpu *vcpu;
	int r;

	if (!valid_vcpu(dbg->vcpu))
		return -EINVAL;
	vcpu = vcpu_load(kvm, dbg->vcpu);
	if (!vcpu)
		return -ENOENT;

	r = kvm_arch_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}
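
/*
 * Dispatch for /dev/kvm ioctls.  A minimal userspace sketch, assuming
 * a few hypothetical local variables (illustrative only):
 *
 *	int fd = open("/dev/kvm", O_RDWR);
 *	struct kvm_run run = { .vcpu = 0 };
 *
 *	ioctl(fd, KVM_SET_MEMORY_REGION, &mem);
 *	ioctl(fd, KVM_CREATE_VCPU, 0);
 *	for (;;)
 *		ioctl(fd, KVM_RUN, &run);
 */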
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VCPU:
		r = kvm_dev_ioctl_create_vcpu(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_RUN: {
		struct kvm_run kvm_run;

		r = -EFAULT;
		if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
			goto out;
		r = kvm_dev_ioctl_run(kvm, &kvm_run);
		if (r < 0 && r != -EINTR)
			goto out;
		if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
			r = -EFAULT;
			goto out;
		}
		break;
	}
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_dev_ioctl_translate(kvm, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_dev_ioctl_interrupt(kvm, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(kvm, argp, get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(kvm, argp, do_set_msr, 0);
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
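
/*
 * mmap() of /dev/kvm exposes guest physical memory to userspace: the
 * file offset is interpreted as a gfn, and faults are satisfied from
 * the slot's phys_mem array.
 */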
static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
				   unsigned long address,
				   int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct kvm_memory_slot *slot;
	struct page *page;

	*type = VM_FAULT_MINOR;
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	slot = gfn_to_memslot(kvm, pgoff);
	if (!slot)
		return NOPAGE_SIGBUS;
	page = gfn_to_page(slot, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);
	return page;
}

static struct vm_operations_struct kvm_dev_vm_ops = {
	.nopage = kvm_dev_nopage,
};

static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_dev_vm_ops;
	return 0;
}

static struct file_operations kvm_chardev_ops = {
	.open		= kvm_dev_open,
	.release	= kvm_dev_release,
	.unlocked_ioctl	= kvm_dev_ioctl,
	.compat_ioctl	= kvm_dev_ioctl,
	.mmap		= kvm_dev_mmap,
};

static struct miscdevice kvm_dev = {
	MISC_DYNAMIC_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = &vm->vcpus[i];
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_arch_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DOWN_PREPARE:
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		decache_vcpus_on_cpu(cpu);
		smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
					 NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
					 NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
					       p->data);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	decache_vcpus_on_cpu(raw_smp_processor_id());
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
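
/*
 * kvm_init_arch()/kvm_exit_arch() are the entry points used by the
 * hardware-specific modules, which supply their kvm_arch_ops.  A
 * plausible call from such a module (the actual call sites live in
 * svm.c/vmx.c, not in this file):
 *
 *	static int __init svm_init(void)
 *	{
 *		return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
 *	}
 */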
hpa_t bad_page_address;

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
	int r;

	if (kvm_arch_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_arch_ops = ops;

	r = kvm_arch_ops->hardware_setup();
	if (r < 0)
		return r;

	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	return r;

out_free:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	return r;
}

void kvm_exit_arch(void)
{
	misc_deregister(&kvm_dev);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	kvm_arch_ops = NULL;
}

static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r = 0;

	kvm_init_debug();

	kvm_init_msr_list();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return r;

out:
	kvm_exit_debug();
	return r;
}

static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);