/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
17
e2174021 18#include "iodev.h"
6aa8b732 19
edf88417 20#include <linux/kvm_host.h>
6aa8b732
AK
21#include <linux/kvm.h>
22#include <linux/module.h>
23#include <linux/errno.h>
6aa8b732
AK
24#include <linux/percpu.h>
25#include <linux/gfp.h>
6aa8b732
AK
26#include <linux/mm.h>
27#include <linux/miscdevice.h>
28#include <linux/vmalloc.h>
6aa8b732 29#include <linux/reboot.h>
6aa8b732
AK
30#include <linux/debugfs.h>
31#include <linux/highmem.h>
32#include <linux/file.h>
59ae6c6b 33#include <linux/sysdev.h>
774c47f1 34#include <linux/cpu.h>
e8edc6e0 35#include <linux/sched.h>
d9e368d6
AK
36#include <linux/cpumask.h>
37#include <linux/smp.h>
d6d28168 38#include <linux/anon_inodes.h>
04d2cc77 39#include <linux/profile.h>
7aa81cc0 40#include <linux/kvm_para.h>
6fc138d2 41#include <linux/pagemap.h>
8d4e1288 42#include <linux/mman.h>
6aa8b732 43
e495606d 44#include <asm/processor.h>
e495606d
AK
45#include <asm/io.h>
46#include <asm/uaccess.h>
3e021bf5 47#include <asm/pgtable.h>
6aa8b732
AK
48
49MODULE_AUTHOR("Qumranet");
50MODULE_LICENSE("GPL");
51
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

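/*
 * Illustrative sketch (not part of the original file): the vcpu ioctl
 * handlers below, and the arch handlers they call into, bracket all
 * work on a vcpu with this pair so its state is resident on the
 * current CPU, e.g.:
 *
 *        vcpu_load(vcpu);
 *        r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
 *        vcpu_put(vcpu);
 */
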
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                return;
        ++kvm->stat.remote_tlb_flush;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

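/*
 * Illustrative sketch (assumed arch-side counterpart, not from this
 * file): rather than flushing synchronously, the loop above only sets
 * KVM_REQ_TLB_FLUSH and IPIs the remote CPU; each vcpu is expected to
 * honour the request before it next enters the guest, roughly:
 *
 *        if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *                kvm_x86_ops->tlb_flush(vcpu);    // hypothetical arch hook
 */
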
void kvm_reload_remote_mmus(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                return;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

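/*
 * Illustrative sketch (assumed arch glue, not from this file): an
 * architecture's vcpu constructor wraps kvm_vcpu_init() and undoes it
 * with kvm_vcpu_uninit() on its error path:
 *
 *        r = kvm_vcpu_init(&vmx->vcpu, kvm, id);    // hypothetical container
 *        if (r)
 *                goto free_vcpu;
 *        ...
 *        kvm_vcpu_uninit(&vmx->vcpu);
 */
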
static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();

        if (IS_ERR(kvm))
                goto out;

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        if (!dont || free->lpage_info != dont->lpage_info)
                vfree(free->lpage_info);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
        free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

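/*
 * Illustrative sketch (not from this file): code that stashes a
 * struct kvm pointer beyond the lifetime of the VM file descriptor
 * pins the VM with this reference pair:
 *
 *        kvm_get_kvm(kvm);
 *        ...                        // use kvm from another context
 *        kvm_put_kvm(kvm);          // the final put destroys the VM
 */
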
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                new.userspace_addr = mem->userspace_addr;
        }
        if (npages && !new.lpage_info) {
                int largepages = npages / KVM_PAGES_PER_HPAGE;
                if (npages % KVM_PAGES_PER_HPAGE)
                        largepages++;
                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        largepages++;

                new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

                if (!new.lpage_info)
                        goto out_free;

                memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[0].write_count = 1;
                if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[largepages-1].write_count = 1;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                *memslot = old;
                goto out_free;
        }

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

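/*
 * Illustrative userspace sketch (assumed VMM code, not from this
 * file): guest physical memory is plugged into a slot by backing it
 * with an mmap() and issuing the vm ioctl that lands in the handler
 * above:
 *
 *        struct kvm_userspace_memory_region region = {
 *                .slot            = 0,
 *                .flags           = 0,
 *                .guest_phys_addr = 0,
 *                .memory_size     = mem_size,
 *                .userspace_addr  = (__u64)mem,   // from mmap()
 *        };
 *        ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
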
int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

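/*
 * Worked example (assumed slot layout): for a slot with
 * base_gfn = 0x100 and userspace_addr = 0x7f0000000000, gfn 0x123
 * yields 0x7f0000000000 + (0x123 - 0x100) * PAGE_SIZE, i.e.
 * 0x7f0000023000 with 4K pages.
 */
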
/*
 * Requires current->mm->mmap_sem to be held
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return bad_page;
        }

        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);

        if (npages != 1) {
                get_page(bad_page);
                return bad_page;
        }

        return page[0];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

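/*
 * Illustrative sketch (not from this file): callers pair gfn_to_page()
 * with one of the release helpers above, picking _dirty only when the
 * page was written:
 *
 *        page = gfn_to_page(kvm, gfn);
 *        if (is_error_page(page)) {
 *                kvm_release_page_clean(page);
 *                return -EFAULT;
 *        }
 *        ...                                // read or modify the page
 *        kvm_release_page_dirty(page);      // or _clean if untouched
 */
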
static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

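/*
 * Illustrative sketch (hypothetical caller): kvm_read_guest() hides the
 * per-page segmentation above, so a guest structure that straddles a
 * page boundary still copies out in one call:
 *
 *        struct my_guest_desc desc;         // hypothetical guest layout
 *
 *        if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
 *                return -EFAULT;
 */
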
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&vcpu->wq, &wait);

        /*
         * We will block until either an interrupt or a signal wakes us up
         */
        while (!kvm_cpu_has_interrupt(vcpu)
               && !signal_pending(current)
               && !kvm_arch_vcpu_runnable(vcpu)) {
                set_current_state(TASK_INTERRUPTIBLE);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
}

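/*
 * Illustrative sketch (not from this file): whoever makes the vcpu
 * runnable again, interrupt injection for instance, wakes it through
 * the same wait queue:
 *
 *        if (waitqueue_active(&vcpu->wq))
 *                wake_up_interruptible(&vcpu->wq);
 */
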
void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

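/*
 * Illustrative userspace sketch (assumed VMM code): the fault handler
 * above backs an mmap() of the vcpu fd, which is how userspace reaches
 * the shared kvm_run structure:
 *
 *        size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *        run  = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, vcpu_fd, 0);
 */
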
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        fput(vcpu->kvm->filp);
        return 0;
}

static const struct file_operations kvm_vcpu_fops = {
        .release = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl = kvm_vcpu_ioctl,
        .mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd, r;
        struct inode *inode;
        struct file *file;

        r = anon_inode_getfd(&fd, &inode, &file,
                             "kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (r)
                return r;
        atomic_inc(&vcpu->kvm->filp->f_count);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                goto vcpu_destroy;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
vcpu_destroy:
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

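/*
 * Illustrative userspace sketch (assumed VMM code): vcpus are minted
 * from a VM fd, one ioctl per vcpu id:
 *
 *        vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);    // vcpu id 0
 */
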
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free1;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
                        goto out_free1;
                r = 0;
out_free1:
                kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
                        goto out_free2;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
                r = 0;
out_free2:
                kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                memset(&kvm_sregs, 0, sizeof kvm_sregs);
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                /* Pass p, not &sigset, so a NULL argp clears the sigmask. */
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                struct kvm_fpu fpu;

                memset(&fpu, 0, sizeof fpu);
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &fpu, sizeof fpu))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                struct kvm_fpu fpu;

                r = -EFAULT;
                if (copy_from_user(&fpu, argp, sizeof fpu))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

1057
e4a533a4 1058static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
f17abe9a
AK
1059{
1060 struct kvm *kvm = vma->vm_file->private_data;
f17abe9a
AK
1061 struct page *page;
1062
e4a533a4 1063 if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
1064 return VM_FAULT_SIGBUS;
10589a46 1065 page = gfn_to_page(kvm, vmf->pgoff);
8a7ae055 1066 if (is_error_page(page)) {
b4231d61 1067 kvm_release_page_clean(page);
e4a533a4 1068 return VM_FAULT_SIGBUS;
8a7ae055 1069 }
e4a533a4 1070 vmf->page = page;
1071 return 0;
f17abe9a
AK
1072}
1073
1074static struct vm_operations_struct kvm_vm_vm_ops = {
e4a533a4 1075 .fault = kvm_vm_fault,
f17abe9a
AK
1076};
1077
1078static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1079{
1080 vma->vm_ops = &kvm_vm_vm_ops;
1081 return 0;
1082}
1083
static const struct file_operations kvm_vm_fops = {
        .release = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl = kvm_vm_ioctl,
        .mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd, r;
        struct inode *inode;
        struct file *file;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
        if (r) {
                kvm_destroy_vm(kvm);
                return r;
        }

        kvm->filp = file;

        return fd;
}

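/*
 * Illustrative userspace sketch (assumed VMM code): /dev/kvm, declared
 * below, is the root of the whole fd hierarchy; a VM fd is created
 * from it and then drives the vm ioctls above:
 *
 *        kvm_fd = open("/dev/kvm", O_RDWR);
 *        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *                exit(1);                   // incompatible kernel
 *        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */
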
static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension((long)argp);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
                r += PAGE_SIZE;    /* pio data page */
#endif
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
        kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                on_each_cpu(hardware_disable, NULL, 0, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                if (pos->in_range(pos, addr))
                        return pos;
        }

        return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

        bus->devs[bus->dev_count++] = dev;
}

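/*
 * Illustrative sketch (assumed callers, not from this file): an
 * emulated MMIO access is routed by looking the address up on the
 * bus and invoking the device's callbacks:
 *
 *        dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, gpa);
 *        if (dev)
 *                dev->write(dev, gpa, bytes, val);   // assumed callback
 */
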
static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                *val += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        kvm_init_debug();

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_1;
        }

        on_each_cpu(hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_3;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_4;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_5;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
        sysdev_unregister(&kvm_sysdev);
out_free_4:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
        on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0:
        __free_page(bad_page);
out:
        kvm_arch_exit();
        kvm_exit_debug();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

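/*
 * Illustrative sketch (assumed arch module init, not from this file):
 * an architecture module passes its vcpu container size down so the
 * kmem cache above is sized and aligned correctly:
 *
 *        static int __init vmx_init(void)    // hypothetical x86 example
 *        {
 *                return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *                                THIS_MODULE);
 *        }
 */
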
void kvm_exit(void)
{
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_exit_debug();
        __free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);