mm: close PageTail race
virt/kvm/kvm_main.c (from mt8127/android_kernel_alcatel_ttab.git)
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

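/*
 * A pfn is treated as MMIO when it does not map an ordinary, reference-
 * counted page.  For a compound page the Reserved bit lives on the head
 * page, but a transparent hugepage can be split underneath us while we
 * look, so the value read through the head is only trusted if the page
 * is still a tail page afterwards; hence the PageTail re-check below.
 */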
bool kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		int reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);
		reserved = PageReserved(head);
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that),
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page, so it
			 * can be reused before we run PageReserved
			 * on it), so we have to check PageTail
			 * before returning what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
		/* The thread running this VCPU changed. */
		struct pid *oldpid = vcpu->pid;
		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
		rcu_assign_pointer(vcpu->pid, newpid);
		synchronize_rcu();
		put_pid(oldpid);
	}
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
	return 0;
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

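/*
 * Post @req to every vcpu and send an IPI to those that may currently
 * be running guest code, so that they service the request promptly.
 * A vcpu that is already outside guest mode will see the request on
 * its next guest entry, so no IPI is needed for it.  Returns true if
 * any ack IPI was issued.
 */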
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

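/*
 * Flush the guest TLB on every vcpu.  tlbs_dirty is sampled before the
 * flush request is posted and only reset if it has not changed in the
 * meantime, so writes that race with the flush are not forgotten.
 */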
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	long dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

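/*
 * MMU notifier callbacks keep KVM's shadow page tables coherent with
 * the host mm.  mmu_notifier_seq and mmu_notifier_count drive the
 * retry protocol used by the page fault path (mmu_notifier_retry()).
 */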
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, address);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static void kvm_init_memslots_id(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;
}

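/*
 * Create a new VM: arch state, hardware virtualization, memslots, SRCU
 * and the I/O buses.  Everything set up here is torn down again on the
 * error paths below and, for a live VM, in kvm_destroy_vm().
 */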
static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_nodisable;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err_nosrcu;
	kvm_init_memslots_id(kvm);
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_nosrcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	raw_spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	raw_spin_unlock(&kvm_lock);

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

void kvm_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvm_kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(free, dont);

	free->npages = 0;
}

void kvm_free_physmem(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm->memslots;
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_physmem_slot(memslot, NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &kvm->devices) {
		struct kvm_device *dev =
			list_entry(node, struct kvm_device, vm_node);

		list_del(node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	raw_spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	raw_spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
#ifndef CONFIG_S390
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

#endif /* !CONFIG_S390 */
	return 0;
}

static int cmp_memslot(const void *slot1, const void *slot2)
{
	struct kvm_memory_slot *s1, *s2;

	s1 = (struct kvm_memory_slot *)slot1;
	s2 = (struct kvm_memory_slot *)slot2;

	if (s1->npages < s2->npages)
		return 1;
	if (s1->npages > s2->npages)
		return -1;

	return 0;
}

/*
 * Sort the memslots by size, largest first, so that the larger slots
 * get the better fit.
 */
static void sort_memslots(struct kvm_memslots *slots)
{
	int i;

	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
	     sizeof(struct kvm_memory_slot), cmp_memslot, NULL);

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[slots->memslots[i].id] = i;
}

void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
		     u64 last_generation)
{
	if (new) {
		int id = new->id;
		struct kvm_memory_slot *old = id_to_memslot(slots, id);
		unsigned long npages = old->npages;

		*old = *new;
		if (new->npages != npages)
			sort_memslots(slots);
	}

	slots->generation = last_generation + 1;
}

static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef KVM_CAP_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

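/*
 * Publish a new memslot array.  Lookups run under SRCU, so the old
 * array may only be freed once synchronize_srcu_expedited() guarantees
 * that no reader still sees it; it is returned to the caller for
 * exactly that purpose.
 */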
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		struct kvm_memslots *slots, struct kvm_memory_slot *new)
{
	struct kvm_memslots *old_memslots = kvm->memslots;

	update_memslots(slots, new, kvm->memslots->generation);
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);
	return old_memslots;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((mem->slot < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (mem->slot >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(kvm->memslots, mem->slot);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *slot;

	new.id = mem->slot;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	r = -EINVAL;
	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else if (old.npages) {
		change = KVM_MR_DELETE;
	} else /* Modify a non-existent slot: disallowed. */
		goto out;

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, kvm->memslots) {
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
			    (slot->id == mem->slot))
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(&new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		r = -ENOMEM;
		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
				GFP_KERNEL);
		if (!slots)
			goto out_free;
		slot = id_to_memslot(slots, mem->slot);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, slots, NULL);

		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	r = -ENOMEM;
	/*
	 * We can re-use the old_memslots from above, the only difference
	 * from the currently installed memslots is the invalid flag.  This
	 * will get overwritten by update_memslots anyway.
	 */
	if (!slots) {
		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
				GFP_KERNEL);
		if (!slots)
			goto out_free;
	}

	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_slots;
	}

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	old_memslots = install_new_memslots(kvm, slots, &new);

	kvm_arch_commit_memory_region(kvm, mem, &old, change);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	return 0;

out_slots:
	kfree(slots);
out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem)
{
	if (mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem);
}

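/*
 * Copy a slot's dirty bitmap out to userspace and report in *is_dirty
 * whether any bit is set.  The bitmap itself is not cleared here; that
 * is left to the arch-specific callers.
 */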
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	    memslot->flags & KVM_MEMSLOT_INVALID)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

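/*
 * gfn -> hva translation.  Lookups fail with KVM_HVA_ERR_BAD when the
 * slot is missing or invalid, and with KVM_HVA_ERR_RO_BAD when a
 * writable mapping is requested on a read-only slot.
 */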
static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
				 gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * The hva returned by this function is only allowed to be read.
 * It should pair with kvm_read_hva() or kvm_read_hva_atomic().
 */
static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false);
}

static int kvm_read_hva(void *data, void __user *hva, int len)
{
	return __copy_from_user(data, hva, len);
}

static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
{
	return __copy_from_user_inatomic(data, hva, len);
}

static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The fast path to pin a writable pfn, which is stored in @pfn.
 * Returns true on success, false otherwise.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages;

	if (!(async || atomic))
		return false;

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address.
 * Returns 1 on success, or -errno if an error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = get_user_pages_fast(addr, 1, write_fault,
					     page);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, the function must not sleep
 * @async: whether this function may wait for IO to complete when the
 *         host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	    (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

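/*
 * gfn -> pfn conversion in several flavours: the _atomic variants never
 * sleep, _async may return early and let the caller wait for the page,
 * and _prot reports whether the resulting mapping is writable.  All of
 * them funnel into hva_to_pfn() above.
 */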
static pfn_t
__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
		     bool *async, bool write_fault, bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	struct kvm_memory_slot *slot;

	if (async)
		*async = false;

	slot = gfn_to_memslot(kvm, gfn);

	return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
				    writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(pfn_t pfn)
{
	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	if (kvm_is_mmio_pfn(pfn)) {
		WARN_ON(1);
		return KVM_ERR_PTR_BAD_PAGE;
	}

	return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_read(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = kvm_read_hva(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva_read(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

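/*
 * gfn_to_hva_cache: remember the gfn -> hva translation for a fixed
 * guest region so that repeated accesses can skip the memslot lookup.
 * The cached translation is revalidated whenever the memslot generation
 * changes, and regions that cross a memslot boundary fall back to the
 * uncached kvm_read_guest()/kvm_write_guest() paths.
 */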
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
		ghc->hva += offset;
	} else {
		/*
		 * If the requested region crosses two memslots, we still
		 * verify that the entire region is valid here.
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		/* Use the slow path for cross page reads and writes. */
		ghc->memslot = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
				    offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

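/*
 * Dirty logging: set the bit for @gfn in its slot's dirty bitmap, if
 * the slot has one.  set_bit_le() is an atomic operation, so concurrent
 * writers do not race with each other.
 */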
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;
	bool ret = false;

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return ret;
	if (task->flags & PF_VCPU) {
		put_task_struct(task);
		return ret;
	}
	ret = yield_to(task, 1);
	put_task_struct(task);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

R
1751#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
1752/*
1753 * Helper that checks whether a VCPU is eligible for directed yield.
1754 * Most eligible candidate to yield is decided by following heuristics:
1755 *
1756 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
1757 * (preempted lock holder), indicated by @in_spin_loop.
1758 * Set at the beiginning and cleared at the end of interception/PLE handler.
1759 *
1760 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
1761 * chance last time (mostly it has become eligible now since we have probably
1762 * yielded to lockholder in last iteration. This is done by toggling
1763 * @dy_eligible each time a VCPU checked for eligibility.)
1764 *
1765 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
1766 * to preempted lock-holder could result in wrong VCPU selection and CPU
1767 * burning. Giving priority for a potential lock-holder increases lock
1768 * progress.
1769 *
1770 * Since algorithm is based on heuristics, accessing another VCPU data without
1771 * locking does not harm. It may result in trying to yield to same VCPU, fail
1772 * and continue with next VCPU and so on.
1773 */
1774bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
1775{
1776 bool eligible;
1777
1778 eligible = !vcpu->spin_loop.in_spin_loop ||
1779 (vcpu->spin_loop.in_spin_loop &&
1780 vcpu->spin_loop.dy_eligible);
1781
1782 if (vcpu->spin_loop.in_spin_loop)
1783 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
1784
1785 return eligible;
1786}
1787#endif
c45c528e 1788
217ece61 1789void kvm_vcpu_on_spin(struct kvm_vcpu *me)
d255f4f2 1790{
217ece61
RR
1791 struct kvm *kvm = me->kvm;
1792 struct kvm_vcpu *vcpu;
1793 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
1794 int yielded = 0;
c45c528e 1795 int try = 3;
217ece61
RR
1796 int pass;
1797 int i;
d255f4f2 1798
4c088493 1799 kvm_vcpu_set_in_spin_loop(me, true);
217ece61
RR
1800 /*
1801 * We boost the priority of a VCPU that is runnable but not
1802 * currently running, because it got preempted by something
1803 * else and called schedule in __vcpu_run. Hopefully that
1804 * VCPU is holding the lock that we need and will release it.
1805 * We approximate round-robin by starting at the last boosted VCPU.
1806 */
c45c528e 1807 for (pass = 0; pass < 2 && !yielded && try; pass++) {
217ece61 1808 kvm_for_each_vcpu(i, vcpu, kvm) {
5cfc2aab 1809 if (!pass && i <= last_boosted_vcpu) {
217ece61
RR
1810 i = last_boosted_vcpu;
1811 continue;
1812 } else if (pass && i > last_boosted_vcpu)
1813 break;
7bc7ae25
R
1814 if (!ACCESS_ONCE(vcpu->preempted))
1815 continue;
217ece61
RR
1816 if (vcpu == me)
1817 continue;
1818 if (waitqueue_active(&vcpu->wq))
1819 continue;
06e48c51
R
1820 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
1821 continue;
c45c528e
R
1822
1823 yielded = kvm_vcpu_yield_to(vcpu);
1824 if (yielded > 0) {
217ece61 1825 kvm->last_boosted_vcpu = i;
217ece61 1826 break;
c45c528e
R
1827 } else if (yielded < 0) {
1828 try--;
1829 if (!try)
1830 break;
217ece61 1831 }
217ece61
RR
1832 }
1833 }
4c088493 1834 kvm_vcpu_set_in_spin_loop(me, false);
06e48c51
R
1835
1836 /* Ensure vcpu is not eligible during next spinloop */
1837 kvm_vcpu_set_dy_eligible(me, false);
d255f4f2
ZE
1838}
1839EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
1840
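The two-pass loop above is easier to see in isolation. A standalone sketch (not kernel code; NVCPUS and last_boosted are made-up values): pass 0 covers the VCPUs after the last boosted one, pass 1 wraps around and covers the rest, approximating round-robin without keeping a separate cursor.

#include <stdio.h>

#define NVCPUS 6

int main(void)
{
	int last_boosted = 3;	/* hypothetical kvm->last_boosted_vcpu */
	int pass, i;

	for (pass = 0; pass < 2; pass++) {
		for (i = 0; i < NVCPUS; i++) {
			if (!pass && i <= last_boosted) {
				i = last_boosted;	/* skip ahead on pass 0 */
				continue;
			} else if (pass && i > last_boosted)
				break;			/* pass 1 stops at the cursor */
			printf("visit vcpu %d (pass %d)\n", i, pass);
		}
	}
	return 0;	/* visits 4 5 0 1 2 3 */
}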
e4a533a4 1841static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9a2bb7f4
AK
1842{
1843 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
9a2bb7f4
AK
1844 struct page *page;
1845
e4a533a4 1846 if (vmf->pgoff == 0)
039576c0 1847 page = virt_to_page(vcpu->run);
09566765 1848#ifdef CONFIG_X86
e4a533a4 1849 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
ad312c7c 1850 page = virt_to_page(vcpu->arch.pio_data);
5f94c174
LV
1851#endif
1852#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1853 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1854 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
09566765 1855#endif
039576c0 1856 else
5b1c1493 1857 return kvm_arch_vcpu_fault(vcpu, vmf);
9a2bb7f4 1858 get_page(page);
e4a533a4 1859 vmf->page = page;
1860 return 0;
9a2bb7f4
AK
1861}
1862
f0f37e2f 1863static const struct vm_operations_struct kvm_vcpu_vm_ops = {
e4a533a4 1864 .fault = kvm_vcpu_fault,
9a2bb7f4
AK
1865};
1866
1867static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1868{
1869 vma->vm_ops = &kvm_vcpu_vm_ops;
1870 return 0;
1871}
1872
bccf2150
AK
1873static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1874{
1875 struct kvm_vcpu *vcpu = filp->private_data;
1876
66c0b394 1877 kvm_put_kvm(vcpu->kvm);
bccf2150
AK
1878 return 0;
1879}
1880
3d3aab1b 1881static struct file_operations kvm_vcpu_fops = {
bccf2150
AK
1882 .release = kvm_vcpu_release,
1883 .unlocked_ioctl = kvm_vcpu_ioctl,
1dda606c
AG
1884#ifdef CONFIG_COMPAT
1885 .compat_ioctl = kvm_vcpu_compat_ioctl,
1886#endif
9a2bb7f4 1887 .mmap = kvm_vcpu_mmap,
6038f373 1888 .llseek = noop_llseek,
bccf2150
AK
1889};
1890
1891/*
1892 * Allocates an inode for the vcpu.
1893 */
1894static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1895{
628ff7c1 1896 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
bccf2150
AK
1897}
1898
c5ea7660
AK
1899/*
1900 * Creates some virtual cpus. Good luck creating more than one.
1901 */
73880c80 1902static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
c5ea7660
AK
1903{
1904 int r;
988a2cae 1905 struct kvm_vcpu *vcpu, *v;
c5ea7660 1906
f4d83822
AH
1907 if (id >= KVM_MAX_VCPUS)
1908 return -EINVAL;
1909
73880c80 1910 vcpu = kvm_arch_vcpu_create(kvm, id);
fb3f0f51
RR
1911 if (IS_ERR(vcpu))
1912 return PTR_ERR(vcpu);
c5ea7660 1913
15ad7146
AK
1914 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1915
26e5215f
AK
1916 r = kvm_arch_vcpu_setup(vcpu);
1917 if (r)
d780592b 1918 goto vcpu_destroy;
26e5215f 1919
11ec2804 1920 mutex_lock(&kvm->lock);
3e515705
AK
1921 if (!kvm_vcpu_compatible(vcpu)) {
1922 r = -EINVAL;
1923 goto unlock_vcpu_destroy;
1924 }
73880c80
GN
1925 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1926 r = -EINVAL;
d780592b 1927 goto unlock_vcpu_destroy;
fb3f0f51 1928 }
73880c80 1929
988a2cae
GN
1930 kvm_for_each_vcpu(r, v, kvm)
1931 if (v->vcpu_id == id) {
73880c80 1932 r = -EEXIST;
d780592b 1933 goto unlock_vcpu_destroy;
73880c80
GN
1934 }
1935
1936 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
c5ea7660 1937
fb3f0f51 1938 /* Now it's all set up, let userspace reach it */
66c0b394 1939 kvm_get_kvm(kvm);
bccf2150 1940 r = create_vcpu_fd(vcpu);
73880c80
GN
1941 if (r < 0) {
1942 kvm_put_kvm(kvm);
d780592b 1943 goto unlock_vcpu_destroy;
73880c80
GN
1944 }
1945
1946 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1947 smp_wmb();
1948 atomic_inc(&kvm->online_vcpus);
1949
73880c80 1950 mutex_unlock(&kvm->lock);
42897d86 1951 kvm_arch_vcpu_postcreate(vcpu);
fb3f0f51 1952 return r;
39c3b86e 1953
d780592b 1954unlock_vcpu_destroy:
7d8fece6 1955 mutex_unlock(&kvm->lock);
d780592b 1956vcpu_destroy:
d40ccc62 1957 kvm_arch_vcpu_destroy(vcpu);
c5ea7660
AK
1958 return r;
1959}
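From userspace the path above is reached through two ioctls. A minimal sketch (error handling collapsed for brevity; assumes /dev/kvm exists and the installed headers match the running kernel):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);		/* the misc device registered below */
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* -> kvm_dev_ioctl_create_vm() */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* -> kvm_vm_ioctl_create_vcpu(kvm, 0) */

	if (vcpu < 0) {
		perror("KVM_CREATE_VCPU");
		return 1;
	}
	printf("vcpu fd = %d\n", vcpu);
	return 0;
}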
1960
1961d276
AK
1961static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1962{
1963 if (sigset) {
1964 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1965 vcpu->sigset_active = 1;
1966 vcpu->sigset = *sigset;
1967 } else
1968 vcpu->sigset_active = 0;
1969 return 0;
1970}
1971
bccf2150
AK
1972static long kvm_vcpu_ioctl(struct file *filp,
1973 unsigned int ioctl, unsigned long arg)
6aa8b732 1974{
bccf2150 1975 struct kvm_vcpu *vcpu = filp->private_data;
2f366987 1976 void __user *argp = (void __user *)arg;
313a3dc7 1977 int r;
fa3795a7
DH
1978 struct kvm_fpu *fpu = NULL;
1979 struct kvm_sregs *kvm_sregs = NULL;
6aa8b732 1980
6d4e4c4f
AK
1981 if (vcpu->kvm->mm != current->mm)
1982 return -EIO;
2122ff5e 1983
2f4d9b54 1984#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
2122ff5e
AK
1985 /*
1986 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1987 * so vcpu_load() would break it.
1988 */
1989 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
1990 return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1991#endif
1992
1993
9fc77441
MT
1994 r = vcpu_load(vcpu);
1995 if (r)
1996 return r;
6aa8b732 1997 switch (ioctl) {
9a2bb7f4 1998 case KVM_RUN:
f0fe5108
AK
1999 r = -EINVAL;
2000 if (arg)
2001 goto out;
b6c7a5dc 2002 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
64be5007 2003 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
6aa8b732 2004 break;
6aa8b732 2005 case KVM_GET_REGS: {
3e4bb3ac 2006 struct kvm_regs *kvm_regs;
6aa8b732 2007
3e4bb3ac
XZ
2008 r = -ENOMEM;
2009 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
2010 if (!kvm_regs)
6aa8b732 2011 goto out;
3e4bb3ac
XZ
2012 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
2013 if (r)
2014 goto out_free1;
6aa8b732 2015 r = -EFAULT;
3e4bb3ac
XZ
2016 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
2017 goto out_free1;
6aa8b732 2018 r = 0;
3e4bb3ac
XZ
2019out_free1:
2020 kfree(kvm_regs);
6aa8b732
AK
2021 break;
2022 }
2023 case KVM_SET_REGS: {
3e4bb3ac 2024 struct kvm_regs *kvm_regs;
6aa8b732 2025
3e4bb3ac 2026 r = -ENOMEM;
ff5c2c03
SL
2027 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
2028 if (IS_ERR(kvm_regs)) {
2029 r = PTR_ERR(kvm_regs);
6aa8b732 2030 goto out;
ff5c2c03 2031 }
3e4bb3ac 2032 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3e4bb3ac 2033 kfree(kvm_regs);
6aa8b732
AK
2034 break;
2035 }
2036 case KVM_GET_SREGS: {
fa3795a7
DH
2037 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
2038 r = -ENOMEM;
2039 if (!kvm_sregs)
2040 goto out;
2041 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
6aa8b732
AK
2042 if (r)
2043 goto out;
2044 r = -EFAULT;
fa3795a7 2045 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
6aa8b732
AK
2046 goto out;
2047 r = 0;
2048 break;
2049 }
2050 case KVM_SET_SREGS: {
ff5c2c03
SL
2051 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
2052 if (IS_ERR(kvm_sregs)) {
2053 r = PTR_ERR(kvm_sregs);
18595411 2054 kvm_sregs = NULL;
6aa8b732 2055 goto out;
ff5c2c03 2056 }
fa3795a7 2057 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
6aa8b732
AK
2058 break;
2059 }
62d9f0db
MT
2060 case KVM_GET_MP_STATE: {
2061 struct kvm_mp_state mp_state;
2062
2063 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
2064 if (r)
2065 goto out;
2066 r = -EFAULT;
2067 if (copy_to_user(argp, &mp_state, sizeof mp_state))
2068 goto out;
2069 r = 0;
2070 break;
2071 }
2072 case KVM_SET_MP_STATE: {
2073 struct kvm_mp_state mp_state;
2074
2075 r = -EFAULT;
2076 if (copy_from_user(&mp_state, argp, sizeof mp_state))
2077 goto out;
2078 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
62d9f0db
MT
2079 break;
2080 }
6aa8b732
AK
2081 case KVM_TRANSLATE: {
2082 struct kvm_translation tr;
2083
2084 r = -EFAULT;
2f366987 2085 if (copy_from_user(&tr, argp, sizeof tr))
6aa8b732 2086 goto out;
8b006791 2087 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
6aa8b732
AK
2088 if (r)
2089 goto out;
2090 r = -EFAULT;
2f366987 2091 if (copy_to_user(argp, &tr, sizeof tr))
6aa8b732
AK
2092 goto out;
2093 r = 0;
2094 break;
2095 }
d0bfb940
JK
2096 case KVM_SET_GUEST_DEBUG: {
2097 struct kvm_guest_debug dbg;
6aa8b732
AK
2098
2099 r = -EFAULT;
2f366987 2100 if (copy_from_user(&dbg, argp, sizeof dbg))
6aa8b732 2101 goto out;
d0bfb940 2102 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
6aa8b732
AK
2103 break;
2104 }
1961d276
AK
2105 case KVM_SET_SIGNAL_MASK: {
2106 struct kvm_signal_mask __user *sigmask_arg = argp;
2107 struct kvm_signal_mask kvm_sigmask;
2108 sigset_t sigset, *p;
2109
2110 p = NULL;
2111 if (argp) {
2112 r = -EFAULT;
2113 if (copy_from_user(&kvm_sigmask, argp,
2114 sizeof kvm_sigmask))
2115 goto out;
2116 r = -EINVAL;
2117 if (kvm_sigmask.len != sizeof sigset)
2118 goto out;
2119 r = -EFAULT;
2120 if (copy_from_user(&sigset, sigmask_arg->sigset,
2121 sizeof sigset))
2122 goto out;
2123 p = &sigset;
2124 }
376d41ff 2125 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1961d276
AK
2126 break;
2127 }
b8836737 2128 case KVM_GET_FPU: {
fa3795a7
DH
2129 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2130 r = -ENOMEM;
2131 if (!fpu)
2132 goto out;
2133 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
b8836737
AK
2134 if (r)
2135 goto out;
2136 r = -EFAULT;
fa3795a7 2137 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
b8836737
AK
2138 goto out;
2139 r = 0;
2140 break;
2141 }
2142 case KVM_SET_FPU: {
ff5c2c03
SL
2143 fpu = memdup_user(argp, sizeof(*fpu));
2144 if (IS_ERR(fpu)) {
2145 r = PTR_ERR(fpu);
18595411 2146 fpu = NULL;
b8836737 2147 goto out;
ff5c2c03 2148 }
fa3795a7 2149 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
b8836737
AK
2150 break;
2151 }
bccf2150 2152 default:
313a3dc7 2153 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
bccf2150
AK
2154 }
2155out:
2122ff5e 2156 vcpu_put(vcpu);
fa3795a7
DH
2157 kfree(fpu);
2158 kfree(kvm_sregs);
bccf2150
AK
2159 return r;
2160}
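The KVM_SET_SIGNAL_MASK case above takes a variable-length structure. A sketch of the userspace side, loosely modeled on how QEMU kicks vcpu threads (assumes a valid vcpu fd; note the kernel compares len against its own sigset_t, 8 bytes on x86-64, not glibc's 128-byte sigset_t):

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Leave only SIGUSR1 deliverable during KVM_RUN, so a pending
 * SIGUSR1 makes KVM_RUN return -EINTR instead of running a
 * handler while in guest mode. */
int set_vcpu_sigmask(int vcpu_fd)
{
	struct kvm_signal_mask *mask;
	sigset_t set;
	int r;

	sigfillset(&set);
	sigdelset(&set, SIGUSR1);

	mask = malloc(sizeof(*mask) + 8);
	if (!mask)
		return -1;
	mask->len = 8;			/* kernel sigset_t size, not sizeof(sigset_t) */
	memcpy(mask->sigset, &set, 8);	/* low 64 signals */
	r = ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, mask);
	free(mask);
	return r;
}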
2161
1dda606c
AG
2162#ifdef CONFIG_COMPAT
2163static long kvm_vcpu_compat_ioctl(struct file *filp,
2164 unsigned int ioctl, unsigned long arg)
2165{
2166 struct kvm_vcpu *vcpu = filp->private_data;
2167 void __user *argp = compat_ptr(arg);
2168 int r;
2169
2170 if (vcpu->kvm->mm != current->mm)
2171 return -EIO;
2172
2173 switch (ioctl) {
2174 case KVM_SET_SIGNAL_MASK: {
2175 struct kvm_signal_mask __user *sigmask_arg = argp;
2176 struct kvm_signal_mask kvm_sigmask;
2177 compat_sigset_t csigset;
2178 sigset_t sigset;
2179
2180 if (argp) {
2181 r = -EFAULT;
2182 if (copy_from_user(&kvm_sigmask, argp,
2183 sizeof kvm_sigmask))
2184 goto out;
2185 r = -EINVAL;
2186 if (kvm_sigmask.len != sizeof csigset)
2187 goto out;
2188 r = -EFAULT;
2189 if (copy_from_user(&csigset, sigmask_arg->sigset,
2190 sizeof csigset))
2191 goto out;
760a9a30
AC
2192 sigset_from_compat(&sigset, &csigset);
2193 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
2194 } else
2195 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
1dda606c
AG
2196 break;
2197 }
2198 default:
2199 r = kvm_vcpu_ioctl(filp, ioctl, arg);
2200 }
2201
2202out:
2203 return r;
2204}
2205#endif
2206
852b6d57
SW
2207static int kvm_device_ioctl_attr(struct kvm_device *dev,
2208 int (*accessor)(struct kvm_device *dev,
2209 struct kvm_device_attr *attr),
2210 unsigned long arg)
2211{
2212 struct kvm_device_attr attr;
2213
2214 if (!accessor)
2215 return -EPERM;
2216
2217 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2218 return -EFAULT;
2219
2220 return accessor(dev, &attr);
2221}
2222
2223static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
2224 unsigned long arg)
2225{
2226 struct kvm_device *dev = filp->private_data;
2227
2228 switch (ioctl) {
2229 case KVM_SET_DEVICE_ATTR:
2230 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
2231 case KVM_GET_DEVICE_ATTR:
2232 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
2233 case KVM_HAS_DEVICE_ATTR:
2234 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
2235 default:
2236 if (dev->ops->ioctl)
2237 return dev->ops->ioctl(dev, ioctl, arg);
2238
2239 return -ENOTTY;
2240 }
2241}
2242
852b6d57
SW
2243static int kvm_device_release(struct inode *inode, struct file *filp)
2244{
2245 struct kvm_device *dev = filp->private_data;
2246 struct kvm *kvm = dev->kvm;
2247
852b6d57
SW
2248 kvm_put_kvm(kvm);
2249 return 0;
2250}
2251
2252static const struct file_operations kvm_device_fops = {
2253 .unlocked_ioctl = kvm_device_ioctl,
db6ae615
SW
2254#ifdef CONFIG_COMPAT
2255 .compat_ioctl = kvm_device_ioctl,
2256#endif
852b6d57
SW
2257 .release = kvm_device_release,
2258};
2259
2260struct kvm_device *kvm_device_from_filp(struct file *filp)
2261{
2262 if (filp->f_op != &kvm_device_fops)
2263 return NULL;
2264
2265 return filp->private_data;
2266}
2267
2268static int kvm_ioctl_create_device(struct kvm *kvm,
2269 struct kvm_create_device *cd)
2270{
2271 struct kvm_device_ops *ops = NULL;
2272 struct kvm_device *dev;
2273 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
2274 int ret;
2275
2276 switch (cd->type) {
5df554ad
SW
2277#ifdef CONFIG_KVM_MPIC
2278 case KVM_DEV_TYPE_FSL_MPIC_20:
2279 case KVM_DEV_TYPE_FSL_MPIC_42:
2280 ops = &kvm_mpic_ops;
2281 break;
5975a2e0
PM
2282#endif
2283#ifdef CONFIG_KVM_XICS
2284 case KVM_DEV_TYPE_XICS:
2285 ops = &kvm_xics_ops;
2286 break;
5df554ad 2287#endif
852b6d57
SW
2288 default:
2289 return -ENODEV;
2290 }
2291
2292 if (test)
2293 return 0;
2294
2295 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2296 if (!dev)
2297 return -ENOMEM;
2298
2299 dev->ops = ops;
2300 dev->kvm = kvm;
852b6d57
SW
2301
2302 ret = ops->create(dev, cd->type);
2303 if (ret < 0) {
2304 kfree(dev);
2305 return ret;
2306 }
2307
2308 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR);
2309 if (ret < 0) {
2310 ops->destroy(dev);
2311 return ret;
2312 }
2313
07f0a7bd 2314 list_add(&dev->vm_node, &kvm->devices);
852b6d57
SW
2315 kvm_get_kvm(kvm);
2316 cd->fd = ret;
2317 return 0;
2318}
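A userspace sketch of exercising the test path above (assumes a valid vm fd and kernel headers that define these device types): with KVM_CREATE_DEVICE_TEST set, the type switch runs but ops->create() is never called, so the ioctl only reports whether the type is supported.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 0 if the in-kernel MPIC is available, -1 with errno set
 * (ENODEV for an unknown type) otherwise. */
int probe_mpic(int vm_fd)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_FSL_MPIC_20,
		.flags = KVM_CREATE_DEVICE_TEST,	/* probe only, create nothing */
	};

	return ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
}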
2319
bccf2150
AK
2320static long kvm_vm_ioctl(struct file *filp,
2321 unsigned int ioctl, unsigned long arg)
2322{
2323 struct kvm *kvm = filp->private_data;
2324 void __user *argp = (void __user *)arg;
1fe779f8 2325 int r;
bccf2150 2326
6d4e4c4f
AK
2327 if (kvm->mm != current->mm)
2328 return -EIO;
bccf2150
AK
2329 switch (ioctl) {
2330 case KVM_CREATE_VCPU:
2331 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
bccf2150 2332 break;
6fc138d2
IE
2333 case KVM_SET_USER_MEMORY_REGION: {
2334 struct kvm_userspace_memory_region kvm_userspace_mem;
2335
2336 r = -EFAULT;
2337 if (copy_from_user(&kvm_userspace_mem, argp,
2338 sizeof kvm_userspace_mem))
2339 goto out;
2340
47ae31e2 2341 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
6aa8b732
AK
2342 break;
2343 }
2344 case KVM_GET_DIRTY_LOG: {
2345 struct kvm_dirty_log log;
2346
2347 r = -EFAULT;
2f366987 2348 if (copy_from_user(&log, argp, sizeof log))
6aa8b732 2349 goto out;
2c6f5df9 2350 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6aa8b732
AK
2351 break;
2352 }
5f94c174
LV
2353#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2354 case KVM_REGISTER_COALESCED_MMIO: {
2355 struct kvm_coalesced_mmio_zone zone;
2356 r = -EFAULT;
2357 if (copy_from_user(&zone, argp, sizeof zone))
2358 goto out;
5f94c174 2359 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5f94c174
LV
2360 break;
2361 }
2362 case KVM_UNREGISTER_COALESCED_MMIO: {
2363 struct kvm_coalesced_mmio_zone zone;
2364 r = -EFAULT;
2365 if (copy_from_user(&zone, argp, sizeof zone))
2366 goto out;
5f94c174 2367 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5f94c174
LV
2368 break;
2369 }
2370#endif
721eecbf
GH
2371 case KVM_IRQFD: {
2372 struct kvm_irqfd data;
2373
2374 r = -EFAULT;
2375 if (copy_from_user(&data, argp, sizeof data))
2376 goto out;
d4db2935 2377 r = kvm_irqfd(kvm, &data);
721eecbf
GH
2378 break;
2379 }
d34e6b17
GH
2380 case KVM_IOEVENTFD: {
2381 struct kvm_ioeventfd data;
2382
2383 r = -EFAULT;
2384 if (copy_from_user(&data, argp, sizeof data))
2385 goto out;
2386 r = kvm_ioeventfd(kvm, &data);
2387 break;
2388 }
73880c80
GN
2389#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2390 case KVM_SET_BOOT_CPU_ID:
2391 r = 0;
894a9c55 2392 mutex_lock(&kvm->lock);
73880c80
GN
2393 if (atomic_read(&kvm->online_vcpus) != 0)
2394 r = -EBUSY;
2395 else
2396 kvm->bsp_vcpu_id = arg;
894a9c55 2397 mutex_unlock(&kvm->lock);
73880c80 2398 break;
07975ad3
JK
2399#endif
2400#ifdef CONFIG_HAVE_KVM_MSI
2401 case KVM_SIGNAL_MSI: {
2402 struct kvm_msi msi;
2403
2404 r = -EFAULT;
2405 if (copy_from_user(&msi, argp, sizeof msi))
2406 goto out;
2407 r = kvm_send_userspace_msi(kvm, &msi);
2408 break;
2409 }
23d43cf9
CD
2410#endif
2411#ifdef __KVM_HAVE_IRQ_LINE
2412 case KVM_IRQ_LINE_STATUS:
2413 case KVM_IRQ_LINE: {
2414 struct kvm_irq_level irq_event;
2415
2416 r = -EFAULT;
2417 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2418 goto out;
2419
aa2fbe6d
YZ
2420 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
2421 ioctl == KVM_IRQ_LINE_STATUS);
23d43cf9
CD
2422 if (r)
2423 goto out;
2424
2425 r = -EFAULT;
2426 if (ioctl == KVM_IRQ_LINE_STATUS) {
2427 if (copy_to_user(argp, &irq_event, sizeof irq_event))
2428 goto out;
2429 }
2430
2431 r = 0;
2432 break;
2433 }
73880c80 2434#endif
aa8d5944
AG
2435#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2436 case KVM_SET_GSI_ROUTING: {
2437 struct kvm_irq_routing routing;
2438 struct kvm_irq_routing __user *urouting;
2439 struct kvm_irq_routing_entry *entries;
2440
2441 r = -EFAULT;
2442 if (copy_from_user(&routing, argp, sizeof(routing)))
2443 goto out;
2444 r = -EINVAL;
2445 if (routing.nr >= KVM_MAX_IRQ_ROUTES)
2446 goto out;
2447 if (routing.flags)
2448 goto out;
2449 r = -ENOMEM;
2450 entries = vmalloc(routing.nr * sizeof(*entries));
2451 if (!entries)
2452 goto out;
2453 r = -EFAULT;
2454 urouting = argp;
2455 if (copy_from_user(entries, urouting->entries,
2456 routing.nr * sizeof(*entries)))
2457 goto out_free_irq_routing;
2458 r = kvm_set_irq_routing(kvm, entries, routing.nr,
2459 routing.flags);
2460 out_free_irq_routing:
2461 vfree(entries);
2462 break;
2463 }
2464#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
852b6d57
SW
2465 case KVM_CREATE_DEVICE: {
2466 struct kvm_create_device cd;
2467
2468 r = -EFAULT;
2469 if (copy_from_user(&cd, argp, sizeof(cd)))
2470 goto out;
2471
2472 r = kvm_ioctl_create_device(kvm, &cd);
2473 if (r)
2474 goto out;
2475
2476 r = -EFAULT;
2477 if (copy_to_user(argp, &cd, sizeof(cd)))
2478 goto out;
2479
2480 r = 0;
2481 break;
2482 }
f17abe9a 2483 default:
1fe779f8 2484 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
bfd99ff5
AK
2485 if (r == -ENOTTY)
2486 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
f17abe9a
AK
2487 }
2488out:
2489 return r;
2490}
2491
6ff5894c
AB
2492#ifdef CONFIG_COMPAT
2493struct compat_kvm_dirty_log {
2494 __u32 slot;
2495 __u32 padding1;
2496 union {
2497 compat_uptr_t dirty_bitmap; /* one bit per page */
2498 __u64 padding2;
2499 };
2500};
2501
2502static long kvm_vm_compat_ioctl(struct file *filp,
2503 unsigned int ioctl, unsigned long arg)
2504{
2505 struct kvm *kvm = filp->private_data;
2506 int r;
2507
2508 if (kvm->mm != current->mm)
2509 return -EIO;
2510 switch (ioctl) {
2511 case KVM_GET_DIRTY_LOG: {
2512 struct compat_kvm_dirty_log compat_log;
2513 struct kvm_dirty_log log;
2514
2515 r = -EFAULT;
2516 if (copy_from_user(&compat_log, (void __user *)arg,
2517 sizeof(compat_log)))
2518 goto out;
2519 log.slot = compat_log.slot;
2520 log.padding1 = compat_log.padding1;
2521 log.padding2 = compat_log.padding2;
2522 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
2523
2524 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6ff5894c
AB
2525 break;
2526 }
2527 default:
2528 r = kvm_vm_ioctl(filp, ioctl, arg);
2529 }
2530
2531out:
2532 return r;
2533}
2534#endif
2535
e4a533a4 2536static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
f17abe9a 2537{
777b3f49
MT
2538 struct page *page[1];
2539 unsigned long addr;
2540 int npages;
2541 gfn_t gfn = vmf->pgoff;
f17abe9a 2542 struct kvm *kvm = vma->vm_file->private_data;
f17abe9a 2543
777b3f49
MT
2544 addr = gfn_to_hva(kvm, gfn);
2545 if (kvm_is_error_hva(addr))
e4a533a4 2546 return VM_FAULT_SIGBUS;
777b3f49
MT
2547
2548 npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
2549 NULL);
2550 if (unlikely(npages != 1))
e4a533a4 2551 return VM_FAULT_SIGBUS;
777b3f49
MT
2552
2553 vmf->page = page[0];
e4a533a4 2554 return 0;
f17abe9a
AK
2555}
2556
f0f37e2f 2557static const struct vm_operations_struct kvm_vm_vm_ops = {
e4a533a4 2558 .fault = kvm_vm_fault,
f17abe9a
AK
2559};
2560
2561static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2562{
2563 vma->vm_ops = &kvm_vm_vm_ops;
2564 return 0;
2565}
2566
3d3aab1b 2567static struct file_operations kvm_vm_fops = {
f17abe9a
AK
2568 .release = kvm_vm_release,
2569 .unlocked_ioctl = kvm_vm_ioctl,
6ff5894c
AB
2570#ifdef CONFIG_COMPAT
2571 .compat_ioctl = kvm_vm_compat_ioctl,
2572#endif
f17abe9a 2573 .mmap = kvm_vm_mmap,
6038f373 2574 .llseek = noop_llseek,
f17abe9a
AK
2575};
2576
e08b9637 2577static int kvm_dev_ioctl_create_vm(unsigned long type)
f17abe9a 2578{
aac87636 2579 int r;
f17abe9a
AK
2580 struct kvm *kvm;
2581
e08b9637 2582 kvm = kvm_create_vm(type);
d6d28168
AK
2583 if (IS_ERR(kvm))
2584 return PTR_ERR(kvm);
6ce5a090
TY
2585#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2586 r = kvm_coalesced_mmio_init(kvm);
2587 if (r < 0) {
2588 kvm_put_kvm(kvm);
2589 return r;
2590 }
2591#endif
aac87636
HC
2592 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
2593 if (r < 0)
66c0b394 2594 kvm_put_kvm(kvm);
f17abe9a 2595
aac87636 2596 return r;
f17abe9a
AK
2597}
2598
1a811b61
AK
2599static long kvm_dev_ioctl_check_extension_generic(long arg)
2600{
2601 switch (arg) {
ca9edaee 2602 case KVM_CAP_USER_MEMORY:
1a811b61 2603 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4cd481f6 2604 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
73880c80
GN
2605#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2606 case KVM_CAP_SET_BOOT_CPU_ID:
2607#endif
a9c7399d 2608 case KVM_CAP_INTERNAL_ERROR_DATA:
07975ad3
JK
2609#ifdef CONFIG_HAVE_KVM_MSI
2610 case KVM_CAP_SIGNAL_MSI:
7df35f54
AG
2611#endif
2612#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2613 case KVM_CAP_IRQFD_RESAMPLE:
07975ad3 2614#endif
1a811b61 2615 return 1;
a725d56a 2616#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
399ec807 2617 case KVM_CAP_IRQ_ROUTING:
36463146 2618 return KVM_MAX_IRQ_ROUTES;
399ec807 2619#endif
1a811b61
AK
2620 default:
2621 break;
2622 }
2623 return kvm_dev_ioctl_check_extension(arg);
2624}
2625
f17abe9a
AK
2626static long kvm_dev_ioctl(struct file *filp,
2627 unsigned int ioctl, unsigned long arg)
2628{
07c45a36 2629 long r = -EINVAL;
f17abe9a
AK
2630
2631 switch (ioctl) {
2632 case KVM_GET_API_VERSION:
f0fe5108
AK
2633 r = -EINVAL;
2634 if (arg)
2635 goto out;
f17abe9a
AK
2636 r = KVM_API_VERSION;
2637 break;
2638 case KVM_CREATE_VM:
e08b9637 2639 r = kvm_dev_ioctl_create_vm(arg);
f17abe9a 2640 break;
018d00d2 2641 case KVM_CHECK_EXTENSION:
1a811b61 2642 r = kvm_dev_ioctl_check_extension_generic(arg);
5d308f45 2643 break;
07c45a36
AK
2644 case KVM_GET_VCPU_MMAP_SIZE:
2645 r = -EINVAL;
2646 if (arg)
2647 goto out;
adb1ff46
AK
2648 r = PAGE_SIZE; /* struct kvm_run */
2649#ifdef CONFIG_X86
2650 r += PAGE_SIZE; /* pio data page */
5f94c174
LV
2651#endif
2652#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2653 r += PAGE_SIZE; /* coalesced mmio ring page */
adb1ff46 2654#endif
07c45a36 2655 break;
d4c9ff2d
FEL
2656 case KVM_TRACE_ENABLE:
2657 case KVM_TRACE_PAUSE:
2658 case KVM_TRACE_DISABLE:
2023a29c 2659 r = -EOPNOTSUPP;
d4c9ff2d 2660 break;
6aa8b732 2661 default:
043405e1 2662 return kvm_arch_dev_ioctl(filp, ioctl, arg);
6aa8b732
AK
2663 }
2664out:
2665 return r;
2666}
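The KVM_GET_VCPU_MMAP_SIZE answer above is what userspace must pass to mmap() on a vcpu fd; the resulting faults land in kvm_vcpu_fault() further up. A sketch, assuming valid /dev/kvm and vcpu fds:

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

struct kvm_run *map_vcpu_run(int kvm_fd, int vcpu_fd)
{
	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	void *p;

	if (size < 0)
		return NULL;
	/* page 0: struct kvm_run; x86 adds a pio data page, plus an
	 * optional coalesced-mmio ring page */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
	return p == MAP_FAILED ? NULL : p;
}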
2667
6aa8b732 2668static struct file_operations kvm_chardev_ops = {
6aa8b732
AK
2669 .unlocked_ioctl = kvm_dev_ioctl,
2670 .compat_ioctl = kvm_dev_ioctl,
6038f373 2671 .llseek = noop_llseek,
6aa8b732
AK
2672};
2673
2674static struct miscdevice kvm_dev = {
bbe4432e 2675 KVM_MINOR,
6aa8b732
AK
2676 "kvm",
2677 &kvm_chardev_ops,
2678};
2679
75b7127c 2680static void hardware_enable_nolock(void *junk)
1b6c0168
AK
2681{
2682 int cpu = raw_smp_processor_id();
10474ae8 2683 int r;
1b6c0168 2684
7f59f492 2685 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 2686 return;
10474ae8 2687
7f59f492 2688 cpumask_set_cpu(cpu, cpus_hardware_enabled);
10474ae8
AG
2689
2690 r = kvm_arch_hardware_enable(NULL);
2691
2692 if (r) {
2693 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2694 atomic_inc(&hardware_enable_failed);
2695 printk(KERN_INFO "kvm: enabling virtualization on "
2696 "CPU%d failed\n", cpu);
2697 }
1b6c0168
AK
2698}
2699
75b7127c
TY
2700static void hardware_enable(void *junk)
2701{
e935b837 2702 raw_spin_lock(&kvm_lock);
75b7127c 2703 hardware_enable_nolock(junk);
e935b837 2704 raw_spin_unlock(&kvm_lock);
75b7127c
TY
2705}
2706
2707static void hardware_disable_nolock(void *junk)
1b6c0168
AK
2708{
2709 int cpu = raw_smp_processor_id();
2710
7f59f492 2711 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 2712 return;
7f59f492 2713 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
e9b11c17 2714 kvm_arch_hardware_disable(NULL);
1b6c0168
AK
2715}
2716
75b7127c
TY
2717static void hardware_disable(void *junk)
2718{
e935b837 2719 raw_spin_lock(&kvm_lock);
75b7127c 2720 hardware_disable_nolock(junk);
e935b837 2721 raw_spin_unlock(&kvm_lock);
75b7127c
TY
2722}
2723
10474ae8
AG
2724static void hardware_disable_all_nolock(void)
2725{
2726 BUG_ON(!kvm_usage_count);
2727
2728 kvm_usage_count--;
2729 if (!kvm_usage_count)
75b7127c 2730 on_each_cpu(hardware_disable_nolock, NULL, 1);
10474ae8
AG
2731}
2732
2733static void hardware_disable_all(void)
2734{
e935b837 2735 raw_spin_lock(&kvm_lock);
10474ae8 2736 hardware_disable_all_nolock();
e935b837 2737 raw_spin_unlock(&kvm_lock);
10474ae8
AG
2738}
2739
2740static int hardware_enable_all(void)
2741{
2742 int r = 0;
2743
e935b837 2744 raw_spin_lock(&kvm_lock);
10474ae8
AG
2745
2746 kvm_usage_count++;
2747 if (kvm_usage_count == 1) {
2748 atomic_set(&hardware_enable_failed, 0);
75b7127c 2749 on_each_cpu(hardware_enable_nolock, NULL, 1);
10474ae8
AG
2750
2751 if (atomic_read(&hardware_enable_failed)) {
2752 hardware_disable_all_nolock();
2753 r = -EBUSY;
2754 }
2755 }
2756
e935b837 2757 raw_spin_unlock(&kvm_lock);
10474ae8
AG
2758
2759 return r;
2760}
2761
774c47f1
AK
2762static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2763 void *v)
2764{
2765 int cpu = (long)v;
2766
10474ae8
AG
2767 if (!kvm_usage_count)
2768 return NOTIFY_OK;
2769
1a6f4d7f 2770 val &= ~CPU_TASKS_FROZEN;
774c47f1 2771 switch (val) {
cec9ad27 2772 case CPU_DYING:
6ec8a856
AK
2773 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2774 cpu);
2775 hardware_disable(NULL);
2776 break;
da908f2f 2777 case CPU_STARTING:
43934a38
JK
2778 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2779 cpu);
da908f2f 2780 hardware_enable(NULL);
774c47f1
AK
2781 break;
2782 }
2783 return NOTIFY_OK;
2784}
2785
9a2b85c6 2786static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
d77c26fc 2787 void *v)
9a2b85c6 2788{
8e1c1815
SY
2789 /*
2790 * Some (well, at least mine) BIOSes hang on reboot if
2791 * in vmx root mode.
2792 *
 2793 * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.

2794 */
2795 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2796 kvm_rebooting = true;
75b7127c 2797 on_each_cpu(hardware_disable_nolock, NULL, 1);
9a2b85c6
RR
2798 return NOTIFY_OK;
2799}
2800
2801static struct notifier_block kvm_reboot_notifier = {
2802 .notifier_call = kvm_reboot,
2803 .priority = 0,
2804};
2805
e93f8a0f 2806static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2eeb2e94
GH
2807{
2808 int i;
2809
2810 for (i = 0; i < bus->dev_count; i++) {
743eeb0b 2811 struct kvm_io_device *pos = bus->range[i].dev;
2eeb2e94
GH
2812
2813 kvm_iodevice_destructor(pos);
2814 }
e93f8a0f 2815 kfree(bus);
2eeb2e94
GH
2816}
2817
39369f7a 2818static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
743eeb0b
SL
2819{
2820 const struct kvm_io_range *r1 = p1;
2821 const struct kvm_io_range *r2 = p2;
2822
2823 if (r1->addr < r2->addr)
2824 return -1;
2825 if (r1->addr + r1->len > r2->addr + r2->len)
2826 return 1;
2827 return 0;
2828}
2829
39369f7a 2830static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
743eeb0b
SL
2831 gpa_t addr, int len)
2832{
743eeb0b
SL
2833 bus->range[bus->dev_count++] = (struct kvm_io_range) {
2834 .addr = addr,
2835 .len = len,
2836 .dev = dev,
2837 };
2838
2839 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
2840 kvm_io_bus_sort_cmp, NULL);
2841
2842 return 0;
2843}
2844
39369f7a 2845static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
743eeb0b
SL
2846 gpa_t addr, int len)
2847{
2848 struct kvm_io_range *range, key;
2849 int off;
2850
2851 key = (struct kvm_io_range) {
2852 .addr = addr,
2853 .len = len,
2854 };
2855
2856 range = bsearch(&key, bus->range, bus->dev_count,
2857 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
2858 if (range == NULL)
2859 return -ENOENT;
2860
2861 off = range - bus->range;
2862
2863 while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
2864 off--;
2865
2866 return off;
2867}
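The comparator/back-scan pair above is subtle: kvm_io_bus_sort_cmp() reports "equal" whenever the key range is fully contained in a registered range, so bsearch() may land on any of several matches and the caller rewinds to the first. A standalone sketch with libc's bsearch (not kernel code; the table values are made up and the key is chosen to match):

#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long addr; int len; };

static int cmp(const void *p1, const void *p2)
{
	const struct range *r1 = p1, *r2 = p2;

	if (r1->addr < r2->addr)
		return -1;
	if (r1->addr + r1->len > r2->addr + r2->len)
		return 1;
	return 0;	/* key is contained in the registered range */
}

int main(void)
{
	struct range bus[] = { { 0x100, 8 }, { 0x200, 4 }, { 0x200, 8 } };
	struct range key = { 0x200, 2 };
	struct range *hit = bsearch(&key, bus, 3, sizeof(*bus), cmp);
	int off = hit - bus;	/* key matches, so hit != NULL here */

	while (off > 0 && cmp(&key, &bus[off - 1]) == 0)
		off--;		/* rewind to the first containing range */
	printf("first match at index %d\n", off);	/* -> 1 */
	return 0;
}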
2868
bda9020e 2869/* kvm_io_bus_write - called under kvm->slots_lock */
e93f8a0f 2870int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
bda9020e 2871 int len, const void *val)
2eeb2e94 2872{
743eeb0b 2873 int idx;
90d83dc3 2874 struct kvm_io_bus *bus;
743eeb0b
SL
2875 struct kvm_io_range range;
2876
2877 range = (struct kvm_io_range) {
2878 .addr = addr,
2879 .len = len,
2880 };
90d83dc3
LJ
2881
2882 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
743eeb0b
SL
2883 idx = kvm_io_bus_get_first_dev(bus, addr, len);
2884 if (idx < 0)
2885 return -EOPNOTSUPP;
2886
2887 while (idx < bus->dev_count &&
2888 kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
2889 if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
bda9020e 2890 return 0;
743eeb0b
SL
2891 idx++;
2892 }
2893
bda9020e
MT
2894 return -EOPNOTSUPP;
2895}
2eeb2e94 2896
bda9020e 2897/* kvm_io_bus_read - called under kvm->slots_lock */
e93f8a0f
MT
2898int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2899 int len, void *val)
bda9020e 2900{
743eeb0b 2901 int idx;
90d83dc3 2902 struct kvm_io_bus *bus;
743eeb0b
SL
2903 struct kvm_io_range range;
2904
2905 range = (struct kvm_io_range) {
2906 .addr = addr,
2907 .len = len,
2908 };
e93f8a0f 2909
90d83dc3 2910 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
743eeb0b
SL
2911 idx = kvm_io_bus_get_first_dev(bus, addr, len);
2912 if (idx < 0)
2913 return -EOPNOTSUPP;
2914
2915 while (idx < bus->dev_count &&
2916 kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
2917 if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
bda9020e 2918 return 0;
743eeb0b
SL
2919 idx++;
2920 }
2921
bda9020e 2922 return -EOPNOTSUPP;
2eeb2e94
GH
2923}
2924
79fac95e 2925/* Caller must hold slots_lock. */
743eeb0b
SL
2926int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2927 int len, struct kvm_io_device *dev)
6c474694 2928{
e93f8a0f 2929 struct kvm_io_bus *new_bus, *bus;
090b7aff 2930
e93f8a0f 2931 bus = kvm->buses[bus_idx];
a1300716 2932 if (bus->dev_count > NR_IOBUS_DEVS - 1)
090b7aff 2933 return -ENOSPC;
2eeb2e94 2934
a1300716
AK
2935 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
2936 sizeof(struct kvm_io_range)), GFP_KERNEL);
e93f8a0f
MT
2937 if (!new_bus)
2938 return -ENOMEM;
a1300716
AK
2939 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
2940 sizeof(struct kvm_io_range)));
743eeb0b 2941 kvm_io_bus_insert_dev(new_bus, dev, addr, len);
e93f8a0f
MT
2942 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2943 synchronize_srcu_expedited(&kvm->srcu);
2944 kfree(bus);
090b7aff
GH
2945
2946 return 0;
2947}
2948
79fac95e 2949/* Caller must hold slots_lock. */
e93f8a0f
MT
2950int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2951 struct kvm_io_device *dev)
090b7aff 2952{
e93f8a0f
MT
2953 int i, r;
2954 struct kvm_io_bus *new_bus, *bus;
090b7aff 2955
cdfca7b3 2956 bus = kvm->buses[bus_idx];
e93f8a0f 2957 r = -ENOENT;
a1300716
AK
2958 for (i = 0; i < bus->dev_count; i++)
2959 if (bus->range[i].dev == dev) {
e93f8a0f 2960 r = 0;
090b7aff
GH
2961 break;
2962 }
e93f8a0f 2963
a1300716 2964 if (r)
e93f8a0f 2965 return r;
a1300716
AK
2966
2967 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
2968 sizeof(struct kvm_io_range)), GFP_KERNEL);
2969 if (!new_bus)
2970 return -ENOMEM;
2971
2972 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
2973 new_bus->dev_count--;
2974 memcpy(new_bus->range + i, bus->range + i + 1,
2975 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
e93f8a0f
MT
2976
2977 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2978 synchronize_srcu_expedited(&kvm->srcu);
2979 kfree(bus);
2980 return r;
2eeb2e94
GH
2981}
2982
774c47f1
AK
2983static struct notifier_block kvm_cpu_notifier = {
2984 .notifier_call = kvm_cpu_hotplug,
774c47f1
AK
2985};
2986
8b88b099 2987static int vm_stat_get(void *_offset, u64 *val)
ba1389b7
AK
2988{
2989 unsigned offset = (long)_offset;
ba1389b7
AK
2990 struct kvm *kvm;
2991
8b88b099 2992 *val = 0;
e935b837 2993 raw_spin_lock(&kvm_lock);
ba1389b7 2994 list_for_each_entry(kvm, &vm_list, vm_list)
8b88b099 2995 *val += *(u32 *)((void *)kvm + offset);
e935b837 2996 raw_spin_unlock(&kvm_lock);
8b88b099 2997 return 0;
ba1389b7
AK
2998}
2999
3000DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
3001
8b88b099 3002static int vcpu_stat_get(void *_offset, u64 *val)
1165f5fe
AK
3003{
3004 unsigned offset = (long)_offset;
1165f5fe
AK
3005 struct kvm *kvm;
3006 struct kvm_vcpu *vcpu;
3007 int i;
3008
8b88b099 3009 *val = 0;
e935b837 3010 raw_spin_lock(&kvm_lock);
1165f5fe 3011 list_for_each_entry(kvm, &vm_list, vm_list)
988a2cae
GN
3012 kvm_for_each_vcpu(i, vcpu, kvm)
3013 *val += *(u32 *)((void *)vcpu + offset);
3014
e935b837 3015 raw_spin_unlock(&kvm_lock);
8b88b099 3016 return 0;
1165f5fe
AK
3017}
3018
ba1389b7
AK
3019DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
3020
828c0950 3021static const struct file_operations *stat_fops[] = {
ba1389b7
AK
3022 [KVM_STAT_VCPU] = &vcpu_stat_fops,
3023 [KVM_STAT_VM] = &vm_stat_fops,
3024};
1165f5fe 3025
4f69b680 3026static int kvm_init_debug(void)
6aa8b732 3027{
4f69b680 3028 int r = -EFAULT;
6aa8b732
AK
3029 struct kvm_stats_debugfs_item *p;
3030
76f7c879 3031 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
4f69b680
H
3032 if (kvm_debugfs_dir == NULL)
3033 goto out;
3034
3035 for (p = debugfs_entries; p->name; ++p) {
76f7c879 3036 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
1165f5fe 3037 (void *)(long)p->offset,
ba1389b7 3038 stat_fops[p->kind]);
4f69b680
H
3039 if (p->dentry == NULL)
3040 goto out_dir;
3041 }
3042
3043 return 0;
3044
3045out_dir:
3046 debugfs_remove_recursive(kvm_debugfs_dir);
3047out:
3048 return r;
6aa8b732
AK
3049}
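Once kvm_init_debug() has run, the counters behind vm_stat_fops/vcpu_stat_fops show up as plain files under debugfs. A userspace sketch reading one of them (the entry names come from the arch's debugfs_entries table; "exits" exists on x86, so treat the exact path as an assumption):

#include <stdio.h>

int main(void)
{
	unsigned long long v = 0;
	FILE *f = fopen("/sys/kernel/debug/kvm/exits", "r");

	if (!f)
		return 1;		/* debugfs not mounted, or no such stat */
	if (fscanf(f, "%llu", &v) == 1)
		printf("exits: %llu\n", v);
	fclose(f);
	return 0;
}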
3050
3051static void kvm_exit_debug(void)
3052{
3053 struct kvm_stats_debugfs_item *p;
3054
3055 for (p = debugfs_entries; p->name; ++p)
3056 debugfs_remove(p->dentry);
76f7c879 3057 debugfs_remove(kvm_debugfs_dir);
6aa8b732
AK
3058}
3059
fb3600cc 3060static int kvm_suspend(void)
59ae6c6b 3061{
10474ae8 3062 if (kvm_usage_count)
75b7127c 3063 hardware_disable_nolock(NULL);
59ae6c6b
AK
3064 return 0;
3065}
3066
fb3600cc 3067static void kvm_resume(void)
59ae6c6b 3068{
ca84d1a2 3069 if (kvm_usage_count) {
e935b837 3070 WARN_ON(raw_spin_is_locked(&kvm_lock));
75b7127c 3071 hardware_enable_nolock(NULL);
ca84d1a2 3072 }
59ae6c6b
AK
3073}
3074
fb3600cc 3075static struct syscore_ops kvm_syscore_ops = {
59ae6c6b
AK
3076 .suspend = kvm_suspend,
3077 .resume = kvm_resume,
3078};
3079
15ad7146
AK
3080static inline
3081struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
3082{
3083 return container_of(pn, struct kvm_vcpu, preempt_notifier);
3084}
3085
3086static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
3087{
3088 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3a08a8f9
R
3089 if (vcpu->preempted)
3090 vcpu->preempted = false;
15ad7146 3091
e9b11c17 3092 kvm_arch_vcpu_load(vcpu, cpu);
15ad7146
AK
3093}
3094
3095static void kvm_sched_out(struct preempt_notifier *pn,
3096 struct task_struct *next)
3097{
3098 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3099
3a08a8f9
R
3100 if (current->state == TASK_RUNNING)
3101 vcpu->preempted = true;
e9b11c17 3102 kvm_arch_vcpu_put(vcpu);
15ad7146
AK
3103}
3104
0ee75bea 3105int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
c16f862d 3106 struct module *module)
6aa8b732
AK
3107{
3108 int r;
002c7f7c 3109 int cpu;
6aa8b732 3110
f8c16bba
ZX
3111 r = kvm_arch_init(opaque);
3112 if (r)
d2308784 3113 goto out_fail;
cb498ea2 3114
7dac16c3
AH
3115 /*
 3116 * kvm_arch_init makes sure there's at most one caller
 3117 * for architectures that support multiple implementations,
 3118 * like Intel and AMD on x86.
 3119 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
 3120 * conflicts in case kvm is already set up for another implementation.
3121 */
3122 r = kvm_irqfd_init();
3123 if (r)
3124 goto out_irqfd;
3125
8437a617 3126 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
7f59f492
RR
3127 r = -ENOMEM;
3128 goto out_free_0;
3129 }
3130
e9b11c17 3131 r = kvm_arch_hardware_setup();
6aa8b732 3132 if (r < 0)
7f59f492 3133 goto out_free_0a;
6aa8b732 3134
002c7f7c
YS
3135 for_each_online_cpu(cpu) {
3136 smp_call_function_single(cpu,
e9b11c17 3137 kvm_arch_check_processor_compat,
8691e5a8 3138 &r, 1);
002c7f7c 3139 if (r < 0)
d2308784 3140 goto out_free_1;
002c7f7c
YS
3141 }
3142
774c47f1
AK
3143 r = register_cpu_notifier(&kvm_cpu_notifier);
3144 if (r)
d2308784 3145 goto out_free_2;
6aa8b732
AK
3146 register_reboot_notifier(&kvm_reboot_notifier);
3147
c16f862d 3148 /* A kmem cache lets us meet the alignment requirements of fx_save. */
0ee75bea
AK
3149 if (!vcpu_align)
3150 vcpu_align = __alignof__(struct kvm_vcpu);
3151 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
56919c5c 3152 0, NULL);
c16f862d
RR
3153 if (!kvm_vcpu_cache) {
3154 r = -ENOMEM;
fb3600cc 3155 goto out_free_3;
c16f862d
RR
3156 }
3157
af585b92
GN
3158 r = kvm_async_pf_init();
3159 if (r)
3160 goto out_free;
3161
6aa8b732 3162 kvm_chardev_ops.owner = module;
3d3aab1b
CB
3163 kvm_vm_fops.owner = module;
3164 kvm_vcpu_fops.owner = module;
6aa8b732
AK
3165
3166 r = misc_register(&kvm_dev);
3167 if (r) {
d77c26fc 3168 printk(KERN_ERR "kvm: misc device register failed\n");
af585b92 3169 goto out_unreg;
6aa8b732
AK
3170 }
3171
fb3600cc
RW
3172 register_syscore_ops(&kvm_syscore_ops);
3173
15ad7146
AK
3174 kvm_preempt_ops.sched_in = kvm_sched_in;
3175 kvm_preempt_ops.sched_out = kvm_sched_out;
3176
4f69b680
H
3177 r = kvm_init_debug();
3178 if (r) {
3179 printk(KERN_ERR "kvm: create debugfs files failed\n");
3180 goto out_undebugfs;
3181 }
0ea4ed8e 3182
c7addb90 3183 return 0;
6aa8b732 3184
4f69b680
H
3185out_undebugfs:
3186 unregister_syscore_ops(&kvm_syscore_ops);
af585b92
GN
3187out_unreg:
3188 kvm_async_pf_deinit();
6aa8b732 3189out_free:
c16f862d 3190 kmem_cache_destroy(kvm_vcpu_cache);
d2308784 3191out_free_3:
6aa8b732 3192 unregister_reboot_notifier(&kvm_reboot_notifier);
774c47f1 3193 unregister_cpu_notifier(&kvm_cpu_notifier);
d2308784 3194out_free_2:
d2308784 3195out_free_1:
e9b11c17 3196 kvm_arch_hardware_unsetup();
7f59f492
RR
3197out_free_0a:
3198 free_cpumask_var(cpus_hardware_enabled);
d2308784 3199out_free_0:
a0f155e9
CH
3200 kvm_irqfd_exit();
3201out_irqfd:
7dac16c3
AH
3202 kvm_arch_exit();
3203out_fail:
6aa8b732
AK
3204 return r;
3205}
cb498ea2 3206EXPORT_SYMBOL_GPL(kvm_init);
6aa8b732 3207
cb498ea2 3208void kvm_exit(void)
6aa8b732 3209{
0ea4ed8e 3210 kvm_exit_debug();
6aa8b732 3211 misc_deregister(&kvm_dev);
c16f862d 3212 kmem_cache_destroy(kvm_vcpu_cache);
af585b92 3213 kvm_async_pf_deinit();
fb3600cc 3214 unregister_syscore_ops(&kvm_syscore_ops);
6aa8b732 3215 unregister_reboot_notifier(&kvm_reboot_notifier);
59ae6c6b 3216 unregister_cpu_notifier(&kvm_cpu_notifier);
75b7127c 3217 on_each_cpu(hardware_disable_nolock, NULL, 1);
e9b11c17 3218 kvm_arch_hardware_unsetup();
f8c16bba 3219 kvm_arch_exit();
a0f155e9 3220 kvm_irqfd_exit();
7f59f492 3221 free_cpumask_var(cpus_hardware_enabled);
6aa8b732 3222}
cb498ea2 3223EXPORT_SYMBOL_GPL(kvm_exit);