KVM: Export necessary function for EPT
arch/x86/kvm/x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"

#include <linux/clocksource.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall per default because it is emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

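/*
 * segment_base() resolves a selector to the linear base of its segment
 * by reading the descriptor straight out of the host GDT, or out of
 * the LDT (selector bit 2 set), whose own base is found by recursing
 * on the LDT selector.  The 32-bit base is scattered over the
 * base0/base1/base2 descriptor fields; on x86-64, system descriptors
 * (LDT/TSS) are 16 bytes wide and carry an extra base3 word.
 */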
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

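/*
 * Pending-exception merging below follows the x86 contributory-fault
 * rules: a page fault raised while a #PF is already pending is
 * promoted to a double fault, and a fault on top of a pending #DF
 * requests a triple fault, i.e. a guest shutdown.
 */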
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

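/*
 * In PAE mode cr3 is 32-byte aligned and points at four 8-byte page
 * directory pointers; "((cr3 & (PAGE_SIZE-1)) >> 5) << 2" converts the
 * sub-page bits of cr3 into the u64 index of that group of four
 * entries within its page.
 */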
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

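/*
 * PAE PDPTEs are only read at cr3 load time.  A guest that rewrites
 * them in memory and then reloads the same cr3 value still expects the
 * new entries to take effect, so the cached copy must be compared
 * against guest memory before the TLB-flush shortcut in kvm_set_cr3()
 * can be taken.
 */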
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

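/*
 * The wall-clock area below is published with a seqlock-style version
 * field: the count is bumped to an odd value before the payload is
 * written and to an even value afterwards, so a guest that observes an
 * odd or changing version knows to retry its read.
 */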
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct kvm_wall_clock wc;
	struct timespec wc_ts;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	wc_ts = current_kernel_time();
	wc.wc_sec = wc_ts.tv_sec;
	wc.wc_nsec = wc_ts.tv_nsec;
	wc.wc_version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if (!vcpu->time_page)
		return;

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just write "2" at the end
	 */
	vcpu->hv_clock.version = 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

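/*
 * The MSR_KVM_SYSTEM_TIME handler below precomputes the guest clock
 * scaling: clocksource_khz2mult(tsc_khz, 22) yields a fixed-point
 * multiplier such that nanoseconds = (tsc_delta * mult) >> 22, letting
 * the guest extrapolate time from raw TSC reads without exiting.
 */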

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.hv_clock.tsc_to_system_mul =
				clocksource_khz2mult(tsc_khz, 22);
		vcpu->arch.hv_clock.tsc_shift = 22;

		down_read(&current->mm->mmap_sem);
		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
		up_read(&current->mm->mmap_sem);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_write_guest_time(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_x86_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

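/*
 * KVM_GET_MSR_INDEX_LIST below uses the usual two-call convention:
 * userspace passes in nmsrs as its buffer capacity, the kernel writes
 * back the real count, and -E2BIG signals a buffer that is too small.
 * A userspace sketch (hypothetical fd and sizing, not part of this
 * file):
 *
 *	struct kvm_msr_list *list =
 *		malloc(sizeof(*list) + n * sizeof(__u32));
 *	list->nmsrs = n;
 *	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0 &&
 *	    errno == E2BIG)
 *		... retry with the count written back in list->nmsrs ...
 */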
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* Legacy KVM_SET_CPUID path: an old userspace fills a newer kernel's
 * kvm_cpuid_entry2 table from its kvm_cpuid_entry array. */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		(is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

	/* all func 2 cpuid_count() should be called on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* functions 4 and 0xb have an additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

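/*
 * Supported-CPUID enumeration runs in two passes: standard leaves from
 * 0 up to the limit reported by leaf 0, then extended leaves from
 * 0x80000000 up to the limit reported there, returning -E2BIG when the
 * caller's buffer cannot hold every entry.
 */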
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}

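/*
 * Dirty logging below works by write-protecting the slot's shadow page
 * table entries and flushing remote TLBs, so the next guest write
 * faults and re-marks its page dirty; the bitmap is cleared only after
 * that protection is in place.
 */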
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->arch.vpic = kvm_create_pic(kvm);
		if (kvm->arch.vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->arch.vpic);
				kvm->arch.vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_CREATE_PIT:
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm);
		if (kvm->arch.vpit)
			r = 0;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
						irq_event.irq,
						irq_event.level);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
					   irq_event.irq,
					   irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_PIT: {
		struct kvm_pit_state ps;
		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &ps, sizeof ps))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		struct kvm_pit_state ps;
		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &ps);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

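/*
 * msrs_to_save is pruned in place at module load: each candidate MSR
 * is probed with rdmsr_safe(), and indices the host cannot read are
 * compacted out, so the list advertised to userspace contains only
 * MSRs that actually exist on this cpu.
 */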
static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Only the apic needs a per-vcpu MMIO device hook, so take a shortcut
 * and check it directly before searching the bus.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}


static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

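/*
 * Emulated reads try the direct path first: translate the GVA and, if
 * the target is ordinary guest RAM, copy from it.  Accesses that hit
 * the APIC page or an MMIO region are either completed by an in-kernel
 * device or bounced to userspace via the vcpu->mmio_* fields, with the
 * result picked up on re-entry through mmio_read_completed.
 */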
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

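/*
 * A write may straddle a page boundary, in which case the two halves
 * can land on unrelated physical pages.  "-addr & ~PAGE_MASK" is the
 * two's-complement way of computing the byte count up to the end of
 * the current page, so the access is split there and each half is
 * emulated against its own page.
 */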
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

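/*
 * cmpxchg is emulated as a plain write (the one-shot warning below).
 * The exception is cmpxchg8b on a 32-bit host, presumably because a
 * PAE guest updates its page table entries with it: those 8 bytes must
 * reach memory atomically, hence the kmap + set_64bit() path.
 */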
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
#ifndef CONFIG_X86_64
	/* a guest's cmpxchg8b has to be emulated atomically */
	if (bytes == 8) {
		gpa_t gpa;
		struct page *page;
		char *kaddr;
		u64 val;

		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA ||
		   (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
			goto emul_write;

		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
			goto emul_write;

		val = *(u64 *)new;

		down_read(&current->mm->mmap_sem);
		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
		up_read(&current->mm->mmap_sem);

		kaddr = kmap_atomic(page, KM_USER0);
		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
		kunmap_atomic(kaddr, KM_USER0);
		kvm_release_page_dirty(page);
	}
emul_write:
#endif

	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->arch.rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

static struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};

2032int emulate_instruction(struct kvm_vcpu *vcpu,
2033 struct kvm_run *run,
2034 unsigned long cr2,
2035 u16 error_code,
571008da 2036 int emulation_type)
bbd9b64e
CO
2037{
2038 int r;
571008da 2039 struct decode_cache *c;
bbd9b64e 2040
ad312c7c 2041 vcpu->arch.mmio_fault_cr2 = cr2;
bbd9b64e
CO
2042 kvm_x86_ops->cache_regs(vcpu);
2043
2044 vcpu->mmio_is_write = 0;
ad312c7c 2045 vcpu->arch.pio.string = 0;
bbd9b64e 2046
571008da 2047 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
bbd9b64e
CO
2048 int cs_db, cs_l;
2049 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2050
ad312c7c
ZX
2051 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2052 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2053 vcpu->arch.emulate_ctxt.mode =
2054 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
bbd9b64e
CO
2055 ? X86EMUL_MODE_REAL : cs_l
2056 ? X86EMUL_MODE_PROT64 : cs_db
2057 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2058
ad312c7c
ZX
2059 if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
2060 vcpu->arch.emulate_ctxt.cs_base = 0;
2061 vcpu->arch.emulate_ctxt.ds_base = 0;
2062 vcpu->arch.emulate_ctxt.es_base = 0;
2063 vcpu->arch.emulate_ctxt.ss_base = 0;
bbd9b64e 2064 } else {
ad312c7c 2065 vcpu->arch.emulate_ctxt.cs_base =
bbd9b64e 2066 get_segment_base(vcpu, VCPU_SREG_CS);
ad312c7c 2067 vcpu->arch.emulate_ctxt.ds_base =
bbd9b64e 2068 get_segment_base(vcpu, VCPU_SREG_DS);
ad312c7c 2069 vcpu->arch.emulate_ctxt.es_base =
bbd9b64e 2070 get_segment_base(vcpu, VCPU_SREG_ES);
ad312c7c 2071 vcpu->arch.emulate_ctxt.ss_base =
bbd9b64e
CO
2072 get_segment_base(vcpu, VCPU_SREG_SS);
2073 }
2074
ad312c7c 2075 vcpu->arch.emulate_ctxt.gs_base =
bbd9b64e 2076 get_segment_base(vcpu, VCPU_SREG_GS);
ad312c7c 2077 vcpu->arch.emulate_ctxt.fs_base =
bbd9b64e
CO
2078 get_segment_base(vcpu, VCPU_SREG_FS);
2079
ad312c7c 2080 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
571008da
SY
2081
 2082		/* When emulating an invalid opcode, reject every instruction
 2083		 * other than VMCALL/VMMCALL */
2084 c = &vcpu->arch.emulate_ctxt.decode;
2085 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2086 (!(c->twobyte && c->b == 0x01 &&
2087 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2088 c->modrm_mod == 3 && c->modrm_rm == 1)))
2089 return EMULATE_FAIL;
2090
f2b5756b 2091 ++vcpu->stat.insn_emulation;
bbd9b64e 2092 if (r) {
f2b5756b 2093 ++vcpu->stat.insn_emulation_fail;
bbd9b64e
CO
2094 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2095 return EMULATE_DONE;
2096 return EMULATE_FAIL;
2097 }
2098 }
2099
ad312c7c 2100 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
bbd9b64e 2101
ad312c7c 2102 if (vcpu->arch.pio.string)
bbd9b64e
CO
2103 return EMULATE_DO_MMIO;
2104
2105 if ((r || vcpu->mmio_is_write) && run) {
2106 run->exit_reason = KVM_EXIT_MMIO;
2107 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2108 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2109 run->mmio.len = vcpu->mmio_size;
2110 run->mmio.is_write = vcpu->mmio_is_write;
2111 }
2112
2113 if (r) {
2114 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2115 return EMULATE_DONE;
2116 if (!vcpu->mmio_needed) {
2117 kvm_report_emulation_failure(vcpu, "mmio");
2118 return EMULATE_FAIL;
2119 }
2120 return EMULATE_DO_MMIO;
2121 }
2122
2123 kvm_x86_ops->decache_regs(vcpu);
ad312c7c 2124 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
bbd9b64e
CO
2125
2126 if (vcpu->mmio_is_write) {
2127 vcpu->mmio_needed = 0;
2128 return EMULATE_DO_MMIO;
2129 }
2130
2131 return EMULATE_DONE;
2132}
2133EXPORT_SYMBOL_GPL(emulate_instruction);
2134
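The mode selection near the top of emulate_instruction() folds three pieces of CPU state into one emulator mode. A minimal standalone sketch of that ladder (enum and parameter names are illustrative, not kernel API): EFLAGS.VM forces real mode, otherwise CS.L selects 64-bit and CS.D selects 32- versus 16-bit.

#include <stdio.h>

enum mode { MODE_REAL, MODE_PROT16, MODE_PROT32, MODE_PROT64 };

static enum mode decode_mode(int eflags_vm, int cs_l, int cs_db)
{
	if (eflags_vm)
		return MODE_REAL;	/* vm86: real-mode semantics */
	if (cs_l)
		return MODE_PROT64;	/* long-mode code segment */
	return cs_db ? MODE_PROT32 : MODE_PROT16;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       decode_mode(1, 0, 0),	/* MODE_REAL   */
	       decode_mode(0, 1, 0),	/* MODE_PROT64 */
	       decode_mode(0, 0, 1),	/* MODE_PROT32 */
	       decode_mode(0, 0, 0));	/* MODE_PROT16 */
	return 0;
}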
de7d789a
CO
2135static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
2136{
2137 int i;
2138
ad312c7c
ZX
2139 for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
2140 if (vcpu->arch.pio.guest_pages[i]) {
2141 kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
2142 vcpu->arch.pio.guest_pages[i] = NULL;
de7d789a
CO
2143 }
2144}
2145
2146static int pio_copy_data(struct kvm_vcpu *vcpu)
2147{
ad312c7c 2148 void *p = vcpu->arch.pio_data;
de7d789a
CO
2149 void *q;
2150 unsigned bytes;
ad312c7c 2151 int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
de7d789a 2152
ad312c7c 2153 q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
de7d789a
CO
2154 PAGE_KERNEL);
2155 if (!q) {
2156 free_pio_guest_pages(vcpu);
2157 return -ENOMEM;
2158 }
ad312c7c
ZX
2159 q += vcpu->arch.pio.guest_page_offset;
2160 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2161 if (vcpu->arch.pio.in)
de7d789a
CO
2162 memcpy(q, p, bytes);
2163 else
2164 memcpy(p, q, bytes);
ad312c7c 2165 q -= vcpu->arch.pio.guest_page_offset;
de7d789a
CO
2166 vunmap(q);
2167 free_pio_guest_pages(vcpu);
2168 return 0;
2169}
2170
2171int complete_pio(struct kvm_vcpu *vcpu)
2172{
ad312c7c 2173 struct kvm_pio_request *io = &vcpu->arch.pio;
de7d789a
CO
2174 long delta;
2175 int r;
2176
2177 kvm_x86_ops->cache_regs(vcpu);
2178
2179 if (!io->string) {
2180 if (io->in)
ad312c7c 2181 memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
de7d789a
CO
2182 io->size);
2183 } else {
2184 if (io->in) {
2185 r = pio_copy_data(vcpu);
2186 if (r) {
2187 kvm_x86_ops->cache_regs(vcpu);
2188 return r;
2189 }
2190 }
2191
2192 delta = 1;
2193 if (io->rep) {
2194 delta *= io->cur_count;
2195 /*
2196 * The size of the register should really depend on
2197 * current address size.
2198 */
ad312c7c 2199 vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
de7d789a
CO
2200 }
2201 if (io->down)
2202 delta = -delta;
2203 delta *= io->size;
2204 if (io->in)
ad312c7c 2205 vcpu->arch.regs[VCPU_REGS_RDI] += delta;
de7d789a 2206 else
ad312c7c 2207 vcpu->arch.regs[VCPU_REGS_RSI] += delta;
de7d789a
CO
2208 }
2209
2210 kvm_x86_ops->decache_regs(vcpu);
2211
2212 io->count -= io->cur_count;
2213 io->cur_count = 0;
2214
2215 return 0;
2216}
2217
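A standalone sketch of the REP string-I/O register update performed above (struct and values are illustrative): after a burst of cur_count elements, RSI or RDI advances by count*size, negated when the direction flag (io->down) is set, and RCX drops by the element count.

#include <stdio.h>

struct pio_state {
	long rcx, rsi, rdi;
	unsigned size;		/* bytes per element */
	unsigned cur_count;	/* elements just transferred */
	int rep, down, in;
};

static void complete_string_pio(struct pio_state *s)
{
	long delta = 1;

	if (s->rep) {
		delta *= s->cur_count;
		s->rcx -= delta;	/* REP count consumed */
	}
	if (s->down)
		delta = -delta;		/* EFLAGS.DF set: walk backwards */
	delta *= s->size;
	if (s->in)
		s->rdi += delta;	/* INS writes at ES:RDI */
	else
		s->rsi += delta;	/* OUTS reads at DS:RSI */
}

int main(void)
{
	struct pio_state s = { .rcx = 10, .rsi = 0x1000, .rdi = 0x2000,
			       .size = 2, .cur_count = 4, .rep = 1, .in = 0 };

	complete_string_pio(&s);
	printf("rcx=%ld rsi=%#lx rdi=%#lx\n", s.rcx, s.rsi, s.rdi);
	return 0;	/* rcx=6 rsi=0x1008 rdi=0x2000 */
}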
2218static void kernel_pio(struct kvm_io_device *pio_dev,
2219 struct kvm_vcpu *vcpu,
2220 void *pd)
2221{
 2222	/* TODO: string I/O for in-kernel devices */
2223
2224 mutex_lock(&vcpu->kvm->lock);
ad312c7c
ZX
2225 if (vcpu->arch.pio.in)
2226 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2227 vcpu->arch.pio.size,
de7d789a
CO
2228 pd);
2229 else
ad312c7c
ZX
2230 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2231 vcpu->arch.pio.size,
de7d789a
CO
2232 pd);
2233 mutex_unlock(&vcpu->kvm->lock);
2234}
2235
2236static void pio_string_write(struct kvm_io_device *pio_dev,
2237 struct kvm_vcpu *vcpu)
2238{
ad312c7c
ZX
2239 struct kvm_pio_request *io = &vcpu->arch.pio;
2240 void *pd = vcpu->arch.pio_data;
de7d789a
CO
2241 int i;
2242
2243 mutex_lock(&vcpu->kvm->lock);
2244 for (i = 0; i < io->cur_count; i++) {
2245 kvm_iodevice_write(pio_dev, io->port,
2246 io->size,
2247 pd);
2248 pd += io->size;
2249 }
2250 mutex_unlock(&vcpu->kvm->lock);
2251}
2252
2253static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2254 gpa_t addr)
2255{
2256 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
2257}
2258
2259int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2260 int size, unsigned port)
2261{
2262 struct kvm_io_device *pio_dev;
2263
2264 vcpu->run->exit_reason = KVM_EXIT_IO;
2265 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2266 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2267 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2268 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2269 vcpu->run->io.port = vcpu->arch.pio.port = port;
2270 vcpu->arch.pio.in = in;
2271 vcpu->arch.pio.string = 0;
2272 vcpu->arch.pio.down = 0;
2273 vcpu->arch.pio.guest_page_offset = 0;
2274 vcpu->arch.pio.rep = 0;
de7d789a 2275
2714d1d3
FEL
2276 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2277 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2278 handler);
2279 else
2280 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2281 handler);
2282
de7d789a 2283 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2284 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
de7d789a
CO
2285 kvm_x86_ops->decache_regs(vcpu);
2286
2287 kvm_x86_ops->skip_emulated_instruction(vcpu);
2288
2289 pio_dev = vcpu_find_pio_dev(vcpu, port);
2290 if (pio_dev) {
ad312c7c 2291 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
de7d789a
CO
2292 complete_pio(vcpu);
2293 return 1;
2294 }
2295 return 0;
2296}
2297EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2298
2299int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2300 int size, unsigned long count, int down,
2301 gva_t address, int rep, unsigned port)
2302{
2303 unsigned now, in_page;
2304 int i, ret = 0;
2305 int nr_pages = 1;
2306 struct page *page;
2307 struct kvm_io_device *pio_dev;
2308
2309 vcpu->run->exit_reason = KVM_EXIT_IO;
2310 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2311 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2312 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2313 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2314 vcpu->run->io.port = vcpu->arch.pio.port = port;
2315 vcpu->arch.pio.in = in;
2316 vcpu->arch.pio.string = 1;
2317 vcpu->arch.pio.down = down;
2318 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2319 vcpu->arch.pio.rep = rep;
de7d789a 2320
2714d1d3
FEL
2321 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2322 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2323 handler);
2324 else
2325 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2326 handler);
2327
de7d789a
CO
2328 if (!count) {
2329 kvm_x86_ops->skip_emulated_instruction(vcpu);
2330 return 1;
2331 }
2332
2333 if (!down)
2334 in_page = PAGE_SIZE - offset_in_page(address);
2335 else
2336 in_page = offset_in_page(address) + size;
2337 now = min(count, (unsigned long)in_page / size);
2338 if (!now) {
2339 /*
2340 * String I/O straddles page boundary. Pin two guest pages
2341 * so that we satisfy atomicity constraints. Do just one
2342 * transaction to avoid complexity.
2343 */
2344 nr_pages = 2;
2345 now = 1;
2346 }
2347 if (down) {
2348 /*
2349 * String I/O in reverse. Yuck. Kill the guest, fix later.
2350 */
2351 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2352 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2353 return 1;
2354 }
2355 vcpu->run->io.count = now;
ad312c7c 2356 vcpu->arch.pio.cur_count = now;
de7d789a 2357
ad312c7c 2358 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
de7d789a
CO
2359 kvm_x86_ops->skip_emulated_instruction(vcpu);
2360
2361 for (i = 0; i < nr_pages; ++i) {
de7d789a 2362 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
ad312c7c 2363 vcpu->arch.pio.guest_pages[i] = page;
de7d789a 2364 if (!page) {
c1a5d4f9 2365 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2366 free_pio_guest_pages(vcpu);
2367 return 1;
2368 }
2369 }
2370
2371 pio_dev = vcpu_find_pio_dev(vcpu, port);
ad312c7c 2372 if (!vcpu->arch.pio.in) {
de7d789a
CO
2373 /* string PIO write */
2374 ret = pio_copy_data(vcpu);
2375 if (ret >= 0 && pio_dev) {
2376 pio_string_write(pio_dev, vcpu);
2377 complete_pio(vcpu);
ad312c7c 2378 if (vcpu->arch.pio.count == 0)
de7d789a
CO
2379 ret = 1;
2380 }
2381 } else if (pio_dev)
2382 pr_unimpl(vcpu, "no string pio read support yet, "
2383 "port %x size %d count %ld\n",
2384 port, size, count);
2385
2386 return ret;
2387}
2388EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2389
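A standalone sketch of the burst-size computation above (4 KiB pages assumed): for an upward string op only the bytes left in the current page can move in one burst; if not even one element fits, the element itself straddles a page boundary, so two guest pages are pinned and the op proceeds one element at a time.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(a) ((unsigned long)(a) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long address = 0x1ffe;	/* 2 bytes before page boundary */
	unsigned long count = 8;
	unsigned size = 4;		/* 4-byte elements */
	int nr_pages = 1;

	unsigned long in_page = PAGE_SIZE - offset_in_page(address);
	unsigned long now = count < in_page / size ? count : in_page / size;

	if (!now) {			/* element itself crosses the page */
		nr_pages = 2;
		now = 1;
	}
	printf("in_page=%lu now=%lu nr_pages=%d\n", in_page, now, nr_pages);
	return 0;	/* in_page=2 now=1 nr_pages=2 */
}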
f8c16bba 2390int kvm_arch_init(void *opaque)
043405e1 2391{
56c6d28a 2392 int r;
f8c16bba
ZX
2393 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2394
f8c16bba
ZX
2395 if (kvm_x86_ops) {
2396 printk(KERN_ERR "kvm: already loaded the other module\n");
56c6d28a
ZX
2397 r = -EEXIST;
2398 goto out;
f8c16bba
ZX
2399 }
2400
2401 if (!ops->cpu_has_kvm_support()) {
2402 printk(KERN_ERR "kvm: no hardware support\n");
56c6d28a
ZX
2403 r = -EOPNOTSUPP;
2404 goto out;
f8c16bba
ZX
2405 }
2406 if (ops->disabled_by_bios()) {
2407 printk(KERN_ERR "kvm: disabled by bios\n");
56c6d28a
ZX
2408 r = -EOPNOTSUPP;
2409 goto out;
f8c16bba
ZX
2410 }
2411
97db56ce
AK
2412 r = kvm_mmu_module_init();
2413 if (r)
2414 goto out;
2415
2416 kvm_init_msr_list();
2417
f8c16bba 2418 kvm_x86_ops = ops;
56c6d28a 2419 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
7b52345e
SY
2420 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2421 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2422 PT_DIRTY_MASK, PT64_NX_MASK, 0);
f8c16bba 2423 return 0;
56c6d28a
ZX
2424
2425out:
56c6d28a 2426 return r;
043405e1 2427}
8776e519 2428
f8c16bba
ZX
2429void kvm_arch_exit(void)
2430{
2431 kvm_x86_ops = NULL;
56c6d28a
ZX
2432 kvm_mmu_module_exit();
2433}
f8c16bba 2434
8776e519
HB
2435int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2436{
2437 ++vcpu->stat.halt_exits;
2714d1d3 2438 KVMTRACE_0D(HLT, vcpu, handler);
8776e519 2439 if (irqchip_in_kernel(vcpu->kvm)) {
a4535290 2440 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
3200f405 2441 up_read(&vcpu->kvm->slots_lock);
8776e519 2442 kvm_vcpu_block(vcpu);
3200f405 2443 down_read(&vcpu->kvm->slots_lock);
a4535290 2444 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
8776e519
HB
2445 return -EINTR;
2446 return 1;
2447 } else {
2448 vcpu->run->exit_reason = KVM_EXIT_HLT;
2449 return 0;
2450 }
2451}
2452EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2453
2f333bcb
MT
2454static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2455 unsigned long a1)
2456{
2457 if (is_long_mode(vcpu))
2458 return a0;
2459 else
2460 return a0 | ((gpa_t)a1 << 32);
2461}
2462
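A standalone sketch of hc_gpa() above: a 32-bit guest cannot pass a 64-bit guest-physical address in one register, so the hypercall ABI splits it into low (a0) and high (a1) halves that are recombined here (the explicit masking is an assumption of this demo; in the caller the arguments are already truncated for non-long-mode guests).

#include <stdint.h>
#include <stdio.h>

static uint64_t hc_gpa_demo(int long_mode, uint64_t a0, uint64_t a1)
{
	if (long_mode)
		return a0;	/* 64-bit guest: a0 already holds the gpa */
	return (a0 & 0xffffffffULL) | (a1 << 32);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)hc_gpa_demo(0, 0xdeadbeef, 1));
	return 0;	/* 0x1deadbeef */
}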
8776e519
HB
2463int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2464{
2465 unsigned long nr, a0, a1, a2, a3, ret;
2f333bcb 2466 int r = 1;
8776e519
HB
2467
2468 kvm_x86_ops->cache_regs(vcpu);
2469
ad312c7c
ZX
2470 nr = vcpu->arch.regs[VCPU_REGS_RAX];
2471 a0 = vcpu->arch.regs[VCPU_REGS_RBX];
2472 a1 = vcpu->arch.regs[VCPU_REGS_RCX];
2473 a2 = vcpu->arch.regs[VCPU_REGS_RDX];
2474 a3 = vcpu->arch.regs[VCPU_REGS_RSI];
8776e519 2475
2714d1d3
FEL
2476 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2477
8776e519
HB
2478 if (!is_long_mode(vcpu)) {
2479 nr &= 0xFFFFFFFF;
2480 a0 &= 0xFFFFFFFF;
2481 a1 &= 0xFFFFFFFF;
2482 a2 &= 0xFFFFFFFF;
2483 a3 &= 0xFFFFFFFF;
2484 }
2485
2486 switch (nr) {
b93463aa
AK
2487 case KVM_HC_VAPIC_POLL_IRQ:
2488 ret = 0;
2489 break;
2f333bcb
MT
2490 case KVM_HC_MMU_OP:
2491 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2492 break;
8776e519
HB
2493 default:
2494 ret = -KVM_ENOSYS;
2495 break;
2496 }
ad312c7c 2497 vcpu->arch.regs[VCPU_REGS_RAX] = ret;
8776e519 2498 kvm_x86_ops->decache_regs(vcpu);
f11c3a8d 2499 ++vcpu->stat.hypercalls;
2f333bcb 2500 return r;
8776e519
HB
2501}
2502EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2503
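As the register shuffling above shows, the hypercall ABI puts the call number in RAX and up to four arguments in RBX, RCX, RDX and RSI, with the result returned in RAX. A minimal guest-side sketch of a zero-argument call (illustrative helper name; "vmcall" is the Intel encoding, AMD uses "vmmcall", which is why kvm_fix_hypercall() below patches the instruction; this compiles with GCC on x86 but only does something meaningful inside a KVM guest):

/* illustrative guest-side helper; executes only inside a KVM guest */
static inline long kvm_hypercall0_demo(unsigned long nr)
{
	long ret;

	/* nr goes in RAX; the hypervisor's result comes back in RAX */
	asm volatile("vmcall" : "=a"(ret) : "a"(nr) : "memory");
	return ret;
}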
2504int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2505{
2506 char instruction[3];
2507 int ret = 0;
2508
8776e519
HB
2509
2510 /*
 2511	 * Blow out the MMU so that no other VCPU has an active mapping; this
 2512	 * ensures that the updated hypercall appears atomically across all
 2513	 * VCPUs.
2514 */
2515 kvm_mmu_zap_all(vcpu->kvm);
2516
2517 kvm_x86_ops->cache_regs(vcpu);
2518 kvm_x86_ops->patch_hypercall(vcpu, instruction);
ad312c7c 2519 if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
8776e519
HB
2520 != X86EMUL_CONTINUE)
2521 ret = -EFAULT;
2522
8776e519
HB
2523 return ret;
2524}
2525
2526static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2527{
2528 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2529}
2530
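A standalone sketch of mk_cr_64() above: a 32-bit mov to a control register replaces only the low 32 bits, so the emulator keeps the high half of the current 64-bit value and splices in the new low word.

#include <stdint.h>
#include <stdio.h>

static uint64_t mk_cr_64_demo(uint64_t curr_cr, uint32_t new_val)
{
	/* keep bits 63..32, replace bits 31..0 */
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       mk_cr_64_demo(0xffff0000aaaaaaaaULL, 0x80000011));
	return 0;	/* 0xffff000080000011 */
}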
2531void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2532{
2533 struct descriptor_table dt = { limit, base };
2534
2535 kvm_x86_ops->set_gdt(vcpu, &dt);
2536}
2537
2538void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2539{
2540 struct descriptor_table dt = { limit, base };
2541
2542 kvm_x86_ops->set_idt(vcpu, &dt);
2543}
2544
2545void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2546 unsigned long *rflags)
2547{
2d3ad1f4 2548 kvm_lmsw(vcpu, msw);
8776e519
HB
2549 *rflags = kvm_x86_ops->get_rflags(vcpu);
2550}
2551
2552unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2553{
2554 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2555 switch (cr) {
2556 case 0:
ad312c7c 2557 return vcpu->arch.cr0;
8776e519 2558 case 2:
ad312c7c 2559 return vcpu->arch.cr2;
8776e519 2560 case 3:
ad312c7c 2561 return vcpu->arch.cr3;
8776e519 2562 case 4:
ad312c7c 2563 return vcpu->arch.cr4;
152ff9be 2564 case 8:
2d3ad1f4 2565 return kvm_get_cr8(vcpu);
8776e519 2566 default:
b8688d51 2567 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
2568 return 0;
2569 }
2570}
2571
2572void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2573 unsigned long *rflags)
2574{
2575 switch (cr) {
2576 case 0:
2d3ad1f4 2577 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
8776e519
HB
2578 *rflags = kvm_x86_ops->get_rflags(vcpu);
2579 break;
2580 case 2:
ad312c7c 2581 vcpu->arch.cr2 = val;
8776e519
HB
2582 break;
2583 case 3:
2d3ad1f4 2584 kvm_set_cr3(vcpu, val);
8776e519
HB
2585 break;
2586 case 4:
2d3ad1f4 2587 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 2588 break;
152ff9be 2589 case 8:
2d3ad1f4 2590 kvm_set_cr8(vcpu, val & 0xfUL);
152ff9be 2591 break;
8776e519 2592 default:
b8688d51 2593 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
2594 }
2595}
2596
07716717
DK
2597static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2598{
ad312c7c
ZX
2599 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2600 int j, nent = vcpu->arch.cpuid_nent;
07716717
DK
2601
2602 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2603 /* when no next entry is found, the current entry[i] is reselected */
 2604	for (j = (i + 1) % nent; j != i; j = (j + 1) % nent) {
ad312c7c 2605 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
07716717
DK
2606 if (ej->function == e->function) {
2607 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2608 return j;
2609 }
2610 }
 2611	e->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
 2612	return i; /* no other entry with this function: reselect entry[i] */
 2613}
2613
2614/* find an entry with matching function, matching index (if needed), and that
2615 * should be read next (if it's stateful) */
2616static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2617 u32 function, u32 index)
2618{
2619 if (e->function != function)
2620 return 0;
2621 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2622 return 0;
2623 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2624 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2625 return 0;
2626 return 1;
2627}
2628
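A standalone sketch of the entry-selection rules above, using illustrative flag values (the real constants live in the KVM uapi headers): an entry matches when the function agrees, the index agrees if the index is significant, and, for stateful functions, only when it is the entry marked to be read next.

#include <stdio.h>

#define FLAG_SIGNIFICANT_INDEX	1	/* illustrative values */
#define FLAG_STATEFUL_FUNC	2
#define FLAG_STATE_READ_NEXT	4

struct entry { unsigned function, index, flags; };

static int matches(const struct entry *e, unsigned function, unsigned index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & FLAG_SIGNIFICANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & FLAG_STATEFUL_FUNC) &&
	    !(e->flags & FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

int main(void)
{
	struct entry e = { 0x4, 0x1, FLAG_SIGNIFICANT_INDEX };

	printf("%d %d\n", matches(&e, 0x4, 0x1), matches(&e, 0x4, 0x2));
	return 0;	/* 1 0 */
}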
8776e519
HB
2629void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2630{
2631 int i;
07716717
DK
2632 u32 function, index;
2633 struct kvm_cpuid_entry2 *e, *best;
8776e519
HB
2634
2635 kvm_x86_ops->cache_regs(vcpu);
ad312c7c
ZX
2636 function = vcpu->arch.regs[VCPU_REGS_RAX];
2637 index = vcpu->arch.regs[VCPU_REGS_RCX];
2638 vcpu->arch.regs[VCPU_REGS_RAX] = 0;
2639 vcpu->arch.regs[VCPU_REGS_RBX] = 0;
2640 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2641 vcpu->arch.regs[VCPU_REGS_RDX] = 0;
8776e519 2642 best = NULL;
ad312c7c
ZX
2643 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2644 e = &vcpu->arch.cpuid_entries[i];
07716717
DK
2645 if (is_matching_cpuid_entry(e, function, index)) {
2646 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2647 move_to_next_stateful_cpuid_entry(vcpu, i);
8776e519
HB
2648 best = e;
2649 break;
2650 }
2651 /*
2652 * Both basic or both extended?
2653 */
2654 if (((e->function ^ function) & 0x80000000) == 0)
2655 if (!best || e->function > best->function)
2656 best = e;
2657 }
2658 if (best) {
ad312c7c
ZX
2659 vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
2660 vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
2661 vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
2662 vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
8776e519
HB
2663 }
2664 kvm_x86_ops->decache_regs(vcpu);
2665 kvm_x86_ops->skip_emulated_instruction(vcpu);
2714d1d3
FEL
2666 KVMTRACE_5D(CPUID, vcpu, function,
2667 (u32)vcpu->arch.regs[VCPU_REGS_RAX],
2668 (u32)vcpu->arch.regs[VCPU_REGS_RBX],
2669 (u32)vcpu->arch.regs[VCPU_REGS_RCX],
2670 (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
8776e519
HB
2671}
2672EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 2673
b6c7a5dc
HB
2674/*
2675 * Check if userspace requested an interrupt window, and that the
2676 * interrupt window is open.
2677 *
2678 * No need to exit to userspace if we already have an interrupt queued.
2679 */
2680static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2681 struct kvm_run *kvm_run)
2682{
ad312c7c 2683 return (!vcpu->arch.irq_summary &&
b6c7a5dc 2684 kvm_run->request_interrupt_window &&
ad312c7c 2685 vcpu->arch.interrupt_window_open &&
b6c7a5dc
HB
2686 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2687}
2688
2689static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2690 struct kvm_run *kvm_run)
2691{
2692 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2d3ad1f4 2693 kvm_run->cr8 = kvm_get_cr8(vcpu);
b6c7a5dc
HB
2694 kvm_run->apic_base = kvm_get_apic_base(vcpu);
2695 if (irqchip_in_kernel(vcpu->kvm))
2696 kvm_run->ready_for_interrupt_injection = 1;
2697 else
2698 kvm_run->ready_for_interrupt_injection =
ad312c7c
ZX
2699 (vcpu->arch.interrupt_window_open &&
2700 vcpu->arch.irq_summary == 0);
b6c7a5dc
HB
2701}
2702
b93463aa
AK
2703static void vapic_enter(struct kvm_vcpu *vcpu)
2704{
2705 struct kvm_lapic *apic = vcpu->arch.apic;
2706 struct page *page;
2707
2708 if (!apic || !apic->vapic_addr)
2709 return;
2710
10589a46 2711 down_read(&current->mm->mmap_sem);
b93463aa 2712 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
10589a46 2713 up_read(&current->mm->mmap_sem);
72dc67a6
IE
2714
2715 vcpu->arch.apic->vapic_page = page;
b93463aa
AK
2716}
2717
2718static void vapic_exit(struct kvm_vcpu *vcpu)
2719{
2720 struct kvm_lapic *apic = vcpu->arch.apic;
2721
2722 if (!apic || !apic->vapic_addr)
2723 return;
2724
2725 kvm_release_page_dirty(apic->vapic_page);
2726 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
2727}
2728
b6c7a5dc
HB
2729static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2730{
2731 int r;
2732
a4535290 2733 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
b6c7a5dc 2734 pr_debug("vcpu %d received sipi with vector # %x\n",
ad312c7c 2735 vcpu->vcpu_id, vcpu->arch.sipi_vector);
b6c7a5dc
HB
2736 kvm_lapic_reset(vcpu);
2737 r = kvm_x86_ops->vcpu_reset(vcpu);
2738 if (r)
2739 return r;
a4535290 2740 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
b6c7a5dc
HB
2741 }
2742
3200f405 2743 down_read(&vcpu->kvm->slots_lock);
b93463aa
AK
2744 vapic_enter(vcpu);
2745
b6c7a5dc
HB
2746preempted:
2747 if (vcpu->guest_debug.enabled)
2748 kvm_x86_ops->guest_debug_pre(vcpu);
2749
2750again:
2e53d63a
MT
2751 if (vcpu->requests)
2752 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
2753 kvm_mmu_unload(vcpu);
2754
b6c7a5dc
HB
2755 r = kvm_mmu_reload(vcpu);
2756 if (unlikely(r))
2757 goto out;
2758
2f52d58c
AK
2759 if (vcpu->requests) {
2760 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2761 __kvm_migrate_apic_timer(vcpu);
b93463aa
AK
2762 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
2763 &vcpu->requests)) {
2764 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
2765 r = 0;
2766 goto out;
2767 }
71c4dfaf
JR
2768 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
2769 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2770 r = 0;
2771 goto out;
2772 }
2f52d58c 2773 }
b93463aa 2774
b6c7a5dc
HB
2775 kvm_inject_pending_timer_irqs(vcpu);
2776
2777 preempt_disable();
2778
2779 kvm_x86_ops->prepare_guest_switch(vcpu);
2780 kvm_load_guest_fpu(vcpu);
2781
2782 local_irq_disable();
2783
6c142801
AK
2784 if (need_resched()) {
2785 local_irq_enable();
2786 preempt_enable();
2787 r = 1;
2788 goto out;
2789 }
2790
2e53d63a
MT
2791 if (vcpu->requests)
2792 if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
2793 local_irq_enable();
2794 preempt_enable();
2795 r = 1;
2796 goto out;
2797 }
2798
b6c7a5dc
HB
2799 if (signal_pending(current)) {
2800 local_irq_enable();
2801 preempt_enable();
2802 r = -EINTR;
2803 kvm_run->exit_reason = KVM_EXIT_INTR;
2804 ++vcpu->stat.signal_exits;
2805 goto out;
2806 }
2807
e9571ed5
MT
2808 vcpu->guest_mode = 1;
2809 /*
2810 * Make sure that guest_mode assignment won't happen after
2811 * testing the pending IRQ vector bitmap.
2812 */
2813 smp_wmb();
2814
ad312c7c 2815 if (vcpu->arch.exception.pending)
298101da
AK
2816 __queue_exception(vcpu);
2817 else if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 2818 kvm_x86_ops->inject_pending_irq(vcpu);
eb9774f0 2819 else
b6c7a5dc
HB
2820 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2821
b93463aa
AK
2822 kvm_lapic_sync_to_vapic(vcpu);
2823
3200f405
MT
2824 up_read(&vcpu->kvm->slots_lock);
2825
b6c7a5dc
HB
2826 kvm_guest_enter();
2827
2828 if (vcpu->requests)
2829 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2830 kvm_x86_ops->tlb_flush(vcpu);
2831
2714d1d3 2832 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
b6c7a5dc
HB
2833 kvm_x86_ops->run(vcpu, kvm_run);
2834
2835 vcpu->guest_mode = 0;
2836 local_irq_enable();
2837
2838 ++vcpu->stat.exits;
2839
2840 /*
2841 * We must have an instruction between local_irq_enable() and
2842 * kvm_guest_exit(), so the timer interrupt isn't delayed by
2843 * the interrupt shadow. The stat.exits increment will do nicely.
2844 * But we need to prevent reordering, hence this barrier():
2845 */
2846 barrier();
2847
2848 kvm_guest_exit();
2849
2850 preempt_enable();
2851
3200f405
MT
2852 down_read(&vcpu->kvm->slots_lock);
2853
b6c7a5dc
HB
2854 /*
2855 * Profile KVM exit RIPs:
2856 */
2857 if (unlikely(prof_on == KVM_PROFILING)) {
2858 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2859 profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
b6c7a5dc
HB
2860 }
2861
ad312c7c
ZX
2862 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
2863 vcpu->arch.exception.pending = false;
298101da 2864
b93463aa
AK
2865 kvm_lapic_sync_from_vapic(vcpu);
2866
b6c7a5dc
HB
2867 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2868
2869 if (r > 0) {
2870 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2871 r = -EINTR;
2872 kvm_run->exit_reason = KVM_EXIT_INTR;
2873 ++vcpu->stat.request_irq_exits;
2874 goto out;
2875 }
e1beb1d3 2876 if (!need_resched())
b6c7a5dc 2877 goto again;
b6c7a5dc
HB
2878 }
2879
2880out:
3200f405 2881 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
2882 if (r > 0) {
2883 kvm_resched(vcpu);
3200f405 2884 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
2885 goto preempted;
2886 }
2887
2888 post_kvm_run_save(vcpu, kvm_run);
2889
3200f405 2890 down_read(&vcpu->kvm->slots_lock);
b93463aa 2891 vapic_exit(vcpu);
3200f405 2892 up_read(&vcpu->kvm->slots_lock);
b93463aa 2893
b6c7a5dc
HB
2894 return r;
2895}
2896
2897int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2898{
2899 int r;
2900 sigset_t sigsaved;
2901
2902 vcpu_load(vcpu);
2903
a4535290 2904 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
b6c7a5dc
HB
2905 kvm_vcpu_block(vcpu);
2906 vcpu_put(vcpu);
2907 return -EAGAIN;
2908 }
2909
2910 if (vcpu->sigset_active)
2911 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2912
2913 /* re-sync apic's tpr */
2914 if (!irqchip_in_kernel(vcpu->kvm))
2d3ad1f4 2915 kvm_set_cr8(vcpu, kvm_run->cr8);
b6c7a5dc 2916
ad312c7c 2917 if (vcpu->arch.pio.cur_count) {
b6c7a5dc
HB
2918 r = complete_pio(vcpu);
2919 if (r)
2920 goto out;
2921 }
 2922#ifdef CONFIG_HAS_IOMEM
2923 if (vcpu->mmio_needed) {
2924 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2925 vcpu->mmio_read_completed = 1;
2926 vcpu->mmio_needed = 0;
3200f405
MT
2927
2928 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc 2929 r = emulate_instruction(vcpu, kvm_run,
571008da
SY
2930 vcpu->arch.mmio_fault_cr2, 0,
2931 EMULTYPE_NO_DECODE);
3200f405 2932 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
2933 if (r == EMULATE_DO_MMIO) {
2934 /*
2935 * Read-modify-write. Back to userspace.
2936 */
2937 r = 0;
2938 goto out;
2939 }
2940 }
2941#endif
2942 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2943 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2944 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
b6c7a5dc
HB
2945 kvm_x86_ops->decache_regs(vcpu);
2946 }
2947
2948 r = __vcpu_run(vcpu, kvm_run);
2949
2950out:
2951 if (vcpu->sigset_active)
2952 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2953
2954 vcpu_put(vcpu);
2955 return r;
2956}
2957
2958int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2959{
2960 vcpu_load(vcpu);
2961
2962 kvm_x86_ops->cache_regs(vcpu);
2963
ad312c7c
ZX
2964 regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
2965 regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
2966 regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
2967 regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
2968 regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
2969 regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
2970 regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2971 regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
b6c7a5dc 2972#ifdef CONFIG_X86_64
ad312c7c
ZX
2973 regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
2974 regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
2975 regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
2976 regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
2977 regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
2978 regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
2979 regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
2980 regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
b6c7a5dc
HB
2981#endif
2982
ad312c7c 2983 regs->rip = vcpu->arch.rip;
b6c7a5dc
HB
2984 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
2985
2986 /*
2987 * Don't leak debug flags in case they were set for guest debugging
2988 */
2989 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
2990 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
2991
2992 vcpu_put(vcpu);
2993
2994 return 0;
2995}
2996
2997int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2998{
2999 vcpu_load(vcpu);
3000
ad312c7c
ZX
3001 vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
3002 vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
3003 vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
3004 vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
3005 vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
3006 vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
3007 vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
3008 vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
b6c7a5dc 3009#ifdef CONFIG_X86_64
ad312c7c
ZX
3010 vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
3011 vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
3012 vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
3013 vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
3014 vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
3015 vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
3016 vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
3017 vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
b6c7a5dc
HB
3018#endif
3019
ad312c7c 3020 vcpu->arch.rip = regs->rip;
b6c7a5dc
HB
3021 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3022
3023 kvm_x86_ops->decache_regs(vcpu);
3024
3025 vcpu_put(vcpu);
3026
3027 return 0;
3028}
3029
3030static void get_segment(struct kvm_vcpu *vcpu,
3031 struct kvm_segment *var, int seg)
3032{
14af3f3c 3033 kvm_x86_ops->get_segment(vcpu, var, seg);
b6c7a5dc
HB
3034}
3035
3036void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3037{
3038 struct kvm_segment cs;
3039
3040 get_segment(vcpu, &cs, VCPU_SREG_CS);
3041 *db = cs.db;
3042 *l = cs.l;
3043}
3044EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3045
3046int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3047 struct kvm_sregs *sregs)
3048{
3049 struct descriptor_table dt;
3050 int pending_vec;
3051
3052 vcpu_load(vcpu);
3053
3054 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3055 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3056 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3057 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3058 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3059 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3060
3061 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3062 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3063
3064 kvm_x86_ops->get_idt(vcpu, &dt);
3065 sregs->idt.limit = dt.limit;
3066 sregs->idt.base = dt.base;
3067 kvm_x86_ops->get_gdt(vcpu, &dt);
3068 sregs->gdt.limit = dt.limit;
3069 sregs->gdt.base = dt.base;
3070
3071 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
ad312c7c
ZX
3072 sregs->cr0 = vcpu->arch.cr0;
3073 sregs->cr2 = vcpu->arch.cr2;
3074 sregs->cr3 = vcpu->arch.cr3;
3075 sregs->cr4 = vcpu->arch.cr4;
2d3ad1f4 3076 sregs->cr8 = kvm_get_cr8(vcpu);
ad312c7c 3077 sregs->efer = vcpu->arch.shadow_efer;
b6c7a5dc
HB
3078 sregs->apic_base = kvm_get_apic_base(vcpu);
3079
3080 if (irqchip_in_kernel(vcpu->kvm)) {
3081 memset(sregs->interrupt_bitmap, 0,
3082 sizeof sregs->interrupt_bitmap);
3083 pending_vec = kvm_x86_ops->get_irq(vcpu);
3084 if (pending_vec >= 0)
3085 set_bit(pending_vec,
3086 (unsigned long *)sregs->interrupt_bitmap);
3087 } else
ad312c7c 3088 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
b6c7a5dc
HB
3089 sizeof sregs->interrupt_bitmap);
3090
3091 vcpu_put(vcpu);
3092
3093 return 0;
3094}
3095
62d9f0db
MT
3096int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3097 struct kvm_mp_state *mp_state)
3098{
3099 vcpu_load(vcpu);
3100 mp_state->mp_state = vcpu->arch.mp_state;
3101 vcpu_put(vcpu);
3102 return 0;
3103}
3104
3105int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3106 struct kvm_mp_state *mp_state)
3107{
3108 vcpu_load(vcpu);
3109 vcpu->arch.mp_state = mp_state->mp_state;
3110 vcpu_put(vcpu);
3111 return 0;
3112}
3113
b6c7a5dc
HB
3114static void set_segment(struct kvm_vcpu *vcpu,
3115 struct kvm_segment *var, int seg)
3116{
14af3f3c 3117 kvm_x86_ops->set_segment(vcpu, var, seg);
b6c7a5dc
HB
3118}
3119
37817f29
IE
3120static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3121 struct kvm_segment *kvm_desct)
3122{
3123 kvm_desct->base = seg_desc->base0;
3124 kvm_desct->base |= seg_desc->base1 << 16;
3125 kvm_desct->base |= seg_desc->base2 << 24;
3126 kvm_desct->limit = seg_desc->limit0;
3127 kvm_desct->limit |= seg_desc->limit << 16;
3128 kvm_desct->selector = selector;
3129 kvm_desct->type = seg_desc->type;
3130 kvm_desct->present = seg_desc->p;
3131 kvm_desct->dpl = seg_desc->dpl;
3132 kvm_desct->db = seg_desc->d;
3133 kvm_desct->s = seg_desc->s;
3134 kvm_desct->l = seg_desc->l;
3135 kvm_desct->g = seg_desc->g;
3136 kvm_desct->avl = seg_desc->avl;
3137 if (!selector)
3138 kvm_desct->unusable = 1;
3139 else
3140 kvm_desct->unusable = 0;
3141 kvm_desct->padding = 0;
3142}
3143
3144static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3145 u16 selector,
3146 struct descriptor_table *dtable)
3147{
3148 if (selector & 1 << 2) {
3149 struct kvm_segment kvm_seg;
3150
3151 get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3152
3153 if (kvm_seg.unusable)
3154 dtable->limit = 0;
3155 else
3156 dtable->limit = kvm_seg.limit;
3157 dtable->base = kvm_seg.base;
 3158	} else
 3159		kvm_x86_ops->get_gdt(vcpu, dtable);
3161}
3162
3163/* allowed just for 8-byte segment descriptors */
3164static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3165 struct desc_struct *seg_desc)
3166{
3167 struct descriptor_table dtable;
3168 u16 index = selector >> 3;
3169
 3170	get_segment_descriptor_dtable(vcpu, selector, &dtable);
3171
3172 if (dtable.limit < index * 8 + 7) {
3173 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3174 return 1;
3175 }
3176 return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
3177}
3178
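A standalone sketch of the selector decoding used above: bits 15..3 index the descriptor table, bit 2 (TI) picks LDT versus GDT, and bits 1..0 are the RPL; the limit check "index * 8 + 7" verifies that the full 8-byte descriptor lies inside the table. The selector value below is illustrative.

#include <stdio.h>

int main(void)
{
	unsigned short selector = 0x2f;	/* index 5, TI=1 (LDT), RPL=3 */
	unsigned index = selector >> 3;
	int uses_ldt = !!(selector & (1 << 2));
	unsigned rpl = selector & 3;
	unsigned limit_needed = index * 8 + 7;

	printf("index=%u ldt=%d rpl=%u table limit must be >= %u\n",
	       index, uses_ldt, rpl, limit_needed);
	return 0;
}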
 3179/* allowed just for 8-byte segment descriptors */
3180static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3181 struct desc_struct *seg_desc)
3182{
3183 struct descriptor_table dtable;
3184 u16 index = selector >> 3;
3185
 3186	get_segment_descriptor_dtable(vcpu, selector, &dtable);
3187
3188 if (dtable.limit < index * 8 + 7)
3189 return 1;
3190 return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
3191}
3192
3193static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3194 struct desc_struct *seg_desc)
3195{
3196 u32 base_addr;
3197
3198 base_addr = seg_desc->base0;
3199 base_addr |= (seg_desc->base1 << 16);
3200 base_addr |= (seg_desc->base2 << 24);
3201
3202 return base_addr;
3203}
3204
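A standalone sketch of the base reconstruction above: a 32-bit segment base is scattered across three descriptor fields for historical 286 compatibility (base0 holds bits 15..0, base1 bits 23..16, base2 bits 31..24).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t base0 = 0x5678;	/* bits 15..0  */
	uint8_t base1 = 0x34;		/* bits 23..16 */
	uint8_t base2 = 0x12;		/* bits 31..24 */
	uint32_t base = base0 | ((uint32_t)base1 << 16) |
			((uint32_t)base2 << 24);

	printf("%#x\n", base);	/* 0x12345678 */
	return 0;
}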
3205static int load_tss_segment32(struct kvm_vcpu *vcpu,
3206 struct desc_struct *seg_desc,
3207 struct tss_segment_32 *tss)
3208{
3209 u32 base_addr;
3210
3211 base_addr = get_tss_base_addr(vcpu, seg_desc);
3212
3213 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3214 sizeof(struct tss_segment_32));
3215}
3216
3217static int save_tss_segment32(struct kvm_vcpu *vcpu,
3218 struct desc_struct *seg_desc,
3219 struct tss_segment_32 *tss)
3220{
3221 u32 base_addr;
3222
3223 base_addr = get_tss_base_addr(vcpu, seg_desc);
3224
3225 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3226 sizeof(struct tss_segment_32));
3227}
3228
3229static int load_tss_segment16(struct kvm_vcpu *vcpu,
3230 struct desc_struct *seg_desc,
3231 struct tss_segment_16 *tss)
3232{
3233 u32 base_addr;
3234
3235 base_addr = get_tss_base_addr(vcpu, seg_desc);
3236
3237 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3238 sizeof(struct tss_segment_16));
3239}
3240
3241static int save_tss_segment16(struct kvm_vcpu *vcpu,
3242 struct desc_struct *seg_desc,
3243 struct tss_segment_16 *tss)
3244{
3245 u32 base_addr;
3246
3247 base_addr = get_tss_base_addr(vcpu, seg_desc);
3248
3249 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3250 sizeof(struct tss_segment_16));
3251}
3252
3253static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3254{
3255 struct kvm_segment kvm_seg;
3256
3257 get_segment(vcpu, &kvm_seg, seg);
3258 return kvm_seg.selector;
3259}
3260
3261static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3262 u16 selector,
3263 struct kvm_segment *kvm_seg)
3264{
3265 struct desc_struct seg_desc;
3266
3267 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3268 return 1;
3269 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3270 return 0;
3271}
3272
3273static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3274 int type_bits, int seg)
3275{
3276 struct kvm_segment kvm_seg;
3277
3278 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3279 return 1;
3280 kvm_seg.type |= type_bits;
3281
3282 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3283 seg != VCPU_SREG_LDTR)
3284 if (!kvm_seg.s)
3285 kvm_seg.unusable = 1;
3286
3287 set_segment(vcpu, &kvm_seg, seg);
3288 return 0;
3289}
3290
3291static void save_state_to_tss32(struct kvm_vcpu *vcpu,
3292 struct tss_segment_32 *tss)
3293{
3294 tss->cr3 = vcpu->arch.cr3;
3295 tss->eip = vcpu->arch.rip;
3296 tss->eflags = kvm_x86_ops->get_rflags(vcpu);
3297 tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
3298 tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3299 tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
3300 tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
3301 tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
3302 tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
3303 tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
3304 tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
3305
3306 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3307 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3308 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3309 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3310 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
3311 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
3312 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3313 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3314}
3315
3316static int load_state_from_tss32(struct kvm_vcpu *vcpu,
3317 struct tss_segment_32 *tss)
3318{
3319 kvm_set_cr3(vcpu, tss->cr3);
3320
3321 vcpu->arch.rip = tss->eip;
3322 kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
3323
3324 vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
3325 vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
3326 vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
3327 vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
3328 vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
3329 vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
3330 vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
3331 vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
3332
3333 if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
3334 return 1;
3335
3336 if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3337 return 1;
3338
3339 if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3340 return 1;
3341
3342 if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3343 return 1;
3344
3345 if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3346 return 1;
3347
3348 if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
3349 return 1;
3350
3351 if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
3352 return 1;
3353 return 0;
3354}
3355
3356static void save_state_to_tss16(struct kvm_vcpu *vcpu,
3357 struct tss_segment_16 *tss)
3358{
3359 tss->ip = vcpu->arch.rip;
3360 tss->flag = kvm_x86_ops->get_rflags(vcpu);
3361 tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
3362 tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
3363 tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
3364 tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
3365 tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
3366 tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
3367 tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
3368 tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
3369
3370 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3371 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3372 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3373 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3374 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3375 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3376}
3377
3378static int load_state_from_tss16(struct kvm_vcpu *vcpu,
3379 struct tss_segment_16 *tss)
3380{
3381 vcpu->arch.rip = tss->ip;
3382 kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
3383 vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
3384 vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
3385 vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
3386 vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
3387 vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
3388 vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
3389 vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
3390 vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
3391
3392 if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
3393 return 1;
3394
3395 if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3396 return 1;
3397
3398 if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3399 return 1;
3400
3401 if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3402 return 1;
3403
3404 if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3405 return 1;
3406 return 0;
3407}
3408
3409int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
3410 struct desc_struct *cseg_desc,
3411 struct desc_struct *nseg_desc)
3412{
3413 struct tss_segment_16 tss_segment_16;
3414 int ret = 0;
3415
3416 if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
3417 goto out;
3418
3419 save_state_to_tss16(vcpu, &tss_segment_16);
3420 save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
3421
3422 if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
3423 goto out;
3424 if (load_state_from_tss16(vcpu, &tss_segment_16))
3425 goto out;
3426
3427 ret = 1;
3428out:
3429 return ret;
3430}
3431
3432int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
3433 struct desc_struct *cseg_desc,
3434 struct desc_struct *nseg_desc)
3435{
3436 struct tss_segment_32 tss_segment_32;
3437 int ret = 0;
3438
3439 if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
3440 goto out;
3441
3442 save_state_to_tss32(vcpu, &tss_segment_32);
3443 save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
3444
3445 if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
3446 goto out;
3447 if (load_state_from_tss32(vcpu, &tss_segment_32))
3448 goto out;
3449
3450 ret = 1;
3451out:
3452 return ret;
3453}
3454
3455int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3456{
3457 struct kvm_segment tr_seg;
3458 struct desc_struct cseg_desc;
3459 struct desc_struct nseg_desc;
3460 int ret = 0;
3461
3462 get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3463
3464 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3465 goto out;
3466
3467 if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
3468 goto out;
3469
3470
3471 if (reason != TASK_SWITCH_IRET) {
3472 int cpl;
3473
3474 cpl = kvm_x86_ops->get_cpl(vcpu);
3475 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
3476 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3477 return 1;
3478 }
3479 }
3480
3481 if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
3482 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
3483 return 1;
3484 }
3485
3486 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
 3487		cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
3488 save_guest_segment_descriptor(vcpu, tr_seg.selector,
3489 &cseg_desc);
3490 }
3491
3492 if (reason == TASK_SWITCH_IRET) {
3493 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3494 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
3495 }
3496
3497 kvm_x86_ops->skip_emulated_instruction(vcpu);
3498 kvm_x86_ops->cache_regs(vcpu);
3499
3500 if (nseg_desc.type & 8)
3501 ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
3502 &nseg_desc);
3503 else
3504 ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
3505 &nseg_desc);
3506
3507 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
3508 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3509 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
3510 }
3511
3512 if (reason != TASK_SWITCH_IRET) {
 3513		nseg_desc.type |= (1 << 1); /* set the busy (B) flag */
3514 save_guest_segment_descriptor(vcpu, tss_selector,
3515 &nseg_desc);
3516 }
3517
3518 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
3519 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
3520 tr_seg.type = 11;
3521 set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3522out:
3523 kvm_x86_ops->decache_regs(vcpu);
3524 return ret;
3525}
3526EXPORT_SYMBOL_GPL(kvm_task_switch);
3527
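A standalone sketch of the TSS busy-bit handling above, assuming the architectural type encodings: an available 32-bit TSS has descriptor type 9 (0b1001) and its busy variant is 11 (0b1011), so the B flag is bit 1 of the 4-bit type field that the switch code toggles.

#include <stdio.h>

int main(void)
{
	unsigned type = 9;		/* available 32-bit TSS */

	type |= 1 << 1;			/* mark busy on switch-in  */
	printf("busy: %u\n", type);	/* 11 */
	type &= ~(1 << 1);		/* clear on IRET/JMP away  */
	printf("available: %u\n", type);/* 9 */
	return 0;
}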
b6c7a5dc
HB
3528int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3529 struct kvm_sregs *sregs)
3530{
3531 int mmu_reset_needed = 0;
3532 int i, pending_vec, max_bits;
3533 struct descriptor_table dt;
3534
3535 vcpu_load(vcpu);
3536
3537 dt.limit = sregs->idt.limit;
3538 dt.base = sregs->idt.base;
3539 kvm_x86_ops->set_idt(vcpu, &dt);
3540 dt.limit = sregs->gdt.limit;
3541 dt.base = sregs->gdt.base;
3542 kvm_x86_ops->set_gdt(vcpu, &dt);
3543
ad312c7c
ZX
3544 vcpu->arch.cr2 = sregs->cr2;
3545 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
3546 vcpu->arch.cr3 = sregs->cr3;
b6c7a5dc 3547
2d3ad1f4 3548 kvm_set_cr8(vcpu, sregs->cr8);
b6c7a5dc 3549
ad312c7c 3550 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
b6c7a5dc 3551 kvm_x86_ops->set_efer(vcpu, sregs->efer);
b6c7a5dc
HB
3552 kvm_set_apic_base(vcpu, sregs->apic_base);
3553
3554 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3555
ad312c7c 3556 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
b6c7a5dc 3557 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
d7306163 3558 vcpu->arch.cr0 = sregs->cr0;
b6c7a5dc 3559
ad312c7c 3560 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
b6c7a5dc
HB
3561 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
3562 if (!is_long_mode(vcpu) && is_pae(vcpu))
ad312c7c 3563 load_pdptrs(vcpu, vcpu->arch.cr3);
b6c7a5dc
HB
3564
3565 if (mmu_reset_needed)
3566 kvm_mmu_reset_context(vcpu);
3567
3568 if (!irqchip_in_kernel(vcpu->kvm)) {
ad312c7c
ZX
3569 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
3570 sizeof vcpu->arch.irq_pending);
3571 vcpu->arch.irq_summary = 0;
3572 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
3573 if (vcpu->arch.irq_pending[i])
3574 __set_bit(i, &vcpu->arch.irq_summary);
b6c7a5dc
HB
3575 } else {
3576 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
3577 pending_vec = find_first_bit(
3578 (const unsigned long *)sregs->interrupt_bitmap,
3579 max_bits);
 3580		/* Only a pending external irq is handled here */
3581 if (pending_vec < max_bits) {
3582 kvm_x86_ops->set_irq(vcpu, pending_vec);
3583 pr_debug("Set back pending irq %d\n",
3584 pending_vec);
3585 }
3586 }
3587
3588 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3589 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3590 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3591 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3592 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3593 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3594
3595 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3596 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3597
3598 vcpu_put(vcpu);
3599
3600 return 0;
3601}
3602
3603int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
3604 struct kvm_debug_guest *dbg)
3605{
3606 int r;
3607
3608 vcpu_load(vcpu);
3609
3610 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
3611
3612 vcpu_put(vcpu);
3613
3614 return r;
3615}
3616
d0752060
HB
3617/*
3618 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
3619 * we have asm/x86/processor.h
3620 */
3621struct fxsave {
3622 u16 cwd;
3623 u16 swd;
3624 u16 twd;
3625 u16 fop;
3626 u64 rip;
3627 u64 rdp;
3628 u32 mxcsr;
3629 u32 mxcsr_mask;
3630 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
3631#ifdef CONFIG_X86_64
3632 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
3633#else
3634 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
3635#endif
3636};
3637
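The FXSAVE image the CPU writes is 512 bytes, with the FP registers at byte offset 32 and the XMM registers at byte offset 160. A standalone sketch checking that the field sizes above reproduce those offsets (this mirrors the 64-bit variant with xmm_space[64]):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fxsave_demo {
	uint16_t cwd, swd, twd, fop;
	uint64_t rip, rdp;
	uint32_t mxcsr, mxcsr_mask;
	uint32_t st_space[32];	/* 8 FP regs * 16 bytes  */
	uint32_t xmm_space[64];	/* 16 XMM regs * 16 bytes */
};

int main(void)
{
	printf("st_space at %zu, xmm_space at %zu\n",
	       offsetof(struct fxsave_demo, st_space),
	       offsetof(struct fxsave_demo, xmm_space));
	return 0;	/* 32 and 160 */
}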
8b006791
ZX
3638/*
3639 * Translate a guest virtual address to a guest physical address.
3640 */
3641int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3642 struct kvm_translation *tr)
3643{
3644 unsigned long vaddr = tr->linear_address;
3645 gpa_t gpa;
3646
3647 vcpu_load(vcpu);
72dc67a6 3648 down_read(&vcpu->kvm->slots_lock);
ad312c7c 3649 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
72dc67a6 3650 up_read(&vcpu->kvm->slots_lock);
8b006791
ZX
3651 tr->physical_address = gpa;
3652 tr->valid = gpa != UNMAPPED_GVA;
3653 tr->writeable = 1;
3654 tr->usermode = 0;
8b006791
ZX
3655 vcpu_put(vcpu);
3656
3657 return 0;
3658}
3659
d0752060
HB
3660int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3661{
ad312c7c 3662 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
3663
3664 vcpu_load(vcpu);
3665
3666 memcpy(fpu->fpr, fxsave->st_space, 128);
3667 fpu->fcw = fxsave->cwd;
3668 fpu->fsw = fxsave->swd;
3669 fpu->ftwx = fxsave->twd;
3670 fpu->last_opcode = fxsave->fop;
3671 fpu->last_ip = fxsave->rip;
3672 fpu->last_dp = fxsave->rdp;
3673 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
3674
3675 vcpu_put(vcpu);
3676
3677 return 0;
3678}
3679
3680int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3681{
ad312c7c 3682 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
3683
3684 vcpu_load(vcpu);
3685
3686 memcpy(fxsave->st_space, fpu->fpr, 128);
3687 fxsave->cwd = fpu->fcw;
3688 fxsave->swd = fpu->fsw;
3689 fxsave->twd = fpu->ftwx;
3690 fxsave->fop = fpu->last_opcode;
3691 fxsave->rip = fpu->last_ip;
3692 fxsave->rdp = fpu->last_dp;
3693 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
3694
3695 vcpu_put(vcpu);
3696
3697 return 0;
3698}
3699
3700void fx_init(struct kvm_vcpu *vcpu)
3701{
3702 unsigned after_mxcsr_mask;
3703
3704 /* Initialize guest FPU by resetting ours and saving into guest's */
3705 preempt_disable();
ad312c7c 3706 fx_save(&vcpu->arch.host_fx_image);
d0752060 3707 fpu_init();
ad312c7c
ZX
3708 fx_save(&vcpu->arch.guest_fx_image);
3709 fx_restore(&vcpu->arch.host_fx_image);
d0752060
HB
3710 preempt_enable();
3711
ad312c7c 3712 vcpu->arch.cr0 |= X86_CR0_ET;
d0752060 3713 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
ad312c7c
ZX
3714 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
3715 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
d0752060
HB
3716 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
3717}
3718EXPORT_SYMBOL_GPL(fx_init);
3719
3720void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
3721{
3722 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
3723 return;
3724
3725 vcpu->guest_fpu_loaded = 1;
ad312c7c
ZX
3726 fx_save(&vcpu->arch.host_fx_image);
3727 fx_restore(&vcpu->arch.guest_fx_image);
d0752060
HB
3728}
3729EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
3730
3731void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
3732{
3733 if (!vcpu->guest_fpu_loaded)
3734 return;
3735
3736 vcpu->guest_fpu_loaded = 0;
ad312c7c
ZX
3737 fx_save(&vcpu->arch.guest_fx_image);
3738 fx_restore(&vcpu->arch.host_fx_image);
f096ed85 3739 ++vcpu->stat.fpu_reload;
d0752060
HB
3740}
3741EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
e9b11c17
ZX
3742
3743void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
3744{
3745 kvm_x86_ops->vcpu_free(vcpu);
3746}
3747
3748struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3749 unsigned int id)
3750{
26e5215f
AK
3751 return kvm_x86_ops->vcpu_create(kvm, id);
3752}
e9b11c17 3753
26e5215f
AK
3754int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
3755{
3756 int r;
e9b11c17
ZX
3757
3758 /* We do fxsave: this must be aligned. */
ad312c7c 3759 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
e9b11c17
ZX
3760
3761 vcpu_load(vcpu);
3762 r = kvm_arch_vcpu_reset(vcpu);
3763 if (r == 0)
3764 r = kvm_mmu_setup(vcpu);
3765 vcpu_put(vcpu);
3766 if (r < 0)
3767 goto free_vcpu;
3768
26e5215f 3769 return 0;
e9b11c17
ZX
3770free_vcpu:
3771 kvm_x86_ops->vcpu_free(vcpu);
26e5215f 3772 return r;
e9b11c17
ZX
3773}
3774
d40ccc62 3775void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
e9b11c17
ZX
3776{
3777 vcpu_load(vcpu);
3778 kvm_mmu_unload(vcpu);
3779 vcpu_put(vcpu);
3780
3781 kvm_x86_ops->vcpu_free(vcpu);
3782}
3783
3784int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
3785{
3786 return kvm_x86_ops->vcpu_reset(vcpu);
3787}
3788
3789void kvm_arch_hardware_enable(void *garbage)
3790{
3791 kvm_x86_ops->hardware_enable(garbage);
3792}
3793
3794void kvm_arch_hardware_disable(void *garbage)
3795{
3796 kvm_x86_ops->hardware_disable(garbage);
3797}
3798
3799int kvm_arch_hardware_setup(void)
3800{
3801 return kvm_x86_ops->hardware_setup();
3802}
3803
3804void kvm_arch_hardware_unsetup(void)
3805{
3806 kvm_x86_ops->hardware_unsetup();
3807}
3808
3809void kvm_arch_check_processor_compat(void *rtn)
3810{
3811 kvm_x86_ops->check_processor_compatibility(rtn);
3812}
3813
3814int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3815{
3816 struct page *page;
3817 struct kvm *kvm;
3818 int r;
3819
3820 BUG_ON(vcpu->kvm == NULL);
3821 kvm = vcpu->kvm;
3822
ad312c7c 3823 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
e9b11c17 3824 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
a4535290 3825 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
e9b11c17 3826 else
a4535290 3827 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
e9b11c17
ZX
3828
3829 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3830 if (!page) {
3831 r = -ENOMEM;
3832 goto fail;
3833 }
ad312c7c 3834 vcpu->arch.pio_data = page_address(page);
e9b11c17
ZX
3835
3836 r = kvm_mmu_create(vcpu);
3837 if (r < 0)
3838 goto fail_free_pio_data;
3839
3840 if (irqchip_in_kernel(kvm)) {
3841 r = kvm_create_lapic(vcpu);
3842 if (r < 0)
3843 goto fail_mmu_destroy;
3844 }
3845
3846 return 0;
3847
3848fail_mmu_destroy:
3849 kvm_mmu_destroy(vcpu);
3850fail_free_pio_data:
ad312c7c 3851 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17
ZX
3852fail:
3853 return r;
3854}
3855
3856void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3857{
3858 kvm_free_lapic(vcpu);
3200f405 3859 down_read(&vcpu->kvm->slots_lock);
e9b11c17 3860 kvm_mmu_destroy(vcpu);
3200f405 3861 up_read(&vcpu->kvm->slots_lock);
ad312c7c 3862 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17 3863}
d19a9cd2
ZX
3864
3865struct kvm *kvm_arch_create_vm(void)
3866{
3867 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
3868
3869 if (!kvm)
3870 return ERR_PTR(-ENOMEM);
3871
f05e70ac 3872 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
d19a9cd2
ZX
3873
3874 return kvm;
3875}
3876
3877static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
3878{
3879 vcpu_load(vcpu);
3880 kvm_mmu_unload(vcpu);
3881 vcpu_put(vcpu);
3882}
3883
3884static void kvm_free_vcpus(struct kvm *kvm)
3885{
3886 unsigned int i;
3887
3888 /*
3889 * Unpin any mmu pages first.
3890 */
3891 for (i = 0; i < KVM_MAX_VCPUS; ++i)
3892 if (kvm->vcpus[i])
3893 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
3894 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3895 if (kvm->vcpus[i]) {
3896 kvm_arch_vcpu_free(kvm->vcpus[i]);
3897 kvm->vcpus[i] = NULL;
3898 }
3899 }
3900
3901}
3902
3903void kvm_arch_destroy_vm(struct kvm *kvm)
3904{
7837699f 3905 kvm_free_pit(kvm);
d7deeeb0
ZX
3906 kfree(kvm->arch.vpic);
3907 kfree(kvm->arch.vioapic);
d19a9cd2
ZX
3908 kvm_free_vcpus(kvm);
3909 kvm_free_physmem(kvm);
3d45830c
AK
3910 if (kvm->arch.apic_access_page)
3911 put_page(kvm->arch.apic_access_page);
d19a9cd2
ZX
3912 kfree(kvm);
3913}
0de10343
ZX
3914
3915int kvm_arch_set_memory_region(struct kvm *kvm,
3916 struct kvm_userspace_memory_region *mem,
3917 struct kvm_memory_slot old,
3918 int user_alloc)
3919{
3920 int npages = mem->memory_size >> PAGE_SHIFT;
3921 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
3922
 3923	/* To keep backward compatibility with older userspace,
 3924	 * x86 needs to handle the !user_alloc case.
 3925	 */
3926 if (!user_alloc) {
3927 if (npages && !old.rmap) {
72dc67a6 3928 down_write(&current->mm->mmap_sem);
0de10343
ZX
3929 memslot->userspace_addr = do_mmap(NULL, 0,
3930 npages * PAGE_SIZE,
3931 PROT_READ | PROT_WRITE,
3932 MAP_SHARED | MAP_ANONYMOUS,
3933 0);
72dc67a6 3934 up_write(&current->mm->mmap_sem);
0de10343
ZX
3935
3936 if (IS_ERR((void *)memslot->userspace_addr))
3937 return PTR_ERR((void *)memslot->userspace_addr);
3938 } else {
3939 if (!old.user_alloc && old.rmap) {
3940 int ret;
3941
72dc67a6 3942 down_write(&current->mm->mmap_sem);
0de10343
ZX
3943 ret = do_munmap(current->mm, old.userspace_addr,
3944 old.npages * PAGE_SIZE);
72dc67a6 3945 up_write(&current->mm->mmap_sem);
0de10343
ZX
3946 if (ret < 0)
3947 printk(KERN_WARNING
3948 "kvm_vm_ioctl_set_memory_region: "
3949 "failed to munmap memory\n");
3950 }
3951 }
3952 }
3953
f05e70ac 3954 if (!kvm->arch.n_requested_mmu_pages) {
0de10343
ZX
3955 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
3956 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
3957 }
3958
3959 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
3960 kvm_flush_remote_tlbs(kvm);
3961
3962 return 0;
3963}
1d737c8a
ZX
3964
3965int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3966{
a4535290
AK
3967 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
3968 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
1d737c8a 3969}
5736199a
ZX
3970
3971static void vcpu_kick_intr(void *info)
3972{
3973#ifdef DEBUG
3974 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
 3975	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
3976#endif
3977}
3978
3979void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3980{
3981 int ipi_pcpu = vcpu->cpu;
e9571ed5 3982 int cpu = get_cpu();
5736199a
ZX
3983
3984 if (waitqueue_active(&vcpu->wq)) {
3985 wake_up_interruptible(&vcpu->wq);
3986 ++vcpu->stat.halt_wakeup;
3987 }
e9571ed5
MT
3988 /*
3989 * We may be called synchronously with irqs disabled in guest mode,
3990 * So need not to call smp_call_function_single() in that case.
3991 */
3992 if (vcpu->guest_mode && vcpu->cpu != cpu)
5736199a 3993 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
e9571ed5 3994 put_cpu();
5736199a 3995}