KVM: VMX: Rename misnamed msr bits
arch/x86/kvm/x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"

#include <linux/clocksource.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

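/*
 * The CR0/CR4/CR8 masks above invert the architecturally defined bit
 * sets, so any attempt by the guest to set an undefined control bit is
 * caught and turned into a #GP by the kvm_set_cr*() helpers below.
 */
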
/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

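/*
 * Bits are cleared from efer_reserved_bits at run time through
 * kvm_enable_efer_bits() below; e.g. the vendor module is expected to
 * enable EFER.NX this way once it has verified host support.
 */
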
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

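/*
 * Return the base address of the descriptor selected by @selector in
 * the host's GDT or, for selectors with the TI bit set, in the
 * current LDT.
 */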
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/* The cached value is kept current whether the irqchip lives in
	 * the kernel or in userspace, so there is no need to branch on
	 * irqchip_in_kernel() here. */
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		/* Present PDPTEs must have all reserved bits clear. */
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	/* LMA is set by the CPU, not the guest: preserve the shadowed value. */
	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	/* Bump the version to an odd value while the update is in
	 * progress; the guest retries until it sees an even value. */
	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

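/*
 * Example: div_frac(250, 1000) == (250ull << 32) / 1000 == 0x40000000,
 * i.e. 0.25 expressed as a 0.32 fixed-point fraction.
 */
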
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	/* Scale ticks-per-second into (nsecs, nsecs*2] so the fraction
	 * below fits a 32-bit multiplier; the guest undoes this scaling
	 * with tsc_shift. */
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

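/*
 * Worked example: for tsc_khz == 2000000 (a 2 GHz TSC) neither loop
 * runs, so tsc_shift == 0 and tsc_to_system_mul == div_frac(1e9, 2e9)
 * == 0x80000000; the guest then multiplies TSC deltas by 0.5 to get
 * nanoseconds.
 */
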
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if (!vcpu->time_page)
		return;

	if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
		kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = tsc_khz;
	}

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (!msr_mtrr_valid(msr))
		return 1;

	vcpu->arch.mtrr[msr - 0x200] = data;
	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		down_read(&current->mm->mmap_sem);
		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
		up_read(&current->mm->mmap_sem);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_write_guest_time(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (!msr_mtrr_valid(msr))
		return 1;

	*pdata = vcpu->arch.mtrr[msr - 0x200];
	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

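/*
 * Note that __msr_io() stops at the first MSR that fails and returns
 * the count processed so far; msr_io() below relies on this and
 * reports "r = n" instead of an error for a partial success.
 */
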
/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
			cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	/* Bit 20 of CPUID.80000001H:EDX advertises NX support. */
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

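/*
 * Example: X86_FEATURE_NX is defined as word 1, bit 20 (1*32 + 20), so
 * bit(X86_FEATURE_NX) == 1 << 20 -- the word index is masked away and
 * only the bit position within the 32-bit feature word remains.
 */
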
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		/* Expose NX only when the host has EFER.NX; a logical
		 * "&&" here would collapse the bitmask to 0 or 1 and
		 * never actually set the NX bit. */
		(is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

	/* all func 2 cpuid_count() should be called on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
				cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
				cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	spin_unlock(&kvm->mmu_lock);
	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[0],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[1],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
			ioapic_irqchip(kvm),
			sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
			&chip->chip.ioapic,
			sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}

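/*
 * Write access is removed while the bitmap is harvested so that the
 * next guest write to the slot faults and re-marks the page dirty; the
 * remote TLB flush ensures no vcpu keeps using a stale writable
 * mapping in the meantime.
 */
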
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->arch.vpic = kvm_create_pic(kvm);
		if (kvm->arch.vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->arch.vpic);
				kvm->arch.vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_CREATE_PIT:
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm);
		if (kvm->arch.vpit)
			r = 0;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
					irq_event.irq,
					irq_event.level);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
					irq_event.irq,
					irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_PIT: {
		struct kvm_pit_state ps;
		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &ps, sizeof ps))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		struct kvm_pit_state ps;
		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &ps);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		break;
	}
out:
	return r;
}

static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	/* Compact msrs_to_save in place, dropping every MSR the host
	 * cannot read (rdmsr_safe() fails on unsupported MSRs). */
	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Only the APIC needs an MMIO device hook, so shortcut now..
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						gpa_t addr, int len,
						int is_write)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr, len, is_write))
			return dev;
	}
	return NULL;
}


static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr, int len,
						int is_write)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
					  is_write);
	return dev;
}

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		/* "-addr & ~PAGE_MASK" is the number of bytes left in
		 * the first page; write those first, then the rest. */
		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

1990static int emulator_cmpxchg_emulated(unsigned long addr,
1991 const void *old,
1992 const void *new,
1993 unsigned int bytes,
1994 struct kvm_vcpu *vcpu)
1995{
1996 static int reported;
1997
1998 if (!reported) {
1999 reported = 1;
2000 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2001 }
2bacc55c
MT
2002#ifndef CONFIG_X86_64
2003 	/* a guest cmpxchg8b has to be emulated atomically */
2004 if (bytes == 8) {
10589a46 2005 gpa_t gpa;
2bacc55c 2006 struct page *page;
c0b49b0d 2007 char *kaddr;
2bacc55c
MT
2008 u64 val;
2009
10589a46
MT
2010 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2011
2bacc55c
MT
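	/*
	 * set_64bit() is only atomic if the whole quadword lies in one
	 * mappable guest page; unmapped GVAs, APIC accesses and
	 * page-straddling operands fall back to plain write emulation.
	 * Note the atomic path also falls through to
	 * emulator_write_emulated() below, so the MMU write tracking in
	 * emulator_write_phys() still sees the new value.
	 */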
2012 if (gpa == UNMAPPED_GVA ||
2013 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2014 goto emul_write;
2015
2016 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2017 goto emul_write;
2018
2019 val = *(u64 *)new;
72dc67a6
IE
2020
2021 down_read(&current->mm->mmap_sem);
2bacc55c 2022 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
72dc67a6
IE
2023 up_read(&current->mm->mmap_sem);
2024
c0b49b0d
AM
2025 kaddr = kmap_atomic(page, KM_USER0);
2026 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2027 kunmap_atomic(kaddr, KM_USER0);
2bacc55c
MT
2028 kvm_release_page_dirty(page);
2029 }
3200f405 2030emul_write:
2bacc55c
MT
2031#endif
2032
bbd9b64e
CO
2033 return emulator_write_emulated(addr, new, bytes, vcpu);
2034}
2035
2036static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2037{
2038 return kvm_x86_ops->get_segment_base(vcpu, seg);
2039}
2040
2041int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2042{
2043 return X86EMUL_CONTINUE;
2044}
2045
2046int emulate_clts(struct kvm_vcpu *vcpu)
2047{
54e445ca 2048 KVMTRACE_0D(CLTS, vcpu, handler);
ad312c7c 2049 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
bbd9b64e
CO
2050 return X86EMUL_CONTINUE;
2051}
2052
2053int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2054{
2055 struct kvm_vcpu *vcpu = ctxt->vcpu;
2056
2057 switch (dr) {
2058 case 0 ... 3:
2059 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2060 return X86EMUL_CONTINUE;
2061 default:
b8688d51 2062 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
bbd9b64e
CO
2063 return X86EMUL_UNHANDLEABLE;
2064 }
2065}
2066
2067int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2068{
2069 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2070 int exception;
2071
2072 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2073 if (exception) {
2074 /* FIXME: better handling */
2075 return X86EMUL_UNHANDLEABLE;
2076 }
2077 return X86EMUL_CONTINUE;
2078}
2079
2080void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2081{
bbd9b64e 2082 u8 opcodes[4];
ad312c7c 2083 unsigned long rip = vcpu->arch.rip;
bbd9b64e
CO
2084 unsigned long rip_linear;
2085
f76c710d 2086 if (!printk_ratelimit())
bbd9b64e
CO
2087 return;
2088
25be4608
GC
2089 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2090
bbd9b64e
CO
2091 emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
2092
2093 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2094 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
bbd9b64e
CO
2095}
2096EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2097
14af3f3c 2098static struct x86_emulate_ops emulate_ops = {
bbd9b64e 2099 .read_std = emulator_read_std,
bbd9b64e
CO
2100 .read_emulated = emulator_read_emulated,
2101 .write_emulated = emulator_write_emulated,
2102 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2103};
2104
2105int emulate_instruction(struct kvm_vcpu *vcpu,
2106 struct kvm_run *run,
2107 unsigned long cr2,
2108 u16 error_code,
571008da 2109 int emulation_type)
bbd9b64e
CO
2110{
2111 int r;
571008da 2112 struct decode_cache *c;
bbd9b64e 2113
ad312c7c 2114 vcpu->arch.mmio_fault_cr2 = cr2;
bbd9b64e
CO
2115 kvm_x86_ops->cache_regs(vcpu);
2116
2117 vcpu->mmio_is_write = 0;
ad312c7c 2118 vcpu->arch.pio.string = 0;
bbd9b64e 2119
571008da 2120 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
bbd9b64e
CO
2121 int cs_db, cs_l;
2122 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2123
ad312c7c
ZX
2124 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2125 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2126 vcpu->arch.emulate_ctxt.mode =
2127 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
bbd9b64e
CO
2128 ? X86EMUL_MODE_REAL : cs_l
2129 ? X86EMUL_MODE_PROT64 : cs_db
2130 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
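		/*
		 * Decode mode, in order of precedence: EFLAGS.VM selects
		 * real-mode (virtual-8086) semantics; otherwise CS.L set
		 * means 64-bit code, CS.D set means 32-bit code, and a
		 * 16-bit code segment is the fallback.
		 */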
2131
ad312c7c 2132 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
571008da
SY
2133
2134 		/* Reject instructions other than VMCALL/VMMCALL when
2135 		 * trying to emulate an invalid opcode */
2136 c = &vcpu->arch.emulate_ctxt.decode;
2137 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2138 (!(c->twobyte && c->b == 0x01 &&
2139 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2140 c->modrm_mod == 3 && c->modrm_rm == 1)))
2141 return EMULATE_FAIL;
2142
f2b5756b 2143 ++vcpu->stat.insn_emulation;
bbd9b64e 2144 if (r) {
f2b5756b 2145 ++vcpu->stat.insn_emulation_fail;
bbd9b64e
CO
2146 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2147 return EMULATE_DONE;
2148 return EMULATE_FAIL;
2149 }
2150 }
2151
ad312c7c 2152 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
bbd9b64e 2153
ad312c7c 2154 if (vcpu->arch.pio.string)
bbd9b64e
CO
2155 return EMULATE_DO_MMIO;
2156
2157 if ((r || vcpu->mmio_is_write) && run) {
2158 run->exit_reason = KVM_EXIT_MMIO;
2159 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2160 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2161 run->mmio.len = vcpu->mmio_size;
2162 run->mmio.is_write = vcpu->mmio_is_write;
2163 }
2164
2165 if (r) {
2166 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2167 return EMULATE_DONE;
2168 if (!vcpu->mmio_needed) {
2169 kvm_report_emulation_failure(vcpu, "mmio");
2170 return EMULATE_FAIL;
2171 }
2172 return EMULATE_DO_MMIO;
2173 }
2174
2175 kvm_x86_ops->decache_regs(vcpu);
ad312c7c 2176 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
bbd9b64e
CO
2177
2178 if (vcpu->mmio_is_write) {
2179 vcpu->mmio_needed = 0;
2180 return EMULATE_DO_MMIO;
2181 }
2182
2183 return EMULATE_DONE;
2184}
2185EXPORT_SYMBOL_GPL(emulate_instruction);
2186
de7d789a
CO
2187static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
2188{
2189 int i;
2190
ad312c7c
ZX
2191 for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
2192 if (vcpu->arch.pio.guest_pages[i]) {
2193 kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
2194 vcpu->arch.pio.guest_pages[i] = NULL;
de7d789a
CO
2195 }
2196}
2197
2198static int pio_copy_data(struct kvm_vcpu *vcpu)
2199{
ad312c7c 2200 void *p = vcpu->arch.pio_data;
de7d789a
CO
2201 void *q;
2202 unsigned bytes;
ad312c7c 2203 int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
de7d789a 2204
ad312c7c 2205 q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
de7d789a
CO
2206 PAGE_KERNEL);
2207 if (!q) {
2208 free_pio_guest_pages(vcpu);
2209 return -ENOMEM;
2210 }
ad312c7c
ZX
2211 q += vcpu->arch.pio.guest_page_offset;
2212 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2213 if (vcpu->arch.pio.in)
de7d789a
CO
2214 memcpy(q, p, bytes);
2215 else
2216 memcpy(p, q, bytes);
ad312c7c 2217 q -= vcpu->arch.pio.guest_page_offset;
de7d789a
CO
2218 vunmap(q);
2219 free_pio_guest_pages(vcpu);
2220 return 0;
2221}
2222
2223int complete_pio(struct kvm_vcpu *vcpu)
2224{
ad312c7c 2225 struct kvm_pio_request *io = &vcpu->arch.pio;
de7d789a
CO
2226 long delta;
2227 int r;
2228
2229 kvm_x86_ops->cache_regs(vcpu);
2230
2231 if (!io->string) {
2232 if (io->in)
ad312c7c 2233 memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
de7d789a
CO
2234 io->size);
2235 } else {
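		/*
		 * String instruction: adjust RSI/RDI (and, for REP, RCX) by
		 * hand. E.g. a "rep insw" round with cur_count == 5 and
		 * size == 2 subtracts 5 from RCX and advances RDI by 10
		 * bytes; io->down flips the sign of the pointer advance.
		 */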
2236 if (io->in) {
2237 r = pio_copy_data(vcpu);
2238 if (r) {
2239 kvm_x86_ops->cache_regs(vcpu);
2240 return r;
2241 }
2242 }
2243
2244 delta = 1;
2245 if (io->rep) {
2246 delta *= io->cur_count;
2247 /*
2248 * The size of the register should really depend on
2249 	 * the current address size.
2250 */
ad312c7c 2251 vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
de7d789a
CO
2252 }
2253 if (io->down)
2254 delta = -delta;
2255 delta *= io->size;
2256 if (io->in)
ad312c7c 2257 vcpu->arch.regs[VCPU_REGS_RDI] += delta;
de7d789a 2258 else
ad312c7c 2259 vcpu->arch.regs[VCPU_REGS_RSI] += delta;
de7d789a
CO
2260 }
2261
2262 kvm_x86_ops->decache_regs(vcpu);
2263
2264 io->count -= io->cur_count;
2265 io->cur_count = 0;
2266
2267 return 0;
2268}
2269
2270static void kernel_pio(struct kvm_io_device *pio_dev,
2271 struct kvm_vcpu *vcpu,
2272 void *pd)
2273{
2274 	/* TODO: String I/O for in-kernel device */
2275
2276 mutex_lock(&vcpu->kvm->lock);
ad312c7c
ZX
2277 if (vcpu->arch.pio.in)
2278 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2279 vcpu->arch.pio.size,
de7d789a
CO
2280 pd);
2281 else
ad312c7c
ZX
2282 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2283 vcpu->arch.pio.size,
de7d789a
CO
2284 pd);
2285 mutex_unlock(&vcpu->kvm->lock);
2286}
2287
2288static void pio_string_write(struct kvm_io_device *pio_dev,
2289 struct kvm_vcpu *vcpu)
2290{
ad312c7c
ZX
2291 struct kvm_pio_request *io = &vcpu->arch.pio;
2292 void *pd = vcpu->arch.pio_data;
de7d789a
CO
2293 int i;
2294
2295 mutex_lock(&vcpu->kvm->lock);
2296 for (i = 0; i < io->cur_count; i++) {
2297 kvm_iodevice_write(pio_dev, io->port,
2298 io->size,
2299 pd);
2300 pd += io->size;
2301 }
2302 mutex_unlock(&vcpu->kvm->lock);
2303}
2304
2305static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
92760499
LV
2306 gpa_t addr, int len,
2307 int is_write)
de7d789a 2308{
92760499 2309 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
de7d789a
CO
2310}
2311
2312int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2313 int size, unsigned port)
2314{
2315 struct kvm_io_device *pio_dev;
2316
2317 vcpu->run->exit_reason = KVM_EXIT_IO;
2318 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2319 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2320 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2321 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2322 vcpu->run->io.port = vcpu->arch.pio.port = port;
2323 vcpu->arch.pio.in = in;
2324 vcpu->arch.pio.string = 0;
2325 vcpu->arch.pio.down = 0;
2326 vcpu->arch.pio.guest_page_offset = 0;
2327 vcpu->arch.pio.rep = 0;
de7d789a 2328
2714d1d3
FEL
2329 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2330 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2331 handler);
2332 else
2333 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2334 handler);
2335
de7d789a 2336 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2337 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
de7d789a
CO
2338
2339 kvm_x86_ops->skip_emulated_instruction(vcpu);
2340
92760499 2341 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
de7d789a 2342 if (pio_dev) {
ad312c7c 2343 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
de7d789a
CO
2344 complete_pio(vcpu);
2345 return 1;
2346 }
2347 return 0;
2348}
2349EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2350
2351int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2352 int size, unsigned long count, int down,
2353 gva_t address, int rep, unsigned port)
2354{
2355 unsigned now, in_page;
2356 int i, ret = 0;
2357 int nr_pages = 1;
2358 struct page *page;
2359 struct kvm_io_device *pio_dev;
2360
2361 vcpu->run->exit_reason = KVM_EXIT_IO;
2362 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2363 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2364 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2365 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2366 vcpu->run->io.port = vcpu->arch.pio.port = port;
2367 vcpu->arch.pio.in = in;
2368 vcpu->arch.pio.string = 1;
2369 vcpu->arch.pio.down = down;
2370 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2371 vcpu->arch.pio.rep = rep;
de7d789a 2372
2714d1d3
FEL
2373 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2374 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2375 handler);
2376 else
2377 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2378 handler);
2379
de7d789a
CO
2380 if (!count) {
2381 kvm_x86_ops->skip_emulated_instruction(vcpu);
2382 return 1;
2383 }
2384
2385 if (!down)
2386 in_page = PAGE_SIZE - offset_in_page(address);
2387 else
2388 in_page = offset_in_page(address) + size;
2389 now = min(count, (unsigned long)in_page / size);
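	/*
	 * in_page is the number of string bytes that fit on the current
	 * page in the transfer direction: with 4K pages, size == 2 and
	 * offset_in_page(address) == 4090, an upward transfer has
	 * in_page == 6, so at most 3 elements are handled this round.
	 */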
2390 if (!now) {
2391 /*
2392 * String I/O straddles page boundary. Pin two guest pages
2393 * so that we satisfy atomicity constraints. Do just one
2394 * transaction to avoid complexity.
2395 */
2396 nr_pages = 2;
2397 now = 1;
2398 }
2399 if (down) {
2400 /*
2401 * String I/O in reverse. Yuck. Kill the guest, fix later.
2402 */
2403 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2404 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2405 return 1;
2406 }
2407 vcpu->run->io.count = now;
ad312c7c 2408 vcpu->arch.pio.cur_count = now;
de7d789a 2409
ad312c7c 2410 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
de7d789a
CO
2411 kvm_x86_ops->skip_emulated_instruction(vcpu);
2412
2413 for (i = 0; i < nr_pages; ++i) {
de7d789a 2414 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
ad312c7c 2415 vcpu->arch.pio.guest_pages[i] = page;
de7d789a 2416 if (!page) {
c1a5d4f9 2417 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2418 free_pio_guest_pages(vcpu);
2419 return 1;
2420 }
2421 }
2422
92760499
LV
2423 pio_dev = vcpu_find_pio_dev(vcpu, port,
2424 vcpu->arch.pio.cur_count,
2425 !vcpu->arch.pio.in);
ad312c7c 2426 if (!vcpu->arch.pio.in) {
de7d789a
CO
2427 /* string PIO write */
2428 ret = pio_copy_data(vcpu);
2429 if (ret >= 0 && pio_dev) {
2430 pio_string_write(pio_dev, vcpu);
2431 complete_pio(vcpu);
ad312c7c 2432 if (vcpu->arch.pio.count == 0)
de7d789a
CO
2433 ret = 1;
2434 }
2435 } else if (pio_dev)
2436 pr_unimpl(vcpu, "no string pio read support yet, "
2437 "port %x size %d count %ld\n",
2438 port, size, count);
2439
2440 return ret;
2441}
2442EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2443
f8c16bba 2444int kvm_arch_init(void *opaque)
043405e1 2445{
56c6d28a 2446 int r;
f8c16bba
ZX
2447 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2448
f8c16bba
ZX
2449 if (kvm_x86_ops) {
2450 printk(KERN_ERR "kvm: already loaded the other module\n");
56c6d28a
ZX
2451 r = -EEXIST;
2452 goto out;
f8c16bba
ZX
2453 }
2454
2455 if (!ops->cpu_has_kvm_support()) {
2456 printk(KERN_ERR "kvm: no hardware support\n");
56c6d28a
ZX
2457 r = -EOPNOTSUPP;
2458 goto out;
f8c16bba
ZX
2459 }
2460 if (ops->disabled_by_bios()) {
2461 printk(KERN_ERR "kvm: disabled by bios\n");
56c6d28a
ZX
2462 r = -EOPNOTSUPP;
2463 goto out;
f8c16bba
ZX
2464 }
2465
97db56ce
AK
2466 r = kvm_mmu_module_init();
2467 if (r)
2468 goto out;
2469
2470 kvm_init_msr_list();
2471
f8c16bba 2472 kvm_x86_ops = ops;
56c6d28a 2473 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
7b52345e
SY
2474 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2475 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2476 PT_DIRTY_MASK, PT64_NX_MASK, 0);
f8c16bba 2477 return 0;
56c6d28a
ZX
2478
2479out:
56c6d28a 2480 return r;
043405e1 2481}
8776e519 2482
f8c16bba
ZX
2483void kvm_arch_exit(void)
2484{
2485 kvm_x86_ops = NULL;
56c6d28a
ZX
2486 kvm_mmu_module_exit();
2487}
f8c16bba 2488
8776e519
HB
2489int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2490{
2491 ++vcpu->stat.halt_exits;
2714d1d3 2492 KVMTRACE_0D(HLT, vcpu, handler);
8776e519 2493 if (irqchip_in_kernel(vcpu->kvm)) {
a4535290 2494 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
3200f405 2495 up_read(&vcpu->kvm->slots_lock);
8776e519 2496 kvm_vcpu_block(vcpu);
3200f405 2497 down_read(&vcpu->kvm->slots_lock);
a4535290 2498 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
8776e519
HB
2499 return -EINTR;
2500 return 1;
2501 } else {
2502 vcpu->run->exit_reason = KVM_EXIT_HLT;
2503 return 0;
2504 }
2505}
2506EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2507
2f333bcb
MT
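/*
 * A 32-bit guest passes a 64-bit gpa split across two registers, e.g.
 * a0 == 0xdeadbeef with a1 == 0x1 names guest physical address 0x1deadbeef.
 */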
2508static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2509 unsigned long a1)
2510{
2511 if (is_long_mode(vcpu))
2512 return a0;
2513 else
2514 return a0 | ((gpa_t)a1 << 32);
2515}
2516
8776e519
HB
2517int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2518{
2519 unsigned long nr, a0, a1, a2, a3, ret;
2f333bcb 2520 int r = 1;
8776e519
HB
2521
2522 kvm_x86_ops->cache_regs(vcpu);
2523
ad312c7c
ZX
2524 nr = vcpu->arch.regs[VCPU_REGS_RAX];
2525 a0 = vcpu->arch.regs[VCPU_REGS_RBX];
2526 a1 = vcpu->arch.regs[VCPU_REGS_RCX];
2527 a2 = vcpu->arch.regs[VCPU_REGS_RDX];
2528 a3 = vcpu->arch.regs[VCPU_REGS_RSI];
8776e519 2529
2714d1d3
FEL
2530 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2531
8776e519
HB
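	/*
	 * Outside long mode only the low 32 bits of each register are
	 * architecturally defined, so truncate the hypercall number and
	 * its arguments before dispatching.
	 */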
2532 if (!is_long_mode(vcpu)) {
2533 nr &= 0xFFFFFFFF;
2534 a0 &= 0xFFFFFFFF;
2535 a1 &= 0xFFFFFFFF;
2536 a2 &= 0xFFFFFFFF;
2537 a3 &= 0xFFFFFFFF;
2538 }
2539
2540 switch (nr) {
b93463aa
AK
2541 case KVM_HC_VAPIC_POLL_IRQ:
2542 ret = 0;
2543 break;
2f333bcb
MT
2544 case KVM_HC_MMU_OP:
2545 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2546 break;
8776e519
HB
2547 default:
2548 ret = -KVM_ENOSYS;
2549 break;
2550 }
ad312c7c 2551 vcpu->arch.regs[VCPU_REGS_RAX] = ret;
8776e519 2552 kvm_x86_ops->decache_regs(vcpu);
f11c3a8d 2553 ++vcpu->stat.hypercalls;
2f333bcb 2554 return r;
8776e519
HB
2555}
2556EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2557
2558int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2559{
2560 char instruction[3];
2561 int ret = 0;
2562
8776e519
HB
2563
2564 /*
2565 	 * Blow out the MMU so that no other VCPU has an active mapping,
2566 	 * ensuring that the updated hypercall appears atomically across
2567 	 * all VCPUs.
2568 */
2569 kvm_mmu_zap_all(vcpu->kvm);
2570
2571 kvm_x86_ops->cache_regs(vcpu);
2572 kvm_x86_ops->patch_hypercall(vcpu, instruction);
ad312c7c 2573 if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
8776e519
HB
2574 != X86EMUL_CONTINUE)
2575 ret = -EFAULT;
2576
8776e519
HB
2577 return ret;
2578}
2579
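/*
 * A 32-bit mov to a control register supplies only the low word, so keep
 * the upper 32 bits of the current value:
 * mk_cr_64(0x100000001, 0x80000011) == 0x180000011.
 */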
2580static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2581{
2582 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2583}
2584
2585void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2586{
2587 struct descriptor_table dt = { limit, base };
2588
2589 kvm_x86_ops->set_gdt(vcpu, &dt);
2590}
2591
2592void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2593{
2594 struct descriptor_table dt = { limit, base };
2595
2596 kvm_x86_ops->set_idt(vcpu, &dt);
2597}
2598
2599void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2600 unsigned long *rflags)
2601{
2d3ad1f4 2602 kvm_lmsw(vcpu, msw);
8776e519
HB
2603 *rflags = kvm_x86_ops->get_rflags(vcpu);
2604}
2605
2606unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2607{
54e445ca
JR
2608 unsigned long value;
2609
8776e519
HB
2610 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2611 switch (cr) {
2612 case 0:
54e445ca
JR
2613 value = vcpu->arch.cr0;
2614 break;
8776e519 2615 case 2:
54e445ca
JR
2616 value = vcpu->arch.cr2;
2617 break;
8776e519 2618 case 3:
54e445ca
JR
2619 value = vcpu->arch.cr3;
2620 break;
8776e519 2621 case 4:
54e445ca
JR
2622 value = vcpu->arch.cr4;
2623 break;
152ff9be 2624 case 8:
54e445ca
JR
2625 value = kvm_get_cr8(vcpu);
2626 break;
8776e519 2627 default:
b8688d51 2628 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
2629 return 0;
2630 }
54e445ca
JR
2631 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2632 (u32)((u64)value >> 32), handler);
2633
2634 return value;
8776e519
HB
2635}
2636
2637void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2638 unsigned long *rflags)
2639{
54e445ca
JR
2640 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2641 (u32)((u64)val >> 32), handler);
2642
8776e519
HB
2643 switch (cr) {
2644 case 0:
2d3ad1f4 2645 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
8776e519
HB
2646 *rflags = kvm_x86_ops->get_rflags(vcpu);
2647 break;
2648 case 2:
ad312c7c 2649 vcpu->arch.cr2 = val;
8776e519
HB
2650 break;
2651 case 3:
2d3ad1f4 2652 kvm_set_cr3(vcpu, val);
8776e519
HB
2653 break;
2654 case 4:
2d3ad1f4 2655 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 2656 break;
152ff9be 2657 case 8:
2d3ad1f4 2658 kvm_set_cr8(vcpu, val & 0xfUL);
152ff9be 2659 break;
8776e519 2660 default:
b8688d51 2661 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
2662 }
2663}
2664
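/*
 * CPUID leaf 2 is "stateful": successive executions return successive
 * entries, so the table may hold several entries for the same function,
 * with a READ_NEXT flag marking the one the next CPUID should report.
 */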
07716717
DK
2665static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2666{
ad312c7c
ZX
2667 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2668 int j, nent = vcpu->arch.cpuid_nent;
07716717
DK
2669
2670 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2671 /* when no next entry is found, the current entry[i] is reselected */
2672 	for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
ad312c7c 2673 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
07716717
DK
2674 if (ej->function == e->function) {
2675 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2676 return j;
2677 }
2678 }
2679 return 0; /* silence gcc, even though control never reaches here */
2680}
2681
2682/* find an entry with matching function, matching index (if needed), and that
2683 * should be read next (if it's stateful) */
2684static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2685 u32 function, u32 index)
2686{
2687 if (e->function != function)
2688 return 0;
2689 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2690 return 0;
2691 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2692 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2693 return 0;
2694 return 1;
2695}
2696
8776e519
HB
2697void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2698{
2699 int i;
07716717
DK
2700 u32 function, index;
2701 struct kvm_cpuid_entry2 *e, *best;
8776e519
HB
2702
2703 kvm_x86_ops->cache_regs(vcpu);
ad312c7c
ZX
2704 function = vcpu->arch.regs[VCPU_REGS_RAX];
2705 index = vcpu->arch.regs[VCPU_REGS_RCX];
2706 vcpu->arch.regs[VCPU_REGS_RAX] = 0;
2707 vcpu->arch.regs[VCPU_REGS_RBX] = 0;
2708 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2709 vcpu->arch.regs[VCPU_REGS_RDX] = 0;
8776e519 2710 best = NULL;
ad312c7c
ZX
2711 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2712 e = &vcpu->arch.cpuid_entries[i];
07716717
DK
2713 if (is_matching_cpuid_entry(e, function, index)) {
2714 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2715 move_to_next_stateful_cpuid_entry(vcpu, i);
8776e519
HB
2716 best = e;
2717 break;
2718 }
2719 		/*
2720 		 * Both basic or both extended? (bit 31 separates the ranges)
2721 		 */
2722 if (((e->function ^ function) & 0x80000000) == 0)
2723 if (!best || e->function > best->function)
2724 best = e;
2725 }
2726 if (best) {
ad312c7c
ZX
2727 vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
2728 vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
2729 vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
2730 vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
8776e519
HB
2731 }
2732 kvm_x86_ops->decache_regs(vcpu);
2733 kvm_x86_ops->skip_emulated_instruction(vcpu);
2714d1d3
FEL
2734 KVMTRACE_5D(CPUID, vcpu, function,
2735 (u32)vcpu->arch.regs[VCPU_REGS_RAX],
2736 (u32)vcpu->arch.regs[VCPU_REGS_RBX],
2737 (u32)vcpu->arch.regs[VCPU_REGS_RCX],
2738 (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
8776e519
HB
2739}
2740EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 2741
b6c7a5dc
HB
2742/*
2743 * Check if userspace requested an interrupt window, and that the
2744 * interrupt window is open.
2745 *
2746 * No need to exit to userspace if we already have an interrupt queued.
2747 */
2748static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2749 struct kvm_run *kvm_run)
2750{
ad312c7c 2751 return (!vcpu->arch.irq_summary &&
b6c7a5dc 2752 kvm_run->request_interrupt_window &&
ad312c7c 2753 vcpu->arch.interrupt_window_open &&
b6c7a5dc
HB
2754 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2755}
2756
2757static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2758 struct kvm_run *kvm_run)
2759{
2760 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2d3ad1f4 2761 kvm_run->cr8 = kvm_get_cr8(vcpu);
b6c7a5dc
HB
2762 kvm_run->apic_base = kvm_get_apic_base(vcpu);
2763 if (irqchip_in_kernel(vcpu->kvm))
2764 kvm_run->ready_for_interrupt_injection = 1;
2765 else
2766 kvm_run->ready_for_interrupt_injection =
ad312c7c
ZX
2767 (vcpu->arch.interrupt_window_open &&
2768 vcpu->arch.irq_summary == 0);
b6c7a5dc
HB
2769}
2770
b93463aa
AK
2771static void vapic_enter(struct kvm_vcpu *vcpu)
2772{
2773 struct kvm_lapic *apic = vcpu->arch.apic;
2774 struct page *page;
2775
2776 if (!apic || !apic->vapic_addr)
2777 return;
2778
10589a46 2779 down_read(&current->mm->mmap_sem);
b93463aa 2780 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
10589a46 2781 up_read(&current->mm->mmap_sem);
72dc67a6
IE
2782
2783 vcpu->arch.apic->vapic_page = page;
b93463aa
AK
2784}
2785
2786static void vapic_exit(struct kvm_vcpu *vcpu)
2787{
2788 struct kvm_lapic *apic = vcpu->arch.apic;
2789
2790 if (!apic || !apic->vapic_addr)
2791 return;
2792
f8b78fa3 2793 down_read(&vcpu->kvm->slots_lock);
b93463aa
AK
2794 kvm_release_page_dirty(apic->vapic_page);
2795 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
f8b78fa3 2796 up_read(&vcpu->kvm->slots_lock);
b93463aa
AK
2797}
2798
b6c7a5dc
HB
2799static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2800{
2801 int r;
2802
a4535290 2803 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
b6c7a5dc 2804 pr_debug("vcpu %d received sipi with vector # %x\n",
ad312c7c 2805 vcpu->vcpu_id, vcpu->arch.sipi_vector);
b6c7a5dc
HB
2806 kvm_lapic_reset(vcpu);
2807 r = kvm_x86_ops->vcpu_reset(vcpu);
2808 if (r)
2809 return r;
a4535290 2810 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
b6c7a5dc
HB
2811 }
2812
3200f405 2813 down_read(&vcpu->kvm->slots_lock);
b93463aa
AK
2814 vapic_enter(vcpu);
2815
b6c7a5dc
HB
2816preempted:
2817 if (vcpu->guest_debug.enabled)
2818 kvm_x86_ops->guest_debug_pre(vcpu);
2819
2820again:
2e53d63a
MT
2821 if (vcpu->requests)
2822 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
2823 kvm_mmu_unload(vcpu);
2824
b6c7a5dc
HB
2825 r = kvm_mmu_reload(vcpu);
2826 if (unlikely(r))
2827 goto out;
2828
2f52d58c
AK
2829 if (vcpu->requests) {
2830 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2f599714 2831 __kvm_migrate_timers(vcpu);
d4acf7e7
MT
2832 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2833 kvm_x86_ops->tlb_flush(vcpu);
b93463aa
AK
2834 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
2835 &vcpu->requests)) {
2836 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
2837 r = 0;
2838 goto out;
2839 }
71c4dfaf
JR
2840 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
2841 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2842 r = 0;
2843 goto out;
2844 }
2f52d58c 2845 }
b93463aa 2846
06e05645 2847 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
b6c7a5dc
HB
2848 kvm_inject_pending_timer_irqs(vcpu);
2849
2850 preempt_disable();
2851
2852 kvm_x86_ops->prepare_guest_switch(vcpu);
2853 kvm_load_guest_fpu(vcpu);
2854
2855 local_irq_disable();
2856
d4acf7e7 2857 if (vcpu->requests || need_resched()) {
6c142801
AK
2858 local_irq_enable();
2859 preempt_enable();
2860 r = 1;
2861 goto out;
2862 }
2863
b6c7a5dc
HB
2864 if (signal_pending(current)) {
2865 local_irq_enable();
2866 preempt_enable();
2867 r = -EINTR;
2868 kvm_run->exit_reason = KVM_EXIT_INTR;
2869 ++vcpu->stat.signal_exits;
2870 goto out;
2871 }
2872
e9571ed5
MT
2873 vcpu->guest_mode = 1;
2874 /*
2875 * Make sure that guest_mode assignment won't happen after
2876 * testing the pending IRQ vector bitmap.
2877 */
2878 smp_wmb();
2879
ad312c7c 2880 if (vcpu->arch.exception.pending)
298101da
AK
2881 __queue_exception(vcpu);
2882 else if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 2883 kvm_x86_ops->inject_pending_irq(vcpu);
eb9774f0 2884 else
b6c7a5dc
HB
2885 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2886
b93463aa
AK
2887 kvm_lapic_sync_to_vapic(vcpu);
2888
3200f405
MT
2889 up_read(&vcpu->kvm->slots_lock);
2890
b6c7a5dc
HB
2891 kvm_guest_enter();
2892
b6c7a5dc 2893
2714d1d3 2894 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
b6c7a5dc
HB
2895 kvm_x86_ops->run(vcpu, kvm_run);
2896
2897 vcpu->guest_mode = 0;
2898 local_irq_enable();
2899
2900 ++vcpu->stat.exits;
2901
2902 /*
2903 * We must have an instruction between local_irq_enable() and
2904 * kvm_guest_exit(), so the timer interrupt isn't delayed by
2905 * the interrupt shadow. The stat.exits increment will do nicely.
2906 * But we need to prevent reordering, hence this barrier():
2907 */
2908 barrier();
2909
2910 kvm_guest_exit();
2911
2912 preempt_enable();
2913
3200f405
MT
2914 down_read(&vcpu->kvm->slots_lock);
2915
b6c7a5dc
HB
2916 /*
2917 * Profile KVM exit RIPs:
2918 */
2919 if (unlikely(prof_on == KVM_PROFILING)) {
2920 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2921 profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
b6c7a5dc
HB
2922 }
2923
ad312c7c
ZX
2924 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
2925 vcpu->arch.exception.pending = false;
298101da 2926
b93463aa
AK
2927 kvm_lapic_sync_from_vapic(vcpu);
2928
b6c7a5dc
HB
2929 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2930
2931 if (r > 0) {
2932 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2933 r = -EINTR;
2934 kvm_run->exit_reason = KVM_EXIT_INTR;
2935 ++vcpu->stat.request_irq_exits;
2936 goto out;
2937 }
e1beb1d3 2938 if (!need_resched())
b6c7a5dc 2939 goto again;
b6c7a5dc
HB
2940 }
2941
2942out:
3200f405 2943 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
2944 if (r > 0) {
2945 kvm_resched(vcpu);
3200f405 2946 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
2947 goto preempted;
2948 }
2949
2950 post_kvm_run_save(vcpu, kvm_run);
2951
b93463aa
AK
2952 vapic_exit(vcpu);
2953
b6c7a5dc
HB
2954 return r;
2955}
2956
2957int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2958{
2959 int r;
2960 sigset_t sigsaved;
2961
2962 vcpu_load(vcpu);
2963
ac9f6dc0
AK
2964 if (vcpu->sigset_active)
2965 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2966
a4535290 2967 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
b6c7a5dc 2968 kvm_vcpu_block(vcpu);
ac9f6dc0
AK
2969 r = -EAGAIN;
2970 goto out;
b6c7a5dc
HB
2971 }
2972
b6c7a5dc
HB
2973 /* re-sync apic's tpr */
2974 if (!irqchip_in_kernel(vcpu->kvm))
2d3ad1f4 2975 kvm_set_cr8(vcpu, kvm_run->cr8);
b6c7a5dc 2976
ad312c7c 2977 if (vcpu->arch.pio.cur_count) {
b6c7a5dc
HB
2978 r = complete_pio(vcpu);
2979 if (r)
2980 goto out;
2981 }
2982#ifdef CONFIG_HAS_IOMEM
2983 if (vcpu->mmio_needed) {
2984 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2985 vcpu->mmio_read_completed = 1;
2986 vcpu->mmio_needed = 0;
3200f405
MT
2987
2988 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc 2989 r = emulate_instruction(vcpu, kvm_run,
571008da
SY
2990 vcpu->arch.mmio_fault_cr2, 0,
2991 EMULTYPE_NO_DECODE);
3200f405 2992 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
2993 if (r == EMULATE_DO_MMIO) {
2994 /*
2995 * Read-modify-write. Back to userspace.
2996 */
2997 r = 0;
2998 goto out;
2999 }
3000 }
3001#endif
3002 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
3003 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 3004 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
b6c7a5dc
HB
3005 kvm_x86_ops->decache_regs(vcpu);
3006 }
3007
3008 r = __vcpu_run(vcpu, kvm_run);
3009
3010out:
3011 if (vcpu->sigset_active)
3012 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3013
3014 vcpu_put(vcpu);
3015 return r;
3016}
3017
3018int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3019{
3020 vcpu_load(vcpu);
3021
3022 kvm_x86_ops->cache_regs(vcpu);
3023
ad312c7c
ZX
3024 regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
3025 regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
3026 regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
3027 regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
3028 regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
3029 regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
3030 regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3031 regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
b6c7a5dc 3032#ifdef CONFIG_X86_64
ad312c7c
ZX
3033 regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
3034 regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
3035 regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
3036 regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
3037 regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
3038 regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
3039 regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
3040 regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
b6c7a5dc
HB
3041#endif
3042
ad312c7c 3043 regs->rip = vcpu->arch.rip;
b6c7a5dc
HB
3044 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3045
3046 /*
3047 * Don't leak debug flags in case they were set for guest debugging
3048 */
3049 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
3050 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3051
3052 vcpu_put(vcpu);
3053
3054 return 0;
3055}
3056
3057int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3058{
3059 vcpu_load(vcpu);
3060
ad312c7c
ZX
3061 vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
3062 vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
3063 vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
3064 vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
3065 vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
3066 vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
3067 vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
3068 vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
b6c7a5dc 3069#ifdef CONFIG_X86_64
ad312c7c
ZX
3070 vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
3071 vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
3072 vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
3073 vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
3074 vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
3075 vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
3076 vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
3077 vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
b6c7a5dc
HB
3078#endif
3079
ad312c7c 3080 vcpu->arch.rip = regs->rip;
b6c7a5dc
HB
3081 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3082
3083 kvm_x86_ops->decache_regs(vcpu);
3084
b4f14abd
JK
3085 vcpu->arch.exception.pending = false;
3086
b6c7a5dc
HB
3087 vcpu_put(vcpu);
3088
3089 return 0;
3090}
3091
3e6e0aab
GT
3092void kvm_get_segment(struct kvm_vcpu *vcpu,
3093 struct kvm_segment *var, int seg)
b6c7a5dc 3094{
14af3f3c 3095 kvm_x86_ops->get_segment(vcpu, var, seg);
b6c7a5dc
HB
3096}
3097
3098void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3099{
3100 struct kvm_segment cs;
3101
3e6e0aab 3102 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
b6c7a5dc
HB
3103 *db = cs.db;
3104 *l = cs.l;
3105}
3106EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3107
3108int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3109 struct kvm_sregs *sregs)
3110{
3111 struct descriptor_table dt;
3112 int pending_vec;
3113
3114 vcpu_load(vcpu);
3115
3e6e0aab
GT
3116 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3117 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3118 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3119 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3120 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3121 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 3122
3e6e0aab
GT
3123 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3124 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc
HB
3125
3126 kvm_x86_ops->get_idt(vcpu, &dt);
3127 sregs->idt.limit = dt.limit;
3128 sregs->idt.base = dt.base;
3129 kvm_x86_ops->get_gdt(vcpu, &dt);
3130 sregs->gdt.limit = dt.limit;
3131 sregs->gdt.base = dt.base;
3132
3133 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
ad312c7c
ZX
3134 sregs->cr0 = vcpu->arch.cr0;
3135 sregs->cr2 = vcpu->arch.cr2;
3136 sregs->cr3 = vcpu->arch.cr3;
3137 sregs->cr4 = vcpu->arch.cr4;
2d3ad1f4 3138 sregs->cr8 = kvm_get_cr8(vcpu);
ad312c7c 3139 sregs->efer = vcpu->arch.shadow_efer;
b6c7a5dc
HB
3140 sregs->apic_base = kvm_get_apic_base(vcpu);
3141
3142 if (irqchip_in_kernel(vcpu->kvm)) {
3143 memset(sregs->interrupt_bitmap, 0,
3144 sizeof sregs->interrupt_bitmap);
3145 pending_vec = kvm_x86_ops->get_irq(vcpu);
3146 if (pending_vec >= 0)
3147 set_bit(pending_vec,
3148 (unsigned long *)sregs->interrupt_bitmap);
3149 } else
ad312c7c 3150 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
b6c7a5dc
HB
3151 sizeof sregs->interrupt_bitmap);
3152
3153 vcpu_put(vcpu);
3154
3155 return 0;
3156}
3157
62d9f0db
MT
3158int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3159 struct kvm_mp_state *mp_state)
3160{
3161 vcpu_load(vcpu);
3162 mp_state->mp_state = vcpu->arch.mp_state;
3163 vcpu_put(vcpu);
3164 return 0;
3165}
3166
3167int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3168 struct kvm_mp_state *mp_state)
3169{
3170 vcpu_load(vcpu);
3171 vcpu->arch.mp_state = mp_state->mp_state;
3172 vcpu_put(vcpu);
3173 return 0;
3174}
3175
3e6e0aab 3176static void kvm_set_segment(struct kvm_vcpu *vcpu,
b6c7a5dc
HB
3177 struct kvm_segment *var, int seg)
3178{
14af3f3c 3179 kvm_x86_ops->set_segment(vcpu, var, seg);
b6c7a5dc
HB
3180}
3181
37817f29
IE
3182static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3183 struct kvm_segment *kvm_desct)
3184{
3185 kvm_desct->base = seg_desc->base0;
3186 kvm_desct->base |= seg_desc->base1 << 16;
3187 kvm_desct->base |= seg_desc->base2 << 24;
3188 kvm_desct->limit = seg_desc->limit0;
3189 kvm_desct->limit |= seg_desc->limit << 16;
c93cd3a5
MT
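	/*
	 * With the granularity bit set, the 20-bit limit is in 4K units:
	 * scale it to bytes and fill in the low 12 bits.
	 */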
3190 if (seg_desc->g) {
3191 kvm_desct->limit <<= 12;
3192 kvm_desct->limit |= 0xfff;
3193 }
37817f29
IE
3194 kvm_desct->selector = selector;
3195 kvm_desct->type = seg_desc->type;
3196 kvm_desct->present = seg_desc->p;
3197 kvm_desct->dpl = seg_desc->dpl;
3198 kvm_desct->db = seg_desc->d;
3199 kvm_desct->s = seg_desc->s;
3200 kvm_desct->l = seg_desc->l;
3201 kvm_desct->g = seg_desc->g;
3202 kvm_desct->avl = seg_desc->avl;
3203 if (!selector)
3204 kvm_desct->unusable = 1;
3205 else
3206 kvm_desct->unusable = 0;
3207 kvm_desct->padding = 0;
3208}
3209
3210static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
3211 u16 selector,
3212 struct descriptor_table *dtable)
3213{
3214 if (selector & 1 << 2) {
3215 struct kvm_segment kvm_seg;
3216
3e6e0aab 3217 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
37817f29
IE
3218
3219 if (kvm_seg.unusable)
3220 dtable->limit = 0;
3221 else
3222 dtable->limit = kvm_seg.limit;
3223 dtable->base = kvm_seg.base;
3224 }
3225 else
3226 kvm_x86_ops->get_gdt(vcpu, dtable);
3227}
3228
3229 /* allowed just for 8-byte segment descriptors */
3230static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3231 struct desc_struct *seg_desc)
3232{
98899aa0 3233 gpa_t gpa;
37817f29
IE
3234 struct descriptor_table dtable;
3235 u16 index = selector >> 3;
3236
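	/*
	 * The descriptor index sits in selector bits 15:3 and each entry
	 * is 8 bytes, so the entry's last byte, index * 8 + 7, must lie
	 * within the table limit.
	 */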
3237 get_segment_descritptor_dtable(vcpu, selector, &dtable);
3238
3239 if (dtable.limit < index * 8 + 7) {
3240 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3241 return 1;
3242 }
98899aa0
MT
3243 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3244 gpa += index * 8;
3245 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
37817f29
IE
3246}
3247
3248 /* allowed just for 8-byte segment descriptors */
3249static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3250 struct desc_struct *seg_desc)
3251{
98899aa0 3252 gpa_t gpa;
37817f29
IE
3253 struct descriptor_table dtable;
3254 u16 index = selector >> 3;
3255
3256 get_segment_descritptor_dtable(vcpu, selector, &dtable);
3257
3258 if (dtable.limit < index * 8 + 7)
3259 return 1;
98899aa0
MT
3260 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3261 gpa += index * 8;
3262 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
37817f29
IE
3263}
3264
3265static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3266 struct desc_struct *seg_desc)
3267{
3268 u32 base_addr;
3269
3270 base_addr = seg_desc->base0;
3271 base_addr |= (seg_desc->base1 << 16);
3272 base_addr |= (seg_desc->base2 << 24);
3273
98899aa0 3274 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
37817f29
IE
3275}
3276
37817f29
IE
3277static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3278{
3279 struct kvm_segment kvm_seg;
3280
3e6e0aab 3281 kvm_get_segment(vcpu, &kvm_seg, seg);
37817f29
IE
3282 return kvm_seg.selector;
3283}
3284
3285static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3286 u16 selector,
3287 struct kvm_segment *kvm_seg)
3288{
3289 struct desc_struct seg_desc;
3290
3291 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3292 return 1;
3293 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3294 return 0;
3295}
3296
3e6e0aab
GT
3297int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3298 int type_bits, int seg)
37817f29
IE
3299{
3300 struct kvm_segment kvm_seg;
3301
3302 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3303 return 1;
3304 kvm_seg.type |= type_bits;
3305
3306 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3307 seg != VCPU_SREG_LDTR)
3308 if (!kvm_seg.s)
3309 kvm_seg.unusable = 1;
3310
3e6e0aab 3311 kvm_set_segment(vcpu, &kvm_seg, seg);
37817f29
IE
3312 return 0;
3313}
3314
3315static void save_state_to_tss32(struct kvm_vcpu *vcpu,
3316 struct tss_segment_32 *tss)
3317{
3318 tss->cr3 = vcpu->arch.cr3;
3319 tss->eip = vcpu->arch.rip;
3320 tss->eflags = kvm_x86_ops->get_rflags(vcpu);
3321 tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
3322 tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3323 tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
3324 tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
3325 tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
3326 tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
3327 tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
3328 tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
3329
3330 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3331 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3332 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3333 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3334 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
3335 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
3336 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3337 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3338}
3339
3340static int load_state_from_tss32(struct kvm_vcpu *vcpu,
3341 struct tss_segment_32 *tss)
3342{
3343 kvm_set_cr3(vcpu, tss->cr3);
3344
3345 vcpu->arch.rip = tss->eip;
3346 kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
3347
3348 vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
3349 vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
3350 vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
3351 vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
3352 vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
3353 vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
3354 vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
3355 vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
3356
3e6e0aab 3357 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
37817f29
IE
3358 return 1;
3359
3e6e0aab 3360 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
37817f29
IE
3361 return 1;
3362
3e6e0aab 3363 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
37817f29
IE
3364 return 1;
3365
3e6e0aab 3366 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
37817f29
IE
3367 return 1;
3368
3e6e0aab 3369 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
37817f29
IE
3370 return 1;
3371
3e6e0aab 3372 if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
37817f29
IE
3373 return 1;
3374
3e6e0aab 3375 if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
37817f29
IE
3376 return 1;
3377 return 0;
3378}
3379
3380static void save_state_to_tss16(struct kvm_vcpu *vcpu,
3381 struct tss_segment_16 *tss)
3382{
3383 tss->ip = vcpu->arch.rip;
3384 tss->flag = kvm_x86_ops->get_rflags(vcpu);
3385 tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
3386 tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
3387 tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
3388 tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
3389 tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
3390 tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
3391 tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
3392 tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
3393
3394 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3395 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3396 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3397 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3398 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3399 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3400}
3401
3402static int load_state_from_tss16(struct kvm_vcpu *vcpu,
3403 struct tss_segment_16 *tss)
3404{
3405 vcpu->arch.rip = tss->ip;
3406 kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
3407 vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
3408 vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
3409 vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
3410 vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
3411 vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
3412 vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
3413 vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
3414 vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
3415
3e6e0aab 3416 if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
37817f29
IE
3417 return 1;
3418
3e6e0aab 3419 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
37817f29
IE
3420 return 1;
3421
3e6e0aab 3422 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
37817f29
IE
3423 return 1;
3424
3e6e0aab 3425 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
37817f29
IE
3426 return 1;
3427
3e6e0aab 3428 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
37817f29
IE
3429 return 1;
3430 return 0;
3431}
3432
8b2cf73c 3433static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
34198bf8 3434 u32 old_tss_base,
37817f29
IE
3435 struct desc_struct *nseg_desc)
3436{
3437 struct tss_segment_16 tss_segment_16;
3438 int ret = 0;
3439
34198bf8
MT
3440 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3441 sizeof tss_segment_16))
37817f29
IE
3442 goto out;
3443
3444 save_state_to_tss16(vcpu, &tss_segment_16);
37817f29 3445
34198bf8
MT
3446 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3447 sizeof tss_segment_16))
37817f29 3448 goto out;
34198bf8
MT
3449
3450 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3451 &tss_segment_16, sizeof tss_segment_16))
3452 goto out;
3453
37817f29
IE
3454 if (load_state_from_tss16(vcpu, &tss_segment_16))
3455 goto out;
3456
3457 ret = 1;
3458out:
3459 return ret;
3460}
3461
8b2cf73c 3462static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
34198bf8 3463 u32 old_tss_base,
37817f29
IE
3464 struct desc_struct *nseg_desc)
3465{
3466 struct tss_segment_32 tss_segment_32;
3467 int ret = 0;
3468
34198bf8
MT
3469 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3470 sizeof tss_segment_32))
37817f29
IE
3471 goto out;
3472
3473 save_state_to_tss32(vcpu, &tss_segment_32);
37817f29 3474
34198bf8
MT
3475 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3476 sizeof tss_segment_32))
3477 goto out;
3478
3479 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3480 &tss_segment_32, sizeof tss_segment_32))
37817f29 3481 goto out;
34198bf8 3482
37817f29
IE
3483 if (load_state_from_tss32(vcpu, &tss_segment_32))
3484 goto out;
3485
3486 ret = 1;
3487out:
3488 return ret;
3489}
3490
3491int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3492{
3493 struct kvm_segment tr_seg;
3494 struct desc_struct cseg_desc;
3495 struct desc_struct nseg_desc;
3496 int ret = 0;
34198bf8
MT
3497 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
3498 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
37817f29 3499
34198bf8 3500 old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
37817f29 3501
34198bf8
MT
3502 	/* FIXME: Handle errors. Failure to read either TSS or either
3503 	 * of their descriptors should generate a page fault.
3504 	 */
37817f29
IE
3505 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3506 goto out;
3507
34198bf8 3508 if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
37817f29
IE
3509 goto out;
3510
37817f29
IE
3511 if (reason != TASK_SWITCH_IRET) {
3512 int cpl;
3513
3514 cpl = kvm_x86_ops->get_cpl(vcpu);
3515 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
3516 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3517 return 1;
3518 }
3519 }
3520
3521 if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
3522 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
3523 return 1;
3524 }
3525
3526 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3fe913e7 3527 		cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
34198bf8 3528 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
37817f29
IE
3529 }
3530
3531 if (reason == TASK_SWITCH_IRET) {
3532 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3533 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
3534 }
3535
3536 kvm_x86_ops->skip_emulated_instruction(vcpu);
3537 kvm_x86_ops->cache_regs(vcpu);
3538
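	/*
	 * Bit 3 of the system-descriptor type distinguishes a 32-bit TSS
	 * (types 9 and 11) from a 16-bit one (types 1 and 3).
	 */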
3539 if (nseg_desc.type & 8)
34198bf8 3540 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
37817f29
IE
3541 &nseg_desc);
3542 else
34198bf8 3543 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
37817f29
IE
3544 &nseg_desc);
3545
3546 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
3547 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3548 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
3549 }
3550
3551 if (reason != TASK_SWITCH_IRET) {
3fe913e7 3552 nseg_desc.type |= (1 << 1);
37817f29
IE
3553 save_guest_segment_descriptor(vcpu, tss_selector,
3554 &nseg_desc);
3555 }
3556
3557 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
3558 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
3559 tr_seg.type = 11;
3e6e0aab 3560 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
37817f29
IE
3561out:
3562 kvm_x86_ops->decache_regs(vcpu);
3563 return ret;
3564}
3565EXPORT_SYMBOL_GPL(kvm_task_switch);
3566
b6c7a5dc
HB
3567int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3568 struct kvm_sregs *sregs)
3569{
3570 int mmu_reset_needed = 0;
3571 int i, pending_vec, max_bits;
3572 struct descriptor_table dt;
3573
3574 vcpu_load(vcpu);
3575
3576 dt.limit = sregs->idt.limit;
3577 dt.base = sregs->idt.base;
3578 kvm_x86_ops->set_idt(vcpu, &dt);
3579 dt.limit = sregs->gdt.limit;
3580 dt.base = sregs->gdt.base;
3581 kvm_x86_ops->set_gdt(vcpu, &dt);
3582
ad312c7c
ZX
3583 vcpu->arch.cr2 = sregs->cr2;
3584 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
3585 vcpu->arch.cr3 = sregs->cr3;
b6c7a5dc 3586
2d3ad1f4 3587 kvm_set_cr8(vcpu, sregs->cr8);
b6c7a5dc 3588
ad312c7c 3589 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
b6c7a5dc 3590 kvm_x86_ops->set_efer(vcpu, sregs->efer);
b6c7a5dc
HB
3591 kvm_set_apic_base(vcpu, sregs->apic_base);
3592
3593 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3594
ad312c7c 3595 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
b6c7a5dc 3596 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
d7306163 3597 vcpu->arch.cr0 = sregs->cr0;
b6c7a5dc 3598
ad312c7c 3599 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
b6c7a5dc
HB
3600 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
3601 if (!is_long_mode(vcpu) && is_pae(vcpu))
ad312c7c 3602 load_pdptrs(vcpu, vcpu->arch.cr3);
b6c7a5dc
HB
3603
3604 if (mmu_reset_needed)
3605 kvm_mmu_reset_context(vcpu);
3606
3607 if (!irqchip_in_kernel(vcpu->kvm)) {
ad312c7c
ZX
3608 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
3609 sizeof vcpu->arch.irq_pending);
3610 vcpu->arch.irq_summary = 0;
3611 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
3612 if (vcpu->arch.irq_pending[i])
3613 __set_bit(i, &vcpu->arch.irq_summary);
b6c7a5dc
HB
3614 } else {
3615 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
3616 pending_vec = find_first_bit(
3617 (const unsigned long *)sregs->interrupt_bitmap,
3618 max_bits);
3619 /* Only pending external irq is handled here */
3620 if (pending_vec < max_bits) {
3621 kvm_x86_ops->set_irq(vcpu, pending_vec);
3622 pr_debug("Set back pending irq %d\n",
3623 pending_vec);
3624 }
3625 }
3626
3e6e0aab
GT
3627 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3628 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3629 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3630 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3631 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3632 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 3633
3e6e0aab
GT
3634 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3635 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc
HB
3636
3637 vcpu_put(vcpu);
3638
3639 return 0;
3640}
3641
3642int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
3643 struct kvm_debug_guest *dbg)
3644{
3645 int r;
3646
3647 vcpu_load(vcpu);
3648
3649 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
3650
3651 vcpu_put(vcpu);
3652
3653 return r;
3654}
3655
d0752060
HB
3656/*
3657 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
3658 * we have asm/x86/processor.h
3659 */
3660struct fxsave {
3661 u16 cwd;
3662 u16 swd;
3663 u16 twd;
3664 u16 fop;
3665 u64 rip;
3666 u64 rdp;
3667 u32 mxcsr;
3668 u32 mxcsr_mask;
3669 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
3670#ifdef CONFIG_X86_64
3671 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
3672#else
3673 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
3674#endif
3675};
3676
8b006791
ZX
3677/*
3678 * Translate a guest virtual address to a guest physical address.
3679 */
3680int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3681 struct kvm_translation *tr)
3682{
3683 unsigned long vaddr = tr->linear_address;
3684 gpa_t gpa;
3685
3686 vcpu_load(vcpu);
72dc67a6 3687 down_read(&vcpu->kvm->slots_lock);
ad312c7c 3688 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
72dc67a6 3689 up_read(&vcpu->kvm->slots_lock);
8b006791
ZX
3690 tr->physical_address = gpa;
3691 tr->valid = gpa != UNMAPPED_GVA;
3692 tr->writeable = 1;
3693 tr->usermode = 0;
8b006791
ZX
3694 vcpu_put(vcpu);
3695
3696 return 0;
3697}
3698
d0752060
HB
3699int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3700{
ad312c7c 3701 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
3702
3703 vcpu_load(vcpu);
3704
3705 memcpy(fpu->fpr, fxsave->st_space, 128);
3706 fpu->fcw = fxsave->cwd;
3707 fpu->fsw = fxsave->swd;
3708 fpu->ftwx = fxsave->twd;
3709 fpu->last_opcode = fxsave->fop;
3710 fpu->last_ip = fxsave->rip;
3711 fpu->last_dp = fxsave->rdp;
3712 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
3713
3714 vcpu_put(vcpu);
3715
3716 return 0;
3717}
3718
3719int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3720{
ad312c7c 3721 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
3722
3723 vcpu_load(vcpu);
3724
3725 memcpy(fxsave->st_space, fpu->fpr, 128);
3726 fxsave->cwd = fpu->fcw;
3727 fxsave->swd = fpu->fsw;
3728 fxsave->twd = fpu->ftwx;
3729 fxsave->fop = fpu->last_opcode;
3730 fxsave->rip = fpu->last_ip;
3731 fxsave->rdp = fpu->last_dp;
3732 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
3733
3734 vcpu_put(vcpu);
3735
3736 return 0;
3737}
3738
3739void fx_init(struct kvm_vcpu *vcpu)
3740{
3741 unsigned after_mxcsr_mask;
3742
bc1a34f1
AA
3743 /*
3744 	 * Touch the fpu the first time in a non-atomic context: if this
3745 	 * is the first fpu instruction, the exception handler will fire
3746 	 * before the instruction returns and will have to allocate ram
3747 	 * with GFP_KERNEL.
3748 */
3749 if (!used_math())
d6e88aec 3750 kvm_fx_save(&vcpu->arch.host_fx_image);
bc1a34f1 3751
d0752060
HB
3752 /* Initialize guest FPU by resetting ours and saving into guest's */
3753 preempt_disable();
d6e88aec
AK
3754 kvm_fx_save(&vcpu->arch.host_fx_image);
3755 kvm_fx_finit();
3756 kvm_fx_save(&vcpu->arch.guest_fx_image);
3757 kvm_fx_restore(&vcpu->arch.host_fx_image);
d0752060
HB
3758 preempt_enable();
3759
ad312c7c 3760 vcpu->arch.cr0 |= X86_CR0_ET;
d0752060 3761 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
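	/*
	 * Everything from st_space onward (the FP/SSE register contents)
	 * is zeroed below; the header up to mxcsr_mask, captured by the
	 * finit/save sequence above, is kept, with MXCSR forced to its
	 * power-on default of 0x1f80.
	 */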
ad312c7c
ZX
3762 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
3763 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
d0752060
HB
3764 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
3765}
3766EXPORT_SYMBOL_GPL(fx_init);
3767
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

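/*
 * After kvm_arch_vcpu_create() succeeds, the common code calls
 * kvm_arch_vcpu_setup(), which resets the vcpu and sets up its MMU;
 * on failure the vcpu is freed again via kvm_x86_ops->vcpu_free().
 */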
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	/* One zeroed page shared with userspace for emulated PIO data. */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/*
	 * To keep backward compatibility with older userspace, x86 needs
	 * to handle the !user_alloc case as well.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_SHARED | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}
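/*
 * The hook above runs as part of the KVM_SET_USER_MEMORY_REGION vm ioctl.
 * A minimal sketch (hypothetical userspace code, not part of this file;
 * assumes an open vm fd "vm_fd" and a host buffer "addr" of "size" bytes):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,
 *		.userspace_addr  = (__u64)addr,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */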

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
}

/*
 * The IPI handler is deliberately (almost) empty: the interrupt itself is
 * enough to force the target CPU out of guest mode.
 */
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
#endif
}

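/*
 * Wake a halted vcpu, and if the vcpu is currently executing guest code on
 * another physical CPU, send it an empty IPI so that it takes a VM exit
 * and notices pending work.
 */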
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
	/*
	 * We may be called synchronously with irqs disabled in guest mode,
	 * so there is no need to call smp_call_function_single() in that
	 * case (the vcpu is already running on this CPU).
	 */
	if (vcpu->guest_mode && vcpu->cpu != cpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}