KVM: Timer event should not unconditionally unhalt vcpu.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / x86 / kvm / x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "request_nmi", VCPU_STAT(request_nmi_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

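/*
 * segment_base() below reassembles the descriptor base from its split
 * fields: base0 holds bits 15:0, base1 bits 23:16 and base2 bits 31:24.
 * In long mode, system descriptors (LDT and TSS, types 2, 9 and 11) are
 * 16 bytes wide and carry bits 63:32 of the base in base3.
 */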
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

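/*
 * kvm_inject_page_fault() below follows the hardware escalation rules:
 * a page fault raised while a page fault is already pending becomes a
 * double fault, and a fault raised while a double fault is pending
 * becomes a triple fault, which shuts the guest down.
 */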
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;

	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
					" double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

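/*
 * The reserved-bit mask 0xfffffff0000001e6ull used below rejects a
 * present PDPTE with any of bits 2:1, 8:5, or 63:36 set (a 36-bit
 * physical address width is assumed here).
 */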
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_sync_global(vcpu);
	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->arch.cr4;
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_sync_global(vcpu);
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

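/*
 * kvm_write_wall_clock() below uses a seqlock-style protocol: the
 * version field is odd while the structure is being rewritten and even
 * once the update is complete, so a guest that reads the same even
 * version before and after copying the payload knows it saw a
 * consistent snapshot.
 */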
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

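/*
 * div_frac() computes the 0.32 fixed-point fraction dividend/divisor,
 * i.e. the portable-C expression
 * (uint32_t)(((uint64_t)dividend << 32) / divisor).
 */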
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

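/*
 * kvm_set_time_scale() below normalizes the TSC rate so that the
 * ticks-per-second value lands in (1e9, 2e9], which puts
 * tsc_to_system_mul in [2^31, 2^32).  Worked example: for a 2 GHz TSC
 * (tsc_khz = 2000000), tps64 = 2e9 is already in range, so tsc_shift
 * stays 0 and tsc_to_system_mul = div_frac(1e9, 2e9) = 2^31; the guest
 * then scales a TSC delta by (delta * 2^31) >> 32 = delta / 2, i.e.
 * 0.5 ns per cycle as expected.
 */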
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

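/*
 * The guest reconstructs the current time from the record written by
 * kvm_write_guest_time() as system_time plus the scaled difference
 * between its current TSC and tsc_timestamp; sampling the TSC and
 * ktime back to back with interrupts disabled keeps that pair
 * consistent.
 */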
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if ((!vcpu->time_page))
		return;

	preempt_disable();
	if (unlikely(vcpu->hv_clock_tsc_khz != __get_cpu_var(cpu_tsc_khz))) {
		kvm_set_time_scale(__get_cpu_var(cpu_tsc_khz), &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = __get_cpu_var(cpu_tsc_khz);
	}
	preempt_enable();

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
			  &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

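/*
 * Variable-range MTRRs occupy MSRs 0x200..0x2ff as base/mask pairs: an
 * even offset is IA32_MTRR_PHYSBASEn, the following odd offset the
 * matching IA32_MTRR_PHYSMASKn.  This is what the idx/is_mtrr_mask
 * decode below recovers.
 */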
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_MC0_MISC+20:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_ASSIGN_DEV_IRQ:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

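/*
 * Bit 20 of CPUID.80000001H:EDX advertises NX.  If the host kernel
 * runs with EFER.NX clear, the guest must not see the feature either,
 * or it would try to use NX page-table bits the host cannot honor.
 */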
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		(is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_FXSR_OPT) |
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) |
		bit(X86_FEATURE_SVM);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

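/*
 * Supported-CPUID enumeration runs in two passes: the standard leaves
 * 0..CPUID.0H:EAX, then the extended leaves
 * 0x80000000..CPUID.80000000H:EAX, with each leaf masked down to the
 * feature bits KVM can actually virtualize.
 */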
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
					     struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_lapic_state *lapic = NULL;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = -EFAULT;
		if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	};
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	if (lapic)
		kfree(lapic);
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	spin_unlock(&kvm->mmu_lock);
	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

1690
1691static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1692{
1693 int r;
1694
1695 r = 0;
1696 switch (chip->chip_id) {
1697 case KVM_IRQCHIP_PIC_MASTER:
1698 memcpy(&chip->chip.pic,
1699 &pic_irqchip(kvm)->pics[0],
1700 sizeof(struct kvm_pic_state));
1701 break;
1702 case KVM_IRQCHIP_PIC_SLAVE:
1703 memcpy(&chip->chip.pic,
1704 &pic_irqchip(kvm)->pics[1],
1705 sizeof(struct kvm_pic_state));
1706 break;
1707 case KVM_IRQCHIP_IOAPIC:
1708 memcpy(&chip->chip.ioapic,
1709 ioapic_irqchip(kvm),
1710 sizeof(struct kvm_ioapic_state));
1711 break;
1712 default:
1713 r = -EINVAL;
1714 break;
1715 }
1716 return r;
1717}
1718
1719static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1720{
1721 int r;
1722
1723 r = 0;
1724 switch (chip->chip_id) {
1725 case KVM_IRQCHIP_PIC_MASTER:
1726 memcpy(&pic_irqchip(kvm)->pics[0],
1727 &chip->chip.pic,
1728 sizeof(struct kvm_pic_state));
1729 break;
1730 case KVM_IRQCHIP_PIC_SLAVE:
1731 memcpy(&pic_irqchip(kvm)->pics[1],
1732 &chip->chip.pic,
1733 sizeof(struct kvm_pic_state));
1734 break;
1735 case KVM_IRQCHIP_IOAPIC:
1736 memcpy(ioapic_irqchip(kvm),
1737 &chip->chip.ioapic,
1738 sizeof(struct kvm_ioapic_state));
1739 break;
1740 default:
1741 r = -EINVAL;
1742 break;
1743 }
1744 kvm_pic_update_irq(pic_irqchip(kvm));
1745 return r;
1746}
1747
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
	if (!kvm->arch.vpit)
		return -ENXIO;
	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
	return 0;
}

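/*
 * Dirty logging relies on write protection: once the bitmap has been
 * handed to userspace, write access to the slot is removed so that the
 * next guest write faults and marks its page dirty again.  Each call
 * therefore reports the pages touched since the previous one.
 */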
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}

1fe779f8
CO
1805long kvm_arch_vm_ioctl(struct file *filp,
1806 unsigned int ioctl, unsigned long arg)
1807{
1808 struct kvm *kvm = filp->private_data;
1809 void __user *argp = (void __user *)arg;
1810 int r = -EINVAL;
f0d66275
DH
1811 /*
1812 * This union makes it completely explicit to gcc-3.x
1813 * that these two variables' stack usage should be
1814 * combined, not added together.
1815 */
1816 union {
1817 struct kvm_pit_state ps;
1818 struct kvm_memory_alias alias;
1819 } u;
1fe779f8
CO
1820
1821 switch (ioctl) {
1822 case KVM_SET_TSS_ADDR:
1823 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1824 if (r < 0)
1825 goto out;
1826 break;
1827 case KVM_SET_MEMORY_REGION: {
1828 struct kvm_memory_region kvm_mem;
1829 struct kvm_userspace_memory_region kvm_userspace_mem;
1830
1831 r = -EFAULT;
1832 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1833 goto out;
1834 kvm_userspace_mem.slot = kvm_mem.slot;
1835 kvm_userspace_mem.flags = kvm_mem.flags;
1836 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1837 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1838 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1839 if (r)
1840 goto out;
1841 break;
1842 }
1843 case KVM_SET_NR_MMU_PAGES:
1844 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1845 if (r)
1846 goto out;
1847 break;
1848 case KVM_GET_NR_MMU_PAGES:
1849 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1850 break;
f0d66275 1851 case KVM_SET_MEMORY_ALIAS:
1fe779f8 1852 r = -EFAULT;
f0d66275 1853 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1fe779f8 1854 goto out;
f0d66275 1855 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1fe779f8
CO
1856 if (r)
1857 goto out;
1858 break;
1fe779f8
CO
1859 case KVM_CREATE_IRQCHIP:
1860 r = -ENOMEM;
d7deeeb0
ZX
1861 kvm->arch.vpic = kvm_create_pic(kvm);
1862 if (kvm->arch.vpic) {
1fe779f8
CO
1863 r = kvm_ioapic_init(kvm);
1864 if (r) {
d7deeeb0
ZX
1865 kfree(kvm->arch.vpic);
1866 kvm->arch.vpic = NULL;
1fe779f8
CO
1867 goto out;
1868 }
1869 } else
1870 goto out;
399ec807
AK
1871 r = kvm_setup_default_irq_routing(kvm);
1872 if (r) {
1873 kfree(kvm->arch.vpic);
1874 kfree(kvm->arch.vioapic);
1875 goto out;
1876 }
1fe779f8 1877 break;
7837699f 1878 case KVM_CREATE_PIT:
269e05e4
AK
1879 mutex_lock(&kvm->lock);
1880 r = -EEXIST;
1881 if (kvm->arch.vpit)
1882 goto create_pit_unlock;
7837699f
SY
1883 r = -ENOMEM;
1884 kvm->arch.vpit = kvm_create_pit(kvm);
1885 if (kvm->arch.vpit)
1886 r = 0;
269e05e4
AK
1887 create_pit_unlock:
1888 mutex_unlock(&kvm->lock);
7837699f 1889 break;
4925663a 1890 case KVM_IRQ_LINE_STATUS:
1fe779f8
CO
1891 case KVM_IRQ_LINE: {
1892 struct kvm_irq_level irq_event;
1893
1894 r = -EFAULT;
1895 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1896 goto out;
1897 if (irqchip_in_kernel(kvm)) {
4925663a 1898 __s32 status;
1fe779f8 1899 mutex_lock(&kvm->lock);
4925663a
GN
1900 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1901 irq_event.irq, irq_event.level);
1fe779f8 1902 mutex_unlock(&kvm->lock);
4925663a
GN
1903 if (ioctl == KVM_IRQ_LINE_STATUS) {
1904 irq_event.status = status;
1905 if (copy_to_user(argp, &irq_event,
1906 sizeof irq_event))
1907 goto out;
1908 }
1fe779f8
CO
1909 r = 0;
1910 }
1911 break;
1912 }
1913 case KVM_GET_IRQCHIP: {
1914 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 1915 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 1916
f0d66275
DH
1917 r = -ENOMEM;
1918 if (!chip)
1fe779f8 1919 goto out;
f0d66275
DH
1920 r = -EFAULT;
1921 if (copy_from_user(chip, argp, sizeof *chip))
1922 goto get_irqchip_out;
1fe779f8
CO
1923 r = -ENXIO;
1924 if (!irqchip_in_kernel(kvm))
f0d66275
DH
1925 goto get_irqchip_out;
1926 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1fe779f8 1927 if (r)
f0d66275 1928 goto get_irqchip_out;
1fe779f8 1929 r = -EFAULT;
f0d66275
DH
1930 if (copy_to_user(argp, chip, sizeof *chip))
1931 goto get_irqchip_out;
1fe779f8 1932 r = 0;
f0d66275
DH
1933 get_irqchip_out:
1934 kfree(chip);
1935 if (r)
1936 goto out;
1fe779f8
CO
1937 break;
1938 }
1939 case KVM_SET_IRQCHIP: {
1940 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 1941 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 1942
f0d66275
DH
1943 r = -ENOMEM;
1944 if (!chip)
1fe779f8 1945 goto out;
f0d66275
DH
1946 r = -EFAULT;
1947 if (copy_from_user(chip, argp, sizeof *chip))
1948 goto set_irqchip_out;
1fe779f8
CO
1949 r = -ENXIO;
1950 if (!irqchip_in_kernel(kvm))
f0d66275
DH
1951 goto set_irqchip_out;
1952 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1fe779f8 1953 if (r)
f0d66275 1954 goto set_irqchip_out;
1fe779f8 1955 r = 0;
f0d66275
DH
1956 set_irqchip_out:
1957 kfree(chip);
1958 if (r)
1959 goto out;
1fe779f8
CO
1960 break;
1961 }
e0f63cb9 1962 case KVM_GET_PIT: {
e0f63cb9 1963 r = -EFAULT;
f0d66275 1964 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
1965 goto out;
1966 r = -ENXIO;
1967 if (!kvm->arch.vpit)
1968 goto out;
f0d66275 1969 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
e0f63cb9
SY
1970 if (r)
1971 goto out;
1972 r = -EFAULT;
f0d66275 1973 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
1974 goto out;
1975 r = 0;
1976 break;
1977 }
1978 case KVM_SET_PIT: {
e0f63cb9 1979 r = -EFAULT;
f0d66275 1980 if (copy_from_user(&u.ps, argp, sizeof u.ps))
e0f63cb9
SY
1981 goto out;
1982 r = -ENXIO;
1983 if (!kvm->arch.vpit)
1984 goto out;
f0d66275 1985 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
e0f63cb9
SY
1986 if (r)
1987 goto out;
1988 r = 0;
1989 break;
1990 }
52d939a0
MT
1991 case KVM_REINJECT_CONTROL: {
1992 struct kvm_reinject_control control;
1993 r = -EFAULT;
1994 if (copy_from_user(&control, argp, sizeof(control)))
1995 goto out;
1996 r = kvm_vm_ioctl_reinject(kvm, &control);
1997 if (r)
1998 goto out;
1999 r = 0;
2000 break;
2001 }
1fe779f8
CO
2002 default:
2003 ;
2004 }
2005out:
2006 return r;
2007}
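/*
 * A sketch of how userspace might drive the ioctls above (illustrative,
 * not part of this file; "vm_fd" is assumed to come from KVM_CREATE_VM
 * and error handling is omitted):
 *
 *	ioctl(vm_fd, KVM_CREATE_IRQCHIP);
 *	ioctl(vm_fd, KVM_CREATE_PIT);
 *	struct kvm_reinject_control control = { .pit_reinject = 1 };
 *	ioctl(vm_fd, KVM_REINJECT_CONTROL, &control);
 */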
2008
a16b043c 2009static void kvm_init_msr_list(void)
043405e1
CO
2010{
2011 u32 dummy[2];
2012 unsigned i, j;
2013
2014 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2015 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2016 continue;
2017 if (j < i)
2018 msrs_to_save[j] = msrs_to_save[i];
2019 j++;
2020 }
2021 num_msrs_to_save = j;
2022}
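/*
 * The loop above is the usual in-place filter-and-compact idiom: entries
 * whose rdmsr faults are skipped, survivors are copied down to index j.
 * Illustrative values: if msrs_to_save[] held { A, B, C } and reading B
 * faulted, the array would end up { A, C, C } with num_msrs_to_save = 2.
 */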
2023
bbd9b64e
CO
2024/*
2025 * Only the apic needs an MMIO device hook, so take a shortcut for now.
2026 */
2027static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
92760499
LV
2028 gpa_t addr, int len,
2029 int is_write)
bbd9b64e
CO
2030{
2031 struct kvm_io_device *dev;
2032
ad312c7c
ZX
2033 if (vcpu->arch.apic) {
2034 dev = &vcpu->arch.apic->dev;
92760499 2035 if (dev->in_range(dev, addr, len, is_write))
bbd9b64e
CO
2036 return dev;
2037 }
2038 return NULL;
2039}
2040
2041
2042static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
92760499
LV
2043 gpa_t addr, int len,
2044 int is_write)
bbd9b64e
CO
2045{
2046 struct kvm_io_device *dev;
2047
92760499 2048 dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
bbd9b64e 2049 if (dev == NULL)
92760499
LV
2050 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2051 is_write);
bbd9b64e
CO
2052 return dev;
2053}
2054
cded19f3
HE
2055static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2056 struct kvm_vcpu *vcpu)
bbd9b64e
CO
2057{
2058 void *data = val;
10589a46 2059 int r = X86EMUL_CONTINUE;
bbd9b64e
CO
2060
2061 while (bytes) {
ad312c7c 2062 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e 2063 unsigned offset = addr & (PAGE_SIZE-1);
77c2002e 2064 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
bbd9b64e
CO
2065 int ret;
2066
10589a46
MT
2067 if (gpa == UNMAPPED_GVA) {
2068 r = X86EMUL_PROPAGATE_FAULT;
2069 goto out;
2070 }
77c2002e 2071 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
10589a46
MT
2072 if (ret < 0) {
2073 r = X86EMUL_UNHANDLEABLE;
2074 goto out;
2075 }
bbd9b64e 2076
77c2002e
IE
2077 bytes -= toread;
2078 data += toread;
2079 addr += toread;
bbd9b64e 2080 }
10589a46 2081out:
10589a46 2082 return r;
bbd9b64e 2083}
77c2002e 2084
cded19f3
HE
2085static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2086 struct kvm_vcpu *vcpu)
77c2002e
IE
2087{
2088 void *data = val;
2089 int r = X86EMUL_CONTINUE;
2090
2091 while (bytes) {
2092 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2093 unsigned offset = addr & (PAGE_SIZE-1);
2094 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2095 int ret;
2096
2097 if (gpa == UNMAPPED_GVA) {
2098 r = X86EMUL_PROPAGATE_FAULT;
2099 goto out;
2100 }
2101 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2102 if (ret < 0) {
2103 r = X86EMUL_UNHANDLEABLE;
2104 goto out;
2105 }
2106
2107 bytes -= towrite;
2108 data += towrite;
2109 addr += towrite;
2110 }
2111out:
2112 return r;
2113}
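/*
 * Both helpers above chop a virtual range at page boundaries, since each
 * page must be translated separately.  Illustrative arithmetic with 4K
 * pages: for addr = 0x1ff8 and bytes = 16, the first pass has
 * offset = 0xff8 and towrite = min(16, 0x1000 - 0xff8) = 8; the
 * remaining 8 bytes are handled on the next iteration at addr = 0x2000.
 */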
2114
bbd9b64e 2115
bbd9b64e
CO
2116static int emulator_read_emulated(unsigned long addr,
2117 void *val,
2118 unsigned int bytes,
2119 struct kvm_vcpu *vcpu)
2120{
2121 struct kvm_io_device *mmio_dev;
2122 gpa_t gpa;
2123
2124 if (vcpu->mmio_read_completed) {
2125 memcpy(val, vcpu->mmio_data, bytes);
2126 vcpu->mmio_read_completed = 0;
2127 return X86EMUL_CONTINUE;
2128 }
2129
ad312c7c 2130 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e
CO
2131
2132 /* For APIC access vmexit */
2133 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2134 goto mmio;
2135
77c2002e
IE
2136 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2137 == X86EMUL_CONTINUE)
bbd9b64e
CO
2138 return X86EMUL_CONTINUE;
2139 if (gpa == UNMAPPED_GVA)
2140 return X86EMUL_PROPAGATE_FAULT;
2141
2142mmio:
2143 /*
2144 * Is this MMIO handled locally?
2145 */
10589a46 2146 mutex_lock(&vcpu->kvm->lock);
92760499 2147 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
bbd9b64e
CO
2148 if (mmio_dev) {
2149 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
10589a46 2150 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2151 return X86EMUL_CONTINUE;
2152 }
10589a46 2153 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2154
2155 vcpu->mmio_needed = 1;
2156 vcpu->mmio_phys_addr = gpa;
2157 vcpu->mmio_size = bytes;
2158 vcpu->mmio_is_write = 0;
2159
2160 return X86EMUL_UNHANDLEABLE;
2161}
2162
3200f405 2163int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
9f811285 2164 const void *val, int bytes)
bbd9b64e
CO
2165{
2166 int ret;
2167
2168 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
9f811285 2169 if (ret < 0)
bbd9b64e 2170 return 0;
ad218f85 2171 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
bbd9b64e
CO
2172 return 1;
2173}
2174
2175static int emulator_write_emulated_onepage(unsigned long addr,
2176 const void *val,
2177 unsigned int bytes,
2178 struct kvm_vcpu *vcpu)
2179{
2180 struct kvm_io_device *mmio_dev;
10589a46
MT
2181 gpa_t gpa;
2182
10589a46 2183 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
bbd9b64e
CO
2184
2185 if (gpa == UNMAPPED_GVA) {
c3c91fee 2186 kvm_inject_page_fault(vcpu, addr, 2);
bbd9b64e
CO
2187 return X86EMUL_PROPAGATE_FAULT;
2188 }
2189
2190 /* For APIC access vmexit */
2191 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2192 goto mmio;
2193
2194 if (emulator_write_phys(vcpu, gpa, val, bytes))
2195 return X86EMUL_CONTINUE;
2196
2197mmio:
2198 /*
2199 * Is this MMIO handled locally?
2200 */
10589a46 2201 mutex_lock(&vcpu->kvm->lock);
92760499 2202 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
bbd9b64e
CO
2203 if (mmio_dev) {
2204 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
10589a46 2205 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2206 return X86EMUL_CONTINUE;
2207 }
10589a46 2208 mutex_unlock(&vcpu->kvm->lock);
bbd9b64e
CO
2209
2210 vcpu->mmio_needed = 1;
2211 vcpu->mmio_phys_addr = gpa;
2212 vcpu->mmio_size = bytes;
2213 vcpu->mmio_is_write = 1;
2214 memcpy(vcpu->mmio_data, val, bytes);
2215
2216 return X86EMUL_CONTINUE;
2217}
2218
2219int emulator_write_emulated(unsigned long addr,
2220 const void *val,
2221 unsigned int bytes,
2222 struct kvm_vcpu *vcpu)
2223{
2224 /* Crossing a page boundary? */
2225 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2226 int rc, now;
2227
2228 now = -addr & ~PAGE_MASK;
2229 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2230 if (rc != X86EMUL_CONTINUE)
2231 return rc;
2232 addr += now;
2233 val += now;
2234 bytes -= now;
2235 }
2236 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2237}
2238EXPORT_SYMBOL_GPL(emulator_write_emulated);
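/*
 * The page-split logic above relies on "now = -addr & ~PAGE_MASK" being
 * the distance to the next page boundary.  Illustrative example with 4K
 * pages: for addr = 0x12ffe and bytes = 4, now = -0x12ffe & 0xfff = 2,
 * so 2 bytes go to the first page and the remaining 2 to the next one.
 */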
2239
2240static int emulator_cmpxchg_emulated(unsigned long addr,
2241 const void *old,
2242 const void *new,
2243 unsigned int bytes,
2244 struct kvm_vcpu *vcpu)
2245{
2246 static int reported;
2247
2248 if (!reported) {
2249 reported = 1;
2250 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2251 }
2bacc55c
MT
2252#ifndef CONFIG_X86_64
2253 /* a guest's cmpxchg8b has to be emulated atomically */
2254 if (bytes == 8) {
10589a46 2255 gpa_t gpa;
2bacc55c 2256 struct page *page;
c0b49b0d 2257 char *kaddr;
2bacc55c
MT
2258 u64 val;
2259
10589a46
MT
2260 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2261
2bacc55c
MT
2262 if (gpa == UNMAPPED_GVA ||
2263 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2264 goto emul_write;
2265
2266 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2267 goto emul_write;
2268
2269 val = *(u64 *)new;
72dc67a6 2270
2bacc55c 2271 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
72dc67a6 2272
c0b49b0d
AM
2273 kaddr = kmap_atomic(page, KM_USER0);
2274 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2275 kunmap_atomic(kaddr, KM_USER0);
2bacc55c
MT
2276 kvm_release_page_dirty(page);
2277 }
3200f405 2278emul_write:
2bacc55c
MT
2279#endif
2280
bbd9b64e
CO
2281 return emulator_write_emulated(addr, new, bytes, vcpu);
2282}
2283
2284static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2285{
2286 return kvm_x86_ops->get_segment_base(vcpu, seg);
2287}
2288
2289int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2290{
a7052897 2291 kvm_mmu_invlpg(vcpu, address);
bbd9b64e
CO
2292 return X86EMUL_CONTINUE;
2293}
2294
2295int emulate_clts(struct kvm_vcpu *vcpu)
2296{
54e445ca 2297 KVMTRACE_0D(CLTS, vcpu, handler);
ad312c7c 2298 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
bbd9b64e
CO
2299 return X86EMUL_CONTINUE;
2300}
2301
2302int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2303{
2304 struct kvm_vcpu *vcpu = ctxt->vcpu;
2305
2306 switch (dr) {
2307 case 0 ... 3:
2308 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2309 return X86EMUL_CONTINUE;
2310 default:
b8688d51 2311 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
bbd9b64e
CO
2312 return X86EMUL_UNHANDLEABLE;
2313 }
2314}
2315
2316int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2317{
2318 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2319 int exception;
2320
2321 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2322 if (exception) {
2323 /* FIXME: better handling */
2324 return X86EMUL_UNHANDLEABLE;
2325 }
2326 return X86EMUL_CONTINUE;
2327}
2328
2329void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2330{
bbd9b64e 2331 u8 opcodes[4];
5fdbf976 2332 unsigned long rip = kvm_rip_read(vcpu);
bbd9b64e
CO
2333 unsigned long rip_linear;
2334
f76c710d 2335 if (!printk_ratelimit())
bbd9b64e
CO
2336 return;
2337
25be4608
GC
2338 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2339
77c2002e 2340 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
bbd9b64e
CO
2341
2342 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2343 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
bbd9b64e
CO
2344}
2345EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2346
14af3f3c 2347static struct x86_emulate_ops emulate_ops = {
77c2002e 2348 .read_std = kvm_read_guest_virt,
bbd9b64e
CO
2349 .read_emulated = emulator_read_emulated,
2350 .write_emulated = emulator_write_emulated,
2351 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2352};
2353
5fdbf976
MT
2354static void cache_all_regs(struct kvm_vcpu *vcpu)
2355{
2356 kvm_register_read(vcpu, VCPU_REGS_RAX);
2357 kvm_register_read(vcpu, VCPU_REGS_RSP);
2358 kvm_register_read(vcpu, VCPU_REGS_RIP);
2359 vcpu->arch.regs_dirty = ~0;
2360}
2361
bbd9b64e
CO
2362int emulate_instruction(struct kvm_vcpu *vcpu,
2363 struct kvm_run *run,
2364 unsigned long cr2,
2365 u16 error_code,
571008da 2366 int emulation_type)
bbd9b64e
CO
2367{
2368 int r;
571008da 2369 struct decode_cache *c;
bbd9b64e 2370
26eef70c 2371 kvm_clear_exception_queue(vcpu);
ad312c7c 2372 vcpu->arch.mmio_fault_cr2 = cr2;
5fdbf976
MT
2373 /*
2374 * TODO: fix x86_emulate.c to use guest_read/write_register
2375 * instead of direct ->regs accesses; this can save hundreds of cycles
2376 * on Intel for instructions that don't read/change RSP, for
2377 * example.
2378 */
2379 cache_all_regs(vcpu);
bbd9b64e
CO
2380
2381 vcpu->mmio_is_write = 0;
ad312c7c 2382 vcpu->arch.pio.string = 0;
bbd9b64e 2383
571008da 2384 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
bbd9b64e
CO
2385 int cs_db, cs_l;
2386 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2387
ad312c7c
ZX
2388 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2389 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2390 vcpu->arch.emulate_ctxt.mode =
2391 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
bbd9b64e
CO
2392 ? X86EMUL_MODE_REAL : cs_l
2393 ? X86EMUL_MODE_PROT64 : cs_db
2394 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2395
ad312c7c 2396 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
571008da
SY
2397
2398 /* Reject instructions other than VMCALL/VMMCALL when
2399 * trying to emulate an invalid opcode */
2400 c = &vcpu->arch.emulate_ctxt.decode;
2401 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2402 (!(c->twobyte && c->b == 0x01 &&
2403 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2404 c->modrm_mod == 3 && c->modrm_rm == 1)))
2405 return EMULATE_FAIL;
2406
f2b5756b 2407 ++vcpu->stat.insn_emulation;
bbd9b64e 2408 if (r) {
f2b5756b 2409 ++vcpu->stat.insn_emulation_fail;
bbd9b64e
CO
2410 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2411 return EMULATE_DONE;
2412 return EMULATE_FAIL;
2413 }
2414 }
2415
ad312c7c 2416 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
bbd9b64e 2417
ad312c7c 2418 if (vcpu->arch.pio.string)
bbd9b64e
CO
2419 return EMULATE_DO_MMIO;
2420
2421 if ((r || vcpu->mmio_is_write) && run) {
2422 run->exit_reason = KVM_EXIT_MMIO;
2423 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2424 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2425 run->mmio.len = vcpu->mmio_size;
2426 run->mmio.is_write = vcpu->mmio_is_write;
2427 }
2428
2429 if (r) {
2430 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2431 return EMULATE_DONE;
2432 if (!vcpu->mmio_needed) {
2433 kvm_report_emulation_failure(vcpu, "mmio");
2434 return EMULATE_FAIL;
2435 }
2436 return EMULATE_DO_MMIO;
2437 }
2438
ad312c7c 2439 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
bbd9b64e
CO
2440
2441 if (vcpu->mmio_is_write) {
2442 vcpu->mmio_needed = 0;
2443 return EMULATE_DO_MMIO;
2444 }
2445
2446 return EMULATE_DONE;
2447}
2448EXPORT_SYMBOL_GPL(emulate_instruction);
2449
de7d789a
CO
2450static int pio_copy_data(struct kvm_vcpu *vcpu)
2451{
ad312c7c 2452 void *p = vcpu->arch.pio_data;
0f346074 2453 gva_t q = vcpu->arch.pio.guest_gva;
de7d789a 2454 unsigned bytes;
0f346074 2455 int ret;
de7d789a 2456
ad312c7c
ZX
2457 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2458 if (vcpu->arch.pio.in)
0f346074 2459 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
de7d789a 2460 else
0f346074
IE
2461 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2462 return ret;
de7d789a
CO
2463}
2464
2465int complete_pio(struct kvm_vcpu *vcpu)
2466{
ad312c7c 2467 struct kvm_pio_request *io = &vcpu->arch.pio;
de7d789a
CO
2468 long delta;
2469 int r;
5fdbf976 2470 unsigned long val;
de7d789a
CO
2471
2472 if (!io->string) {
5fdbf976
MT
2473 if (io->in) {
2474 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2475 memcpy(&val, vcpu->arch.pio_data, io->size);
2476 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2477 }
de7d789a
CO
2478 } else {
2479 if (io->in) {
2480 r = pio_copy_data(vcpu);
5fdbf976 2481 if (r)
de7d789a 2482 return r;
de7d789a
CO
2483 }
2484
2485 delta = 1;
2486 if (io->rep) {
2487 delta *= io->cur_count;
2488 /*
2489 * The size of the register should really depend on
2490 * the current address size.
2491 */
5fdbf976
MT
2492 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2493 val -= delta;
2494 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
de7d789a
CO
2495 }
2496 if (io->down)
2497 delta = -delta;
2498 delta *= io->size;
5fdbf976
MT
2499 if (io->in) {
2500 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2501 val += delta;
2502 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2503 } else {
2504 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2505 val += delta;
2506 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2507 }
de7d789a
CO
2508 }
2509
de7d789a
CO
2510 io->count -= io->cur_count;
2511 io->cur_count = 0;
2512
2513 return 0;
2514}
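/*
 * A worked example of the string-I/O register fixup above (illustrative
 * only): for a forward "rep outsb" with io->size = 1, io->cur_count = 4,
 * io->rep = 1 and io->down = 0, RCX is decremented by 4 and RSI advances
 * by 4 * 1 = 4 bytes; an "ins" variant would advance RDI instead.
 */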
2515
2516static void kernel_pio(struct kvm_io_device *pio_dev,
2517 struct kvm_vcpu *vcpu,
2518 void *pd)
2519{
2520 /* TODO: string I/O for in-kernel devices */
2521
2522 mutex_lock(&vcpu->kvm->lock);
ad312c7c
ZX
2523 if (vcpu->arch.pio.in)
2524 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2525 vcpu->arch.pio.size,
de7d789a
CO
2526 pd);
2527 else
ad312c7c
ZX
2528 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2529 vcpu->arch.pio.size,
de7d789a
CO
2530 pd);
2531 mutex_unlock(&vcpu->kvm->lock);
2532}
2533
2534static void pio_string_write(struct kvm_io_device *pio_dev,
2535 struct kvm_vcpu *vcpu)
2536{
ad312c7c
ZX
2537 struct kvm_pio_request *io = &vcpu->arch.pio;
2538 void *pd = vcpu->arch.pio_data;
de7d789a
CO
2539 int i;
2540
2541 mutex_lock(&vcpu->kvm->lock);
2542 for (i = 0; i < io->cur_count; i++) {
2543 kvm_iodevice_write(pio_dev, io->port,
2544 io->size,
2545 pd);
2546 pd += io->size;
2547 }
2548 mutex_unlock(&vcpu->kvm->lock);
2549}
2550
2551static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
92760499
LV
2552 gpa_t addr, int len,
2553 int is_write)
de7d789a 2554{
92760499 2555 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
de7d789a
CO
2556}
2557
2558int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2559 int size, unsigned port)
2560{
2561 struct kvm_io_device *pio_dev;
5fdbf976 2562 unsigned long val;
de7d789a
CO
2563
2564 vcpu->run->exit_reason = KVM_EXIT_IO;
2565 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2566 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2567 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2568 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2569 vcpu->run->io.port = vcpu->arch.pio.port = port;
2570 vcpu->arch.pio.in = in;
2571 vcpu->arch.pio.string = 0;
2572 vcpu->arch.pio.down = 0;
ad312c7c 2573 vcpu->arch.pio.rep = 0;
de7d789a 2574
2714d1d3
FEL
2575 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2576 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2577 handler);
2578 else
2579 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2580 handler);
2581
5fdbf976
MT
2582 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2583 memcpy(vcpu->arch.pio_data, &val, 4);
de7d789a 2584
92760499 2585 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
de7d789a 2586 if (pio_dev) {
ad312c7c 2587 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
de7d789a
CO
2588 complete_pio(vcpu);
2589 return 1;
2590 }
2591 return 0;
2592}
2593EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2594
2595int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2596 int size, unsigned long count, int down,
2597 gva_t address, int rep, unsigned port)
2598{
2599 unsigned now, in_page;
0f346074 2600 int ret = 0;
de7d789a
CO
2601 struct kvm_io_device *pio_dev;
2602
2603 vcpu->run->exit_reason = KVM_EXIT_IO;
2604 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2605 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2606 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2607 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2608 vcpu->run->io.port = vcpu->arch.pio.port = port;
2609 vcpu->arch.pio.in = in;
2610 vcpu->arch.pio.string = 1;
2611 vcpu->arch.pio.down = down;
ad312c7c 2612 vcpu->arch.pio.rep = rep;
de7d789a 2613
2714d1d3
FEL
2614 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2615 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2616 handler);
2617 else
2618 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2619 handler);
2620
de7d789a
CO
2621 if (!count) {
2622 kvm_x86_ops->skip_emulated_instruction(vcpu);
2623 return 1;
2624 }
2625
2626 if (!down)
2627 in_page = PAGE_SIZE - offset_in_page(address);
2628 else
2629 in_page = offset_in_page(address) + size;
2630 now = min(count, (unsigned long)in_page / size);
0f346074 2631 if (!now)
de7d789a 2632 now = 1;
de7d789a
CO
2633 if (down) {
2634 /*
2635 * String I/O in reverse. Yuck. Kill the guest, fix later.
2636 */
2637 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2638 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2639 return 1;
2640 }
2641 vcpu->run->io.count = now;
ad312c7c 2642 vcpu->arch.pio.cur_count = now;
de7d789a 2643
ad312c7c 2644 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
de7d789a
CO
2645 kvm_x86_ops->skip_emulated_instruction(vcpu);
2646
0f346074 2647 vcpu->arch.pio.guest_gva = address;
de7d789a 2648
92760499
LV
2649 pio_dev = vcpu_find_pio_dev(vcpu, port,
2650 vcpu->arch.pio.cur_count,
2651 !vcpu->arch.pio.in);
ad312c7c 2652 if (!vcpu->arch.pio.in) {
de7d789a
CO
2653 /* string PIO write */
2654 ret = pio_copy_data(vcpu);
0f346074
IE
2655 if (ret == X86EMUL_PROPAGATE_FAULT) {
2656 kvm_inject_gp(vcpu, 0);
2657 return 1;
2658 }
2659 if (ret == 0 && pio_dev) {
de7d789a
CO
2660 pio_string_write(pio_dev, vcpu);
2661 complete_pio(vcpu);
ad312c7c 2662 if (vcpu->arch.pio.count == 0)
de7d789a
CO
2663 ret = 1;
2664 }
2665 } else if (pio_dev)
2666 pr_unimpl(vcpu, "no string pio read support yet, "
2667 "port %x size %d count %ld\n",
2668 port, size, count);
2669
2670 return ret;
2671}
2672EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
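/*
 * The "in_page"/"now" clamping above limits one round of string I/O to a
 * single guest page.  Illustrative example with 4K pages: a forward
 * transfer at address = 0xff8 with size = 2 and count = 100 gives
 * in_page = 8 and now = min(100, 8 / 2) = 4, so four elements are moved
 * before the instruction is re-executed for the remainder.
 */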
2673
c8076604
GH
2674static void bounce_off(void *info)
2675{
2676 /* nothing */
2677}
2678
2679static unsigned int ref_freq;
2680static unsigned long tsc_khz_ref;
2681
2682static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
2683 void *data)
2684{
2685 struct cpufreq_freqs *freq = data;
2686 struct kvm *kvm;
2687 struct kvm_vcpu *vcpu;
2688 int i, send_ipi = 0;
2689
2690 if (!ref_freq)
2691 ref_freq = freq->old;
2692
2693 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
2694 return 0;
2695 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
2696 return 0;
2697 per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
2698
2699 spin_lock(&kvm_lock);
2700 list_for_each_entry(kvm, &vm_list, vm_list) {
2701 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2702 vcpu = kvm->vcpus[i];
2703 if (!vcpu)
2704 continue;
2705 if (vcpu->cpu != freq->cpu)
2706 continue;
2707 if (!kvm_request_guest_time_update(vcpu))
2708 continue;
2709 if (vcpu->cpu != smp_processor_id())
2710 send_ipi++;
2711 }
2712 }
2713 spin_unlock(&kvm_lock);
2714
2715 if (freq->old < freq->new && send_ipi) {
2716 /*
2717 * We upscale the frequency. We must make sure the guest
2718 * doesn't see old kvmclock values while running with
2719 * the new frequency, otherwise we risk that the guest sees
2720 * time go backwards.
2721 *
2722 * In case we update the frequency for another cpu
2723 * (which might be in guest context) send an interrupt
2724 * to kick the cpu out of guest context. Next time
2725 * guest context is entered kvmclock will be updated,
2726 * so the guest will not see stale values.
2727 */
2728 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
2729 }
2730 return 0;
2731}
2732
2733static struct notifier_block kvmclock_cpufreq_notifier_block = {
2734 .notifier_call = kvmclock_cpufreq_notifier
2735};
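/*
 * The notifier above rescales the per-cpu TSC rate linearly:
 * cpufreq_scale(tsc_khz_ref, ref_freq, freq->new) computes roughly
 * tsc_khz_ref * freq->new / ref_freq.  Illustrative numbers: with
 * tsc_khz_ref = 2400000 and ref_freq = 2400000 kHz, a switch to
 * freq->new = 1200000 kHz yields a scaled rate of 1200000 kHz.
 */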
2736
f8c16bba 2737int kvm_arch_init(void *opaque)
043405e1 2738{
c8076604 2739 int r, cpu;
f8c16bba
ZX
2740 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2741
f8c16bba
ZX
2742 if (kvm_x86_ops) {
2743 printk(KERN_ERR "kvm: already loaded the other module\n");
56c6d28a
ZX
2744 r = -EEXIST;
2745 goto out;
f8c16bba
ZX
2746 }
2747
2748 if (!ops->cpu_has_kvm_support()) {
2749 printk(KERN_ERR "kvm: no hardware support\n");
56c6d28a
ZX
2750 r = -EOPNOTSUPP;
2751 goto out;
f8c16bba
ZX
2752 }
2753 if (ops->disabled_by_bios()) {
2754 printk(KERN_ERR "kvm: disabled by bios\n");
56c6d28a
ZX
2755 r = -EOPNOTSUPP;
2756 goto out;
f8c16bba
ZX
2757 }
2758
97db56ce
AK
2759 r = kvm_mmu_module_init();
2760 if (r)
2761 goto out;
2762
2763 kvm_init_msr_list();
2764
f8c16bba 2765 kvm_x86_ops = ops;
56c6d28a 2766 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
7b52345e
SY
2767 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2768 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
64d4d521 2769 PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
c8076604
GH
2770
2771 for_each_possible_cpu(cpu)
2772 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
2773 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
2774 tsc_khz_ref = tsc_khz;
2775 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
2776 CPUFREQ_TRANSITION_NOTIFIER);
2777 }
2778
f8c16bba 2779 return 0;
56c6d28a
ZX
2780
2781out:
56c6d28a 2782 return r;
043405e1 2783}
8776e519 2784
f8c16bba
ZX
2785void kvm_arch_exit(void)
2786{
888d256e
JK
2787 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
2788 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
2789 CPUFREQ_TRANSITION_NOTIFIER);
f8c16bba 2790 kvm_x86_ops = NULL;
56c6d28a
ZX
2791 kvm_mmu_module_exit();
2792}
f8c16bba 2793
8776e519
HB
2794int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2795{
2796 ++vcpu->stat.halt_exits;
2714d1d3 2797 KVMTRACE_0D(HLT, vcpu, handler);
8776e519 2798 if (irqchip_in_kernel(vcpu->kvm)) {
a4535290 2799 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
8776e519
HB
2800 return 1;
2801 } else {
2802 vcpu->run->exit_reason = KVM_EXIT_HLT;
2803 return 0;
2804 }
2805}
2806EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2807
2f333bcb
MT
2808static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2809 unsigned long a1)
2810{
2811 if (is_long_mode(vcpu))
2812 return a0;
2813 else
2814 return a0 | ((gpa_t)a1 << 32);
2815}
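/*
 * Illustrative example for hc_gpa(): a 32-bit guest cannot pass a 64-bit
 * guest physical address in one register, so it is split across two.
 * With a0 = 0x89abcdef and a1 = 0x01234567 the reassembled gpa is
 * 0x0123456789abcdef; a long-mode guest passes the whole gpa in a0.
 */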
2816
8776e519
HB
2817int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2818{
2819 unsigned long nr, a0, a1, a2, a3, ret;
2f333bcb 2820 int r = 1;
8776e519 2821
5fdbf976
MT
2822 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
2823 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
2824 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
2825 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
2826 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
8776e519 2827
2714d1d3
FEL
2828 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2829
8776e519
HB
2830 if (!is_long_mode(vcpu)) {
2831 nr &= 0xFFFFFFFF;
2832 a0 &= 0xFFFFFFFF;
2833 a1 &= 0xFFFFFFFF;
2834 a2 &= 0xFFFFFFFF;
2835 a3 &= 0xFFFFFFFF;
2836 }
2837
2838 switch (nr) {
b93463aa
AK
2839 case KVM_HC_VAPIC_POLL_IRQ:
2840 ret = 0;
2841 break;
2f333bcb
MT
2842 case KVM_HC_MMU_OP:
2843 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2844 break;
8776e519
HB
2845 default:
2846 ret = -KVM_ENOSYS;
2847 break;
2848 }
5fdbf976 2849 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
f11c3a8d 2850 ++vcpu->stat.hypercalls;
2f333bcb 2851 return r;
8776e519
HB
2852}
2853EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2854
2855int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2856{
2857 char instruction[3];
2858 int ret = 0;
5fdbf976 2859 unsigned long rip = kvm_rip_read(vcpu);
8776e519 2860
8776e519
HB
2861
2862 /*
2863 * Blow out the MMU so that no other VCPU has an active mapping,
2864 * ensuring that the updated hypercall appears atomically across all
2865 * VCPUs.
2866 */
2867 kvm_mmu_zap_all(vcpu->kvm);
2868
8776e519 2869 kvm_x86_ops->patch_hypercall(vcpu, instruction);
5fdbf976 2870 if (emulator_write_emulated(rip, instruction, 3, vcpu)
8776e519
HB
2871 != X86EMUL_CONTINUE)
2872 ret = -EFAULT;
2873
8776e519
HB
2874 return ret;
2875}
2876
2877static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2878{
2879 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2880}
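/*
 * Illustrative example for mk_cr_64(): the helper keeps the upper half
 * of the current control register and replaces the lower 32 bits, e.g.
 * mk_cr_64(0xffffffff00000000ULL, 0x80000011) == 0xffffffff80000011ULL.
 */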
2881
2882void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2883{
2884 struct descriptor_table dt = { limit, base };
2885
2886 kvm_x86_ops->set_gdt(vcpu, &dt);
2887}
2888
2889void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2890{
2891 struct descriptor_table dt = { limit, base };
2892
2893 kvm_x86_ops->set_idt(vcpu, &dt);
2894}
2895
2896void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2897 unsigned long *rflags)
2898{
2d3ad1f4 2899 kvm_lmsw(vcpu, msw);
8776e519
HB
2900 *rflags = kvm_x86_ops->get_rflags(vcpu);
2901}
2902
2903unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2904{
54e445ca
JR
2905 unsigned long value;
2906
8776e519
HB
2907 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2908 switch (cr) {
2909 case 0:
54e445ca
JR
2910 value = vcpu->arch.cr0;
2911 break;
8776e519 2912 case 2:
54e445ca
JR
2913 value = vcpu->arch.cr2;
2914 break;
8776e519 2915 case 3:
54e445ca
JR
2916 value = vcpu->arch.cr3;
2917 break;
8776e519 2918 case 4:
54e445ca
JR
2919 value = vcpu->arch.cr4;
2920 break;
152ff9be 2921 case 8:
54e445ca
JR
2922 value = kvm_get_cr8(vcpu);
2923 break;
8776e519 2924 default:
b8688d51 2925 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
2926 return 0;
2927 }
54e445ca
JR
2928 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2929 (u32)((u64)value >> 32), handler);
2930
2931 return value;
8776e519
HB
2932}
2933
2934void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2935 unsigned long *rflags)
2936{
54e445ca
JR
2937 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2938 (u32)((u64)val >> 32), handler);
2939
8776e519
HB
2940 switch (cr) {
2941 case 0:
2d3ad1f4 2942 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
8776e519
HB
2943 *rflags = kvm_x86_ops->get_rflags(vcpu);
2944 break;
2945 case 2:
ad312c7c 2946 vcpu->arch.cr2 = val;
8776e519
HB
2947 break;
2948 case 3:
2d3ad1f4 2949 kvm_set_cr3(vcpu, val);
8776e519
HB
2950 break;
2951 case 4:
2d3ad1f4 2952 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 2953 break;
152ff9be 2954 case 8:
2d3ad1f4 2955 kvm_set_cr8(vcpu, val & 0xfUL);
152ff9be 2956 break;
8776e519 2957 default:
b8688d51 2958 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
8776e519
HB
2959 }
2960}
2961
07716717
DK
2962static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2963{
ad312c7c
ZX
2964 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2965 int j, nent = vcpu->arch.cpuid_nent;
07716717
DK
2966
2967 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2968 /* when no next entry is found, the current entry[i] is reselected */
0fdf8e59 2969 for (j = i + 1; ; j = (j + 1) % nent) {
ad312c7c 2970 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
07716717
DK
2971 if (ej->function == e->function) {
2972 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2973 return j;
2974 }
2975 }
2976 return 0; /* silence gcc, even though control never reaches here */
2977}
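/*
 * Illustrative walk of the stateful-CPUID rotation above (assumed entry
 * layout, not from this file): if cpuid_entries[] held function 2 at
 * indices 4, 5 and 6, successive reads would return 4, 5, 6 and then
 * wrap back to 4, with KVM_CPUID_FLAG_STATE_READ_NEXT marking the next
 * candidate each time.
 */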
2978
2979/* find an entry with matching function, matching index (if needed), and that
2980 * should be read next (if it's stateful) */
2981static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2982 u32 function, u32 index)
2983{
2984 if (e->function != function)
2985 return 0;
2986 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2987 return 0;
2988 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
19355475 2989 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
07716717
DK
2990 return 0;
2991 return 1;
2992}
2993
d8017474
AG
2994struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
2995 u32 function, u32 index)
8776e519
HB
2996{
2997 int i;
d8017474 2998 struct kvm_cpuid_entry2 *best = NULL;
8776e519 2999
ad312c7c 3000 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
d8017474
AG
3001 struct kvm_cpuid_entry2 *e;
3002
ad312c7c 3003 e = &vcpu->arch.cpuid_entries[i];
07716717
DK
3004 if (is_matching_cpuid_entry(e, function, index)) {
3005 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3006 move_to_next_stateful_cpuid_entry(vcpu, i);
8776e519
HB
3007 best = e;
3008 break;
3009 }
3010 /*
3011 * Both basic or both extended?
3012 */
3013 if (((e->function ^ function) & 0x80000000) == 0)
3014 if (!best || e->function > best->function)
3015 best = e;
3016 }
d8017474
AG
3017 return best;
3018}
3019
3020void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3021{
3022 u32 function, index;
3023 struct kvm_cpuid_entry2 *best;
3024
3025 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3026 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3027 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3028 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3029 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3030 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3031 best = kvm_find_cpuid_entry(vcpu, function, index);
8776e519 3032 if (best) {
5fdbf976
MT
3033 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3034 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3035 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3036 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
8776e519 3037 }
8776e519 3038 kvm_x86_ops->skip_emulated_instruction(vcpu);
2714d1d3 3039 KVMTRACE_5D(CPUID, vcpu, function,
5fdbf976
MT
3040 (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
3041 (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
3042 (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
3043 (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
8776e519
HB
3044}
3045EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 3046
b6c7a5dc
HB
3047/*
3048 * Check whether userspace requested an interrupt window, and whether the
3049 * interrupt window is open.
3050 *
3051 * No need to exit to userspace if we already have an interrupt queued.
3052 */
3053static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
3054 struct kvm_run *kvm_run)
3055{
ad312c7c 3056 return (!vcpu->arch.irq_summary &&
b6c7a5dc 3057 kvm_run->request_interrupt_window &&
ad312c7c 3058 vcpu->arch.interrupt_window_open &&
b6c7a5dc
HB
3059 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
3060}
3061
3062static void post_kvm_run_save(struct kvm_vcpu *vcpu,
3063 struct kvm_run *kvm_run)
3064{
3065 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2d3ad1f4 3066 kvm_run->cr8 = kvm_get_cr8(vcpu);
b6c7a5dc 3067 kvm_run->apic_base = kvm_get_apic_base(vcpu);
4531220b 3068 if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 3069 kvm_run->ready_for_interrupt_injection = 1;
4531220b 3070 else
b6c7a5dc 3071 kvm_run->ready_for_interrupt_injection =
ad312c7c
ZX
3072 (vcpu->arch.interrupt_window_open &&
3073 vcpu->arch.irq_summary == 0);
b6c7a5dc
HB
3074}
3075
b93463aa
AK
3076static void vapic_enter(struct kvm_vcpu *vcpu)
3077{
3078 struct kvm_lapic *apic = vcpu->arch.apic;
3079 struct page *page;
3080
3081 if (!apic || !apic->vapic_addr)
3082 return;
3083
3084 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
72dc67a6
IE
3085
3086 vcpu->arch.apic->vapic_page = page;
b93463aa
AK
3087}
3088
3089static void vapic_exit(struct kvm_vcpu *vcpu)
3090{
3091 struct kvm_lapic *apic = vcpu->arch.apic;
3092
3093 if (!apic || !apic->vapic_addr)
3094 return;
3095
f8b78fa3 3096 down_read(&vcpu->kvm->slots_lock);
b93463aa
AK
3097 kvm_release_page_dirty(apic->vapic_page);
3098 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
f8b78fa3 3099 up_read(&vcpu->kvm->slots_lock);
b93463aa
AK
3100}
3101
d7690175 3102static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
b6c7a5dc
HB
3103{
3104 int r;
3105
2e53d63a
MT
3106 if (vcpu->requests)
3107 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3108 kvm_mmu_unload(vcpu);
3109
b6c7a5dc
HB
3110 r = kvm_mmu_reload(vcpu);
3111 if (unlikely(r))
3112 goto out;
3113
2f52d58c
AK
3114 if (vcpu->requests) {
3115 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2f599714 3116 __kvm_migrate_timers(vcpu);
c8076604
GH
3117 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3118 kvm_write_guest_time(vcpu);
4731d4c7
MT
3119 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3120 kvm_mmu_sync_roots(vcpu);
d4acf7e7
MT
3121 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3122 kvm_x86_ops->tlb_flush(vcpu);
b93463aa
AK
3123 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3124 &vcpu->requests)) {
3125 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
3126 r = 0;
3127 goto out;
3128 }
71c4dfaf
JR
3129 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
3130 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
3131 r = 0;
3132 goto out;
3133 }
2f52d58c 3134 }
b93463aa 3135
b6c7a5dc
HB
3136 preempt_disable();
3137
3138 kvm_x86_ops->prepare_guest_switch(vcpu);
3139 kvm_load_guest_fpu(vcpu);
3140
3141 local_irq_disable();
3142
d7690175 3143 if (vcpu->requests || need_resched() || signal_pending(current)) {
6c142801
AK
3144 local_irq_enable();
3145 preempt_enable();
3146 r = 1;
3147 goto out;
3148 }
3149
e9571ed5
MT
3150 vcpu->guest_mode = 1;
3151 /*
3152 * Make sure that guest_mode assignment won't happen after
3153 * testing the pending IRQ vector bitmap.
3154 */
3155 smp_wmb();
3156
ad312c7c 3157 if (vcpu->arch.exception.pending)
298101da
AK
3158 __queue_exception(vcpu);
3159 else if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 3160 kvm_x86_ops->inject_pending_irq(vcpu);
eb9774f0 3161 else
b6c7a5dc
HB
3162 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
3163
b93463aa
AK
3164 kvm_lapic_sync_to_vapic(vcpu);
3165
3200f405
MT
3166 up_read(&vcpu->kvm->slots_lock);
3167
b6c7a5dc
HB
3168 kvm_guest_enter();
3169
42dbaa5a
JK
3170 get_debugreg(vcpu->arch.host_dr6, 6);
3171 get_debugreg(vcpu->arch.host_dr7, 7);
3172 if (unlikely(vcpu->arch.switch_db_regs)) {
3173 get_debugreg(vcpu->arch.host_db[0], 0);
3174 get_debugreg(vcpu->arch.host_db[1], 1);
3175 get_debugreg(vcpu->arch.host_db[2], 2);
3176 get_debugreg(vcpu->arch.host_db[3], 3);
3177
3178 set_debugreg(0, 7);
3179 set_debugreg(vcpu->arch.eff_db[0], 0);
3180 set_debugreg(vcpu->arch.eff_db[1], 1);
3181 set_debugreg(vcpu->arch.eff_db[2], 2);
3182 set_debugreg(vcpu->arch.eff_db[3], 3);
3183 }
b6c7a5dc 3184
2714d1d3 3185 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
b6c7a5dc
HB
3186 kvm_x86_ops->run(vcpu, kvm_run);
3187
42dbaa5a
JK
3188 if (unlikely(vcpu->arch.switch_db_regs)) {
3189 set_debugreg(0, 7);
3190 set_debugreg(vcpu->arch.host_db[0], 0);
3191 set_debugreg(vcpu->arch.host_db[1], 1);
3192 set_debugreg(vcpu->arch.host_db[2], 2);
3193 set_debugreg(vcpu->arch.host_db[3], 3);
3194 }
3195 set_debugreg(vcpu->arch.host_dr6, 6);
3196 set_debugreg(vcpu->arch.host_dr7, 7);
3197
b6c7a5dc
HB
3198 vcpu->guest_mode = 0;
3199 local_irq_enable();
3200
3201 ++vcpu->stat.exits;
3202
3203 /*
3204 * We must have an instruction between local_irq_enable() and
3205 * kvm_guest_exit(), so the timer interrupt isn't delayed by
3206 * the interrupt shadow. The stat.exits increment will do nicely.
3207 * But we need to prevent reordering, hence this barrier():
3208 */
3209 barrier();
3210
3211 kvm_guest_exit();
3212
3213 preempt_enable();
3214
3200f405
MT
3215 down_read(&vcpu->kvm->slots_lock);
3216
b6c7a5dc
HB
3217 /*
3218 * Profile KVM exit RIPs:
3219 */
3220 if (unlikely(prof_on == KVM_PROFILING)) {
5fdbf976
MT
3221 unsigned long rip = kvm_rip_read(vcpu);
3222 profile_hit(KVM_PROFILING, (void *)rip);
b6c7a5dc
HB
3223 }
3224
ad312c7c
ZX
3225 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
3226 vcpu->arch.exception.pending = false;
298101da 3227
b93463aa
AK
3228 kvm_lapic_sync_from_vapic(vcpu);
3229
b6c7a5dc 3230 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
d7690175
MT
3231out:
3232 return r;
3233}
b6c7a5dc 3234
09cec754 3235
d7690175
MT
3236static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3237{
3238 int r;
3239
3240 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
1b10bf31
JK
3241 pr_debug("vcpu %d received sipi with vector # %x\n",
3242 vcpu->vcpu_id, vcpu->arch.sipi_vector);
d7690175 3243 kvm_lapic_reset(vcpu);
5f179287 3244 r = kvm_arch_vcpu_reset(vcpu);
d7690175
MT
3245 if (r)
3246 return r;
3247 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
b6c7a5dc
HB
3248 }
3249
d7690175
MT
3250 down_read(&vcpu->kvm->slots_lock);
3251 vapic_enter(vcpu);
3252
3253 r = 1;
3254 while (r > 0) {
af2152f5 3255 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
d7690175
MT
3256 r = vcpu_enter_guest(vcpu, kvm_run);
3257 else {
3258 up_read(&vcpu->kvm->slots_lock);
3259 kvm_vcpu_block(vcpu);
3260 down_read(&vcpu->kvm->slots_lock);
3261 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
09cec754
GN
3262 {
3263 switch(vcpu->arch.mp_state) {
3264 case KVM_MP_STATE_HALTED:
d7690175 3265 vcpu->arch.mp_state =
09cec754
GN
3266 KVM_MP_STATE_RUNNABLE;
3267 case KVM_MP_STATE_RUNNABLE:
3268 break;
3269 case KVM_MP_STATE_SIPI_RECEIVED:
3270 default:
3271 r = -EINTR;
3272 break;
3273 }
3274 }
d7690175
MT
3275 }
3276
09cec754
GN
3277 if (r <= 0)
3278 break;
3279
3280 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3281 if (kvm_cpu_has_pending_timer(vcpu))
3282 kvm_inject_pending_timer_irqs(vcpu);
3283
3284 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3285 r = -EINTR;
3286 kvm_run->exit_reason = KVM_EXIT_INTR;
3287 ++vcpu->stat.request_irq_exits;
3288 }
3289 if (signal_pending(current)) {
3290 r = -EINTR;
3291 kvm_run->exit_reason = KVM_EXIT_INTR;
3292 ++vcpu->stat.signal_exits;
3293 }
3294 if (need_resched()) {
3295 up_read(&vcpu->kvm->slots_lock);
3296 kvm_resched(vcpu);
3297 down_read(&vcpu->kvm->slots_lock);
d7690175 3298 }
b6c7a5dc
HB
3299 }
3300
d7690175 3301 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
3302 post_kvm_run_save(vcpu, kvm_run);
3303
b93463aa
AK
3304 vapic_exit(vcpu);
3305
b6c7a5dc
HB
3306 return r;
3307}
3308
3309int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3310{
3311 int r;
3312 sigset_t sigsaved;
3313
3314 vcpu_load(vcpu);
3315
ac9f6dc0
AK
3316 if (vcpu->sigset_active)
3317 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3318
a4535290 3319 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
b6c7a5dc 3320 kvm_vcpu_block(vcpu);
d7690175 3321 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
ac9f6dc0
AK
3322 r = -EAGAIN;
3323 goto out;
b6c7a5dc
HB
3324 }
3325
b6c7a5dc
HB
3326 /* re-sync apic's tpr */
3327 if (!irqchip_in_kernel(vcpu->kvm))
2d3ad1f4 3328 kvm_set_cr8(vcpu, kvm_run->cr8);
b6c7a5dc 3329
ad312c7c 3330 if (vcpu->arch.pio.cur_count) {
b6c7a5dc
HB
3331 r = complete_pio(vcpu);
3332 if (r)
3333 goto out;
3334 }
3335#if CONFIG_HAS_IOMEM
3336 if (vcpu->mmio_needed) {
3337 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3338 vcpu->mmio_read_completed = 1;
3339 vcpu->mmio_needed = 0;
3200f405
MT
3340
3341 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc 3342 r = emulate_instruction(vcpu, kvm_run,
571008da
SY
3343 vcpu->arch.mmio_fault_cr2, 0,
3344 EMULTYPE_NO_DECODE);
3200f405 3345 up_read(&vcpu->kvm->slots_lock);
b6c7a5dc
HB
3346 if (r == EMULATE_DO_MMIO) {
3347 /*
3348 * Read-modify-write. Back to userspace.
3349 */
3350 r = 0;
3351 goto out;
3352 }
3353 }
3354#endif
5fdbf976
MT
3355 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3356 kvm_register_write(vcpu, VCPU_REGS_RAX,
3357 kvm_run->hypercall.ret);
b6c7a5dc
HB
3358
3359 r = __vcpu_run(vcpu, kvm_run);
3360
3361out:
3362 if (vcpu->sigset_active)
3363 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3364
3365 vcpu_put(vcpu);
3366 return r;
3367}
3368
3369int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3370{
3371 vcpu_load(vcpu);
3372
5fdbf976
MT
3373 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3374 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3375 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3376 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3377 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3378 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3379 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3380 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
b6c7a5dc 3381#ifdef CONFIG_X86_64
5fdbf976
MT
3382 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3383 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3384 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3385 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3386 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3387 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3388 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3389 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
b6c7a5dc
HB
3390#endif
3391
5fdbf976 3392 regs->rip = kvm_rip_read(vcpu);
b6c7a5dc
HB
3393 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3394
3395 /*
3396 * Don't leak debug flags in case they were set for guest debugging
3397 */
d0bfb940 3398 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
b6c7a5dc
HB
3399 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3400
3401 vcpu_put(vcpu);
3402
3403 return 0;
3404}
3405
3406int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3407{
3408 vcpu_load(vcpu);
3409
5fdbf976
MT
3410 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3411 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3412 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3413 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3414 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3415 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3416 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3417 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
b6c7a5dc 3418#ifdef CONFIG_X86_64
5fdbf976
MT
3419 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3420 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3421 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3422 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3423 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3424 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3425 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3426 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3427
b6c7a5dc
HB
3428#endif
3429
5fdbf976 3430 kvm_rip_write(vcpu, regs->rip);
b6c7a5dc
HB
3431 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3432
b6c7a5dc 3433
b4f14abd
JK
3434 vcpu->arch.exception.pending = false;
3435
b6c7a5dc
HB
3436 vcpu_put(vcpu);
3437
3438 return 0;
3439}
3440
3e6e0aab
GT
3441void kvm_get_segment(struct kvm_vcpu *vcpu,
3442 struct kvm_segment *var, int seg)
b6c7a5dc 3443{
14af3f3c 3444 kvm_x86_ops->get_segment(vcpu, var, seg);
b6c7a5dc
HB
3445}
3446
3447void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3448{
3449 struct kvm_segment cs;
3450
3e6e0aab 3451 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
b6c7a5dc
HB
3452 *db = cs.db;
3453 *l = cs.l;
3454}
3455EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3456
3457int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3458 struct kvm_sregs *sregs)
3459{
3460 struct descriptor_table dt;
3461 int pending_vec;
3462
3463 vcpu_load(vcpu);
3464
3e6e0aab
GT
3465 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3466 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3467 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3468 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3469 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3470 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 3471
3e6e0aab
GT
3472 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3473 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc
HB
3474
3475 kvm_x86_ops->get_idt(vcpu, &dt);
3476 sregs->idt.limit = dt.limit;
3477 sregs->idt.base = dt.base;
3478 kvm_x86_ops->get_gdt(vcpu, &dt);
3479 sregs->gdt.limit = dt.limit;
3480 sregs->gdt.base = dt.base;
3481
3482 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
ad312c7c
ZX
3483 sregs->cr0 = vcpu->arch.cr0;
3484 sregs->cr2 = vcpu->arch.cr2;
3485 sregs->cr3 = vcpu->arch.cr3;
3486 sregs->cr4 = vcpu->arch.cr4;
2d3ad1f4 3487 sregs->cr8 = kvm_get_cr8(vcpu);
ad312c7c 3488 sregs->efer = vcpu->arch.shadow_efer;
b6c7a5dc
HB
3489 sregs->apic_base = kvm_get_apic_base(vcpu);
3490
3491 if (irqchip_in_kernel(vcpu->kvm)) {
3492 memset(sregs->interrupt_bitmap, 0,
3493 sizeof sregs->interrupt_bitmap);
3494 pending_vec = kvm_x86_ops->get_irq(vcpu);
3495 if (pending_vec >= 0)
3496 set_bit(pending_vec,
3497 (unsigned long *)sregs->interrupt_bitmap);
3498 } else
ad312c7c 3499 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
b6c7a5dc
HB
3500 sizeof sregs->interrupt_bitmap);
3501
3502 vcpu_put(vcpu);
3503
3504 return 0;
3505}
3506
62d9f0db
MT
3507int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3508 struct kvm_mp_state *mp_state)
3509{
3510 vcpu_load(vcpu);
3511 mp_state->mp_state = vcpu->arch.mp_state;
3512 vcpu_put(vcpu);
3513 return 0;
3514}
3515
3516int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3517 struct kvm_mp_state *mp_state)
3518{
3519 vcpu_load(vcpu);
3520 vcpu->arch.mp_state = mp_state->mp_state;
3521 vcpu_put(vcpu);
3522 return 0;
3523}
3524
3e6e0aab 3525static void kvm_set_segment(struct kvm_vcpu *vcpu,
b6c7a5dc
HB
3526 struct kvm_segment *var, int seg)
3527{
14af3f3c 3528 kvm_x86_ops->set_segment(vcpu, var, seg);
b6c7a5dc
HB
3529}
3530
37817f29
IE
3531static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3532 struct kvm_segment *kvm_desct)
3533{
3534 kvm_desct->base = seg_desc->base0;
3535 kvm_desct->base |= seg_desc->base1 << 16;
3536 kvm_desct->base |= seg_desc->base2 << 24;
3537 kvm_desct->limit = seg_desc->limit0;
3538 kvm_desct->limit |= seg_desc->limit << 16;
c93cd3a5
MT
3539 if (seg_desc->g) {
3540 kvm_desct->limit <<= 12;
3541 kvm_desct->limit |= 0xfff;
3542 }
37817f29
IE
3543 kvm_desct->selector = selector;
3544 kvm_desct->type = seg_desc->type;
3545 kvm_desct->present = seg_desc->p;
3546 kvm_desct->dpl = seg_desc->dpl;
3547 kvm_desct->db = seg_desc->d;
3548 kvm_desct->s = seg_desc->s;
3549 kvm_desct->l = seg_desc->l;
3550 kvm_desct->g = seg_desc->g;
3551 kvm_desct->avl = seg_desc->avl;
3552 if (!selector)
3553 kvm_desct->unusable = 1;
3554 else
3555 kvm_desct->unusable = 0;
3556 kvm_desct->padding = 0;
3557}
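/*
 * Illustrative example of the granularity handling above: for a flat
 * 4 GiB segment the descriptor holds limit0 = 0xffff, limit = 0xf and
 * g = 1, so the 20-bit limit 0xfffff is shifted left by 12 and or-ed
 * with 0xfff, giving a byte-granular limit of 0xffffffff.
 */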
3558
b8222ad2
AS
3559static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3560 u16 selector,
3561 struct descriptor_table *dtable)
37817f29
IE
3562{
3563 if (selector & 1 << 2) {
3564 struct kvm_segment kvm_seg;
3565
3e6e0aab 3566 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
37817f29
IE
3567
3568 if (kvm_seg.unusable)
3569 dtable->limit = 0;
3570 else
3571 dtable->limit = kvm_seg.limit;
3572 dtable->base = kvm_seg.base;
3573 }
3574 else
3575 kvm_x86_ops->get_gdt(vcpu, dtable);
3576}
3577
3578/* allowed just for 8 bytes segments */
3579static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3580 struct desc_struct *seg_desc)
3581{
98899aa0 3582 gpa_t gpa;
37817f29
IE
3583 struct descriptor_table dtable;
3584 u16 index = selector >> 3;
3585
b8222ad2 3586 get_segment_descriptor_dtable(vcpu, selector, &dtable);
37817f29
IE
3587
3588 if (dtable.limit < index * 8 + 7) {
3589 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3590 return 1;
3591 }
98899aa0
MT
3592 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3593 gpa += index * 8;
3594 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
37817f29
IE
3595}
3596
3597/* allowed just for 8 bytes segments */
3598static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3599 struct desc_struct *seg_desc)
3600{
98899aa0 3601 gpa_t gpa;
37817f29
IE
3602 struct descriptor_table dtable;
3603 u16 index = selector >> 3;
3604
b8222ad2 3605 get_segment_descriptor_dtable(vcpu, selector, &dtable);
37817f29
IE
3606
3607 if (dtable.limit < index * 8 + 7)
3608 return 1;
98899aa0
MT
3609 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3610 gpa += index * 8;
3611 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
37817f29
IE
3612}
3613
3614static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3615 struct desc_struct *seg_desc)
3616{
3617 u32 base_addr;
3618
3619 base_addr = seg_desc->base0;
3620 base_addr |= (seg_desc->base1 << 16);
3621 base_addr |= (seg_desc->base2 << 24);
3622
98899aa0 3623 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
37817f29
IE
3624}
3625
37817f29
IE
3626static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3627{
3628 struct kvm_segment kvm_seg;
3629
3e6e0aab 3630 kvm_get_segment(vcpu, &kvm_seg, seg);
37817f29
IE
3631 return kvm_seg.selector;
3632}
3633
3634static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3635 u16 selector,
3636 struct kvm_segment *kvm_seg)
3637{
3638 struct desc_struct seg_desc;
3639
3640 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3641 return 1;
3642 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3643 return 0;
3644}
3645
2259e3a7 3646static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
f4bbd9aa
AK
3647{
3648 struct kvm_segment segvar = {
3649 .base = selector << 4,
3650 .limit = 0xffff,
3651 .selector = selector,
3652 .type = 3,
3653 .present = 1,
3654 .dpl = 3,
3655 .db = 0,
3656 .s = 1,
3657 .l = 0,
3658 .g = 0,
3659 .avl = 0,
3660 .unusable = 0,
3661 };
3662 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
3663 return 0;
3664}
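/*
 * Illustrative example for kvm_load_realmode_segment(): real-mode
 * segmentation sets base = selector << 4, so loading selector 0xb800
 * yields base 0xb8000 (the familiar VGA text buffer) with a 64K limit.
 */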
3665
3e6e0aab
GT
3666int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3667 int type_bits, int seg)
37817f29
IE
3668{
3669 struct kvm_segment kvm_seg;
3670
f4bbd9aa
AK
3671 if (!(vcpu->arch.cr0 & X86_CR0_PE))
3672 return kvm_load_realmode_segment(vcpu, selector, seg);
37817f29
IE
3673 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3674 return 1;
3675 kvm_seg.type |= type_bits;
3676
3677 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3678 seg != VCPU_SREG_LDTR)
3679 if (!kvm_seg.s)
3680 kvm_seg.unusable = 1;
3681
3e6e0aab 3682 kvm_set_segment(vcpu, &kvm_seg, seg);
37817f29
IE
3683 return 0;
3684}

static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}
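
/*
 * Editorial note: load_state_from_tss32() (and its 16-bit counterpart
 * below) restores LDTR before the ordinary segment registers because
 * selectors with TI=1 are looked up in the LDT; restoring ES/CS/SS/DS
 * first could resolve their selectors against a stale table.
 */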

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}

static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}
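
/*
 * Editorial note on the ordering above: the outgoing task's TSS is read,
 * refreshed with the current register state and written back, and only
 * then is the incoming TSS read and applied; this matches the sequence
 * a hardware task switch performs.
 */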

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either TSS or their
	 * descriptors should generate a page fault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the B (busy) flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
					 &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
					 &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
		nseg_desc.type |= (1 << 1); /* set the B (busy) flag */
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
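
/*
 * Editorial sketch, not part of the original file: a vendor exit handler
 * would decode the target selector and switch reason from its exit
 * information and hand off to kvm_task_switch(). The hypothetical wrapper
 * below only illustrates the calling convention.
 */
static inline int task_switch_caller_sketch(struct kvm_vcpu *vcpu,
					    u16 tss_selector)
{
	/* 1 means handled (resume the guest), 0 means emulation failed. */
	return kvm_task_switch(vcpu, tss_selector, TASK_SWITCH_GATE);
}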

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->arch.irq_pending);
		vcpu->arch.irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
			if (vcpu->arch.irq_pending[i])
				__set_bit(i, &vcpu->arch.irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only a pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_x86_ops->set_irq(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n",
				 pending_vec);
		}
		kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	/*
	 * Older userspace won't unhalt the vcpu on reset: detect the
	 * architectural reset state (real mode, CS selector 0xf000 with
	 * base 0xffff0000, RIP 0xfff0) on the boot CPU and mark it
	 * runnable here instead.
	 */
	if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(vcpu->arch.cr0 & X86_CR0_PE))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int i, r;

	vcpu_load(vcpu);

	if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
		kvm_queue_exception(vcpu, DB_VECTOR);
	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
		kvm_queue_exception(vcpu, BP_VECTOR);

	vcpu_put(vcpu);

	return r;
}
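
/*
 * Userspace usage sketch (editorial; assumes a vcpu fd obtained from
 * /dev/kvm). KVM_SET_GUEST_DEBUG is the ioctl that reaches the handler
 * above; to arm a hardware breakpoint, both flag bits must be set and
 * the debug registers supplied:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *	};
 *	dbg.arch.debugreg[0] = addr;
 *	dbg.arch.debugreg[7] = 0x1;	(enable DR0)
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */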

/*
 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16 cwd;
	u16 swd;
	u16 twd;
	u16 fop;
	u64 rip;
	u64 rdp;
	u32 mxcsr;
	u32 mxcsr_mask;
	u32 st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32 xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32 xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
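
/*
 * Editorial note: this struct mirrors the hardware FXSAVE image, where
 * the x87 registers begin at byte offset 32 and the XMM registers at
 * offset 160. Compile-time checks along these lines would document that
 * assumption:
 *
 *	BUILD_BUG_ON(offsetof(struct fxsave, st_space) != 32);
 *	BUILD_BUG_ON(offsetof(struct fxsave, xmm_space) != 160);
 */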

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&vcpu->kvm->slots_lock);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}
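
/*
 * Userspace usage sketch (editorial; assumes a vcpu fd). KVM_TRANSLATE
 * is the ioctl that reaches the handler above:
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *		use(tr.physical_address);
 */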

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in non-atomic context: if this is
	 * the first fpu instruction, the exception handler will fire
	 * before the instruction returns and will have to allocate ram
	 * with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;	/* power-on MXCSR value */
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
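
/*
 * Editorial worked example: after_mxcsr_mask is
 * offsetof(struct i387_fxsave_struct, st_space), i.e. 32, so assuming
 * the usual 512-byte fxsave image the memset above clears the trailing
 * 480 bytes of register state while preserving the control/status words
 * (including the mxcsr just written) in the first 32 bytes.
 */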

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}
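
/*
 * Userspace usage sketch (editorial; assumes a VM fd). The user_alloc == 1
 * path is taken when memory is registered via KVM_SET_USER_MEMORY_REGION:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = ram_size,
 *		.userspace_addr  = (__u64)ram_ptr,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */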

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending;
}

static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
	/*
	 * We may be called synchronously with irqs disabled in guest mode,
	 * so there is no need to call smp_call_function_single() in that
	 * case.
	 */
	if (vcpu->guest_mode && vcpu->cpu != cpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}
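
/*
 * Editorial note: the get_cpu()/put_cpu() pair above disables preemption
 * so the vcpu->cpu != cpu comparison stays meaningful while the IPI is
 * sent. vcpu_kick_intr() is deliberately an empty payload; the IPI exists
 * only to force a VM exit on the target physical CPU.
 */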