KVM: Portability: Introduce kvm_vcpu_arch

drivers/kvm/vmx.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "x86_emulate.h"
#include "irq.h"
#include "vmx.h"
#include "segment_descriptor.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, 0);

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	int                   launched;
	u8                    fail;
	u32                   idt_vectoring_info;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	int                   msr_offset_efer;
#ifdef CONFIG_X86_64
	int                   msr_offset_kernel_gs_base;
#endif
	struct vmcs          *vmcs;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
		int           guest_efer_loaded;
	} host_state;
	struct {
		struct {
			bool pending;
			u8 vector;
			unsigned rip;
		} irq;
	} rmode;
};

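/*
 * The generic kvm_vcpu is embedded inside vcpu_vmx, so container_of()
 * recovers the VMX-private state from the generic vcpu pointer that the
 * arch-independent code passes around.
 */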
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

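/*
 * Bulk-load or bulk-save an array of MSRs via wrmsrl()/rdmsrl(); used to
 * switch the auto-switched MSR set between guest and host values.
 */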
static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}

static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline int cpu_has_vmx_tpr_shadow(void)
{
	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
}

static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
}

static inline int cpu_has_secondary_exec_ctrls(void)
{
	return (vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
}

static inline int cpu_has_vmx_virtualize_apic_accesses(void)
{
	return (vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}

static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
		(irqchip_in_kernel(kvm)));
}

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}

static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

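/*
 * VMCLEAR takes the physical address of a VMCS and renders it inactive
 * and launched-clear; the setna stores 1 if the VMX instruction failed
 * (CF or ZF set), which we only log since it should never happen.
 */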
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.arch.host_tsc);
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu == -1)
		return;
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
	vmx->launched = 0;
}

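/*
 * Accessors for VMCS fields.  Hardware only exposes VMREAD/VMWRITE at the
 * natural word size; the 16/32/64-bit wrappers below narrow or split as
 * needed (on 32-bit hosts a 64-bit field is accessed as two halves).
 */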
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		      : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	vmcs_writel(field, value);
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

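/*
 * Recompute which exceptions must cause VM exits: page faults and invalid
 * opcodes always, #NM while the FPU is lazily disabled, #DB when the
 * guest debugger is active, and everything while emulating real mode.
 */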
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
		eb |= 1u << 1;
	if (vcpu->arch.rmode.active)
		eb = ~0;
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
#ifndef CONFIG_X86_64

	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct segment_descriptor *descs;

	get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
#endif
}

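/*
 * Decide whether guest and host EFER differ in any bit that actually
 * matters for guest execution; only then is the MSR switched on every
 * vmentry/vmexit, which is expensive.
 */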
static void load_transition_efer(struct vcpu_vmx *vmx)
{
	int efer_offset = vmx->msr_offset_efer;
	u64 host_efer, guest_efer, ignore_bits;

	if (efer_offset < 0)
		return;
	host_efer = vmx->host_msrs[efer_offset].data;
	guest_efer = vmx->guest_msrs[efer_offset].data;
	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE is
	 * meaningless outside long mode.
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
		return;

	vmx->host_state.guest_efer_loaded = 1;
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	wrmsrl(MSR_EFER, guest_efer);
	vmx->vcpu.stat.efer_reload++;
}

static void reload_host_efer(struct vcpu_vmx *vmx)
{
	if (vmx->host_state.guest_efer_loaded) {
		vmx->host_state.guest_efer_loaded = 0;
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
	}
}

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	vmx->host_state.gs_sel = read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);

#endif
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_transition_efer(vmx);
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);
	}
	reload_tss();
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	reload_host_efer(vmx);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta;

	if (vcpu->cpu != cpu) {
		vcpu_clear(vmx);
		kvm_migrate_apic_timer(vcpu);
	}

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonic.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_load_host_state(to_vmx(vcpu));
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->arch.cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
	vcpu_clear(to_vmx(vcpu));
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->arch.rmode.active)
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;
	u32 interruptibility;

	rip = vmcs_readl(GUEST_RIP);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
	vcpu->arch.interrupt_window_open = 1;
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     nr | INTR_TYPE_EXCEPTION
		     | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
		     | INTR_INFO_VALID_MASK);
	if (has_error_code)
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
}

static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
#ifdef CONFIG_X86_64
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;
}
#endif

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs;

	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		int index;

		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_K6_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	vmx->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
#endif
	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
	u64 host_tsc;

	rdtscll(host_tsc);
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct kvm_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		if (vmx->host_state.loaded) {
			reload_host_efer(vmx);
			load_transition_efer(vmx);
		}
		break;
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		guest_write_tsc(data);
		break;
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			msr->data = data;
			if (vmx->host_state.loaded)
				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->arch.regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	unsigned long dr7 = 0x400;
	int old_singlestep;

	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		int i;

		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint */
		}

		vcpu->guest_debug.singlestep = dbg->singlestep;
	} else
		vcpu->guest_debug.singlestep = 0;

	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);
	}

	update_exception_bitmap(vcpu);
	vmcs_writel(GUEST_DR7, dr7);

	return 0;
}

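/*
 * Recover the vector of an interrupt whose delivery was cut short by a
 * VM exit, from the saved IDT-vectoring information.
 */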
static int vmx_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 idtv_info_field;

	idtv_info_field = vmx->idt_vectoring_info;
	if (idtv_info_field & INTR_INFO_VALID_MASK) {
		if (is_external_interrupt(idtv_info_field))
			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
		else
			printk(KERN_DEBUG "pending exception: not handled yet\n");
	}
	return -1;
}

static __init int cpu_has_kvm_support(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}

static void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

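/*
 * Each VMX capability MSR reports, per control bit, whether the bit is
 * allowed to be 0 (high word) and allowed to be 1 (low word).  Fold those
 * constraints into the desired required/optional sets, and fail if a
 * required bit cannot be set.
 */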
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING;
	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min = 0;
		opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		      SECONDARY_EXEC_WBINVD_EXITING;
		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
	      SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;
	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}

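/*
 * Leaving real mode: restore the segment state saved by fix_rmode_seg()
 * if it still matches what is in the VMCS; otherwise synthesize a plain
 * writable data segment with the DPL taken from the current selector.
 */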
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->arch.rmode.active = 0;

	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

static gva_t rmode_tss_base(struct kvm *kvm)
{
	if (!kvm->tss_addr) {
		gfn_t base_gfn = kvm->memslots[0].base_gfn +
				 kvm->memslots[0].npages - 3;
		return base_gfn << PAGE_SHIFT;
	}
	return kvm->tss_addr;
}

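/*
 * Convert a protected-mode segment to vm86 semantics: the selector is
 * derived from the base (base >> 4), the limit is 64K, and the access
 * rights are forced to a 16-bit writable data segment (0xf3).  The
 * original state is saved so enter_pmode() can restore it.
 */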
static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, save->base >> 4);
	vmcs_write32(sf->base, save->base & 0xfffff);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->arch.rmode.active = 1;

	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vcpu->arch.rmode.save_iopl
		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);

	kvm_mmu_reset_context(vcpu);
	init_rmode_tss(vcpu->kvm);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
		       __FUNCTION__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}

	vcpu->arch.shadow_efer |= EFER_LMA;

	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     | VM_ENTRY_IA32E_MODE);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);
}

#endif

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	vmx_fpu_deactivate(vcpu);

	if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0,
		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
	vcpu->arch.cr0 = cr0;

	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
		vmx_fpu_activate(vcpu);
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	vmcs_writel(GUEST_CR3, cr3);
	if (vcpu->arch.cr0 & X86_CR0_PE)
		vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
	vcpu->arch.cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	vcpu->arch.shadow_efer = efer;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_IA32E_MODE);
		msr->data = efer;

	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vmx);
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	var->base = vmcs_readl(sf->base);
	var->limit = vmcs_read32(sf->limit);
	var->selector = vmcs_read16(sf->selector);
	ar = vmcs_read32(sf->ar_bytes);
	if (ar & AR_UNUSABLE_MASK)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;
}

static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	if (var->unusable)
		ar = 1 << 16;
	else {
		ar = var->type & 15;
		ar |= (var->s & 1) << 4;
		ar |= (var->dpl & 3) << 5;
		ar |= (var->present & 1) << 7;
		ar |= (var->avl & 1) << 12;
		ar |= (var->l & 1) << 13;
		ar |= (var->db & 1) << 14;
		ar |= (var->g & 1) << 15;
	}
	if (ar == 0) /* a 0 value means unusable */
		ar = AR_UNUSABLE_MASK;

	return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
		vcpu->arch.rmode.tr.selector = var->selector;
		vcpu->arch.rmode.tr.base = var->base;
		vcpu->arch.rmode.tr.limit = var->limit;
		vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
		return;
	}
	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);
	if (vcpu->arch.rmode.active && var->s) {
		/*
		 * Hack real-mode segments into vm86 compatibility.
		 */
		if (var->base == 0xffff0000 && var->selector == 0xf000)
			vmcs_writel(sf->base, 0xf0000);
		ar = 0xf3;
	} else
		ar = vmx_segment_access_rights(var);
	vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

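/*
 * Lay out the dummy real-mode TSS in guest memory: clear three pages,
 * store the I/O-bitmap offset at byte 0x66, and write a final 0xff byte
 * at the end of the structure (the I/O bitmap terminator).
 */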
static int init_rmode_tss(struct kvm *kvm)
{
	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
	u16 data = 0;
	int r;

	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		return 0;
	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
	if (r < 0)
		return 0;
	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
	if (r < 0)
		return 0;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		return 0;
	data = ~0;
	r = kvm_write_guest_page(kvm, fn, &data,
				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
				 sizeof(u8));
	if (r < 0)
		return 0;
	return 1;
}

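/*
 * Reset a guest segment register to the real-mode-compatible default:
 * selector 0, base 0, 64K limit, present writable data segment (0x93).
 */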
static void seg_setup(int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0x93);
}

static int alloc_apic_access_page(struct kvm *kvm)
{
	struct kvm_userspace_memory_region kvm_userspace_mem;
	int r = 0;

	mutex_lock(&kvm->lock);
	if (kvm->apic_access_page)
		goto out;
	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
	kvm_userspace_mem.flags = 0;
	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
	kvm_userspace_mem.memory_size = PAGE_SIZE;
	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
	if (r)
		goto out;
	kvm->apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
	u32 host_sysenter_cs;
	u32 junk;
	unsigned long a;
	struct descriptor_table dt;
	int i;
	unsigned long kvm_vmx_return;
	u32 exec_control;

	/* I/O */
	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
		     vmcs_config.pin_based_exec_ctrl);

	exec_control = vmcs_config.cpu_based_exec_ctrl;
	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	}
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

	if (cpu_has_secondary_exec_ctrls()) {
		exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
		if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
			exec_control &=
				~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	get_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */

	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
	rdmsrl(MSR_IA32_SYSENTER_ESP, a);
	vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

	for (i = 0; i < NR_VMX_MSR; ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		u64 data;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		data = data_low | ((u64)data_high << 32);
		vmx->host_msrs[j].index = index;
		vmx->host_msrs[j].reserved = 0;
		vmx->host_msrs[j].data = data;
		vmx->guest_msrs[j] = vmx->host_msrs[j];
		++vmx->nmsrs;
	}

	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
		if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
			return -ENOMEM;

	return 0;
}

static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 msr;
	int ret;

	if (!init_rmode_tss(vmx->vcpu.kvm)) {
		ret = -ENOMEM;
		goto out;
	}

	vmx->vcpu.arch.rmode.active = 0;

	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
	set_cr8(&vmx->vcpu, 0);
	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (vmx->vcpu.vcpu_id == 0)
		msr |= MSR_IA32_APICBASE_BSP;
	kvm_set_apic_base(&vmx->vcpu, msr);

	fx_init(&vmx->vcpu);

	/*
	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
	 */
	if (vmx->vcpu.vcpu_id == 0) {
		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
	} else {
		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
	}
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	vmcs_write32(GUEST_SYSENTER_CS, 0);
	vmcs_writel(GUEST_SYSENTER_ESP, 0);
	vmcs_writel(GUEST_SYSENTER_EIP, 0);

	vmcs_writel(GUEST_RFLAGS, 0x02);
	if (vmx->vcpu.vcpu_id == 0)
		vmcs_writel(GUEST_RIP, 0xfff0);
	else
		vmcs_writel(GUEST_RIP, 0);
	vmcs_writel(GUEST_RSP, 0);

	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
	vmcs_writel(GUEST_DR7, 0x400);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

	guest_write_tsc(0);

	/* Special registers */
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	setup_msrs(vmx);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

	if (cpu_has_vmx_tpr_shadow()) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
		if (vm_need_tpr_shadow(vmx->vcpu.kvm))
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
				     page_to_phys(vmx->vcpu.arch.apic->regs_page));
		vmcs_write32(TPR_THRESHOLD, 0);
	}

	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
		vmcs_write64(APIC_ACCESS_ADDR,
			     page_to_phys(vmx->vcpu.kvm->apic_access_page));

	vmx->vcpu.arch.cr0 = 0x60000010;
	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
	vmx_set_cr4(&vmx->vcpu, 0);
#ifdef CONFIG_X86_64
	vmx_set_efer(&vmx->vcpu, 0);
#endif
	vmx_fpu_activate(&vmx->vcpu);
	update_exception_bitmap(&vmx->vcpu);

	return 0;

out:
	return ret;
}

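/*
 * Real mode on VT-x runs as vm86, which cannot deliver interrupts the way
 * true real mode does.  Injection is therefore emulated as a software
 * interrupt: the vector and original RIP are remembered, RIP is moved
 * back one byte, and VM entry injects the vector with instruction
 * length 1.
 */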
static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vcpu->arch.rmode.active) {
		vmx->rmode.irq.pending = true;
		vmx->rmode.irq.vector = irq;
		vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
		vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
		return;
	}
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	vmx_inject_irq(vcpu, irq);
}

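/*
 * Inject a pending interrupt if the guest can take one now; otherwise
 * arm the interrupt-window exit so we get control back as soon as the
 * guest becomes interruptible again.
 */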
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	u32 cpu_based_vm_exec_control;

	vcpu->arch.interrupt_window_open =
		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

	if (vcpu->arch.interrupt_window_open &&
	    vcpu->arch.irq_summary &&
	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		kvm_do_inject_irq(vcpu);

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	if (!vcpu->arch.interrupt_window_open &&
	    (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
		/*
		 * Interrupts blocked.  Wait for unblock.
		 */
		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	else
		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	int ret;
	struct kvm_userspace_memory_region tss_mem = {
		.slot = 8,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,
		.flags = 0,
	};

	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
	if (ret)
		return ret;
	kvm->tss_addr = addr;
	return 0;
}

static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;

	set_debugreg(dbg->bp[0], 0);
	set_debugreg(dbg->bp[1], 1);
	set_debugreg(dbg->bp[2], 2);
	set_debugreg(dbg->bp[3], 3);

	if (dbg->singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
		vmcs_writel(GUEST_RFLAGS, flags);
	}
}

1813static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1814 int vec, u32 err_code)
1815{
ad312c7c 1816 if (!vcpu->arch.rmode.active)
6aa8b732
AK
1817 return 0;
1818
b3f37707
NK
1819 /*
1820 * An instruction with the address-size override prefix (opcode 0x67)
1821 * causes an #SS fault with error code 0 in VM86 mode.
1822 */
1823 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
3427318f 1824 if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
6aa8b732
AK
1825 return 1;
1826 return 0;
1827}
1828
1829static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1830{
1155f76a 1831 struct vcpu_vmx *vmx = to_vmx(vcpu);
6aa8b732
AK
1832 u32 intr_info, error_code;
1833 unsigned long cr2, rip;
1834 u32 vect_info;
1835 enum emulation_result er;
1836
1155f76a 1837 vect_info = vmx->idt_vectoring_info;
6aa8b732
AK
1838 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1839
1840 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
d77c26fc 1841 !is_page_fault(intr_info))
6aa8b732
AK
1842 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
1843 "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
6aa8b732 1844
85f455f7 1845 if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
6aa8b732 1846 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
ad312c7c
ZX
1847 set_bit(irq, vcpu->arch.irq_pending);
1848 set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
6aa8b732
AK
1849 }
1850
1b6269db
AK
1851 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
1852 return 1; /* already handled by vmx_vcpu_run() */
2ab455cc
AL
1853
1854 if (is_no_device(intr_info)) {
5fd86fcf 1855 vmx_fpu_activate(vcpu);
2ab455cc
AL
1856 return 1;
1857 }
1858
7aa81cc0 1859 if (is_invalid_opcode(intr_info)) {
3427318f 1860 er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
7aa81cc0 1861 if (er != EMULATE_DONE)
7ee5d940 1862 kvm_queue_exception(vcpu, UD_VECTOR);
7aa81cc0
AL
1863 return 1;
1864 }
1865
6aa8b732
AK
1866 error_code = 0;
1867 rip = vmcs_readl(GUEST_RIP);
1868 if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
1869 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
1870 if (is_page_fault(intr_info)) {
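		/*
		 * For page-fault exits the exit qualification holds
		 * the faulting linear address, i.e. the value the
		 * guest would have read from %cr2.
		 */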
1871 cr2 = vmcs_readl(EXIT_QUALIFICATION);
3067714c 1872 return kvm_mmu_page_fault(vcpu, cr2, error_code);
6aa8b732
AK
1873 }
1874
ad312c7c 1875 if (vcpu->arch.rmode.active &&
6aa8b732 1876 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
72d6e5a0 1877 error_code)) {
ad312c7c
ZX
1878 if (vcpu->arch.halt_request) {
1879 vcpu->arch.halt_request = 0;
72d6e5a0
AK
1880 return kvm_emulate_halt(vcpu);
1881 }
6aa8b732 1882 return 1;
72d6e5a0 1883 }
6aa8b732 1884
d77c26fc
MD
1885 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
1886 (INTR_TYPE_EXCEPTION | 1)) {
6aa8b732
AK
1887 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1888 return 0;
1889 }
1890 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
1891 kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
1892 kvm_run->ex.error_code = error_code;
1893 return 0;
1894}
1895
1896static int handle_external_interrupt(struct kvm_vcpu *vcpu,
1897 struct kvm_run *kvm_run)
1898{
1165f5fe 1899 ++vcpu->stat.irq_exits;
6aa8b732
AK
1900 return 1;
1901}
1902
988ad74f
AK
1903static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1904{
1905 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1906 return 0;
1907}
6aa8b732 1908
6aa8b732
AK
1909static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1910{
bfdaab09 1911 unsigned long exit_qualification;
039576c0
AK
1912 int size, down, in, string, rep;
1913 unsigned port;
6aa8b732 1914
1165f5fe 1915 ++vcpu->stat.io_exits;
bfdaab09 1916 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
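	/*
	 * Exit-qualification layout for I/O instructions, per the
	 * Intel SDM: bits 2:0 hold the access size minus one, bit 3
	 * the direction (1 = in), bit 4 flags a string instruction,
	 * bit 5 a rep prefix, and bits 31:16 the port number; the
	 * masks 7, 8, 16 and 32 below decode exactly these fields.
	 */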
039576c0 1917 string = (exit_qualification & 16) != 0;
e70669ab
LV
1918
1919 if (string) {
3427318f
LV
1920 if (emulate_instruction(vcpu,
1921 kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
e70669ab
LV
1922 return 0;
1923 return 1;
1924 }
1925
1926 size = (exit_qualification & 7) + 1;
1927 in = (exit_qualification & 8) != 0;
039576c0 1928 down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
039576c0
AK
1929 rep = (exit_qualification & 32) != 0;
1930 port = exit_qualification >> 16;
e70669ab 1931
3090dd73 1932 return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
6aa8b732
AK
1933}
1934
102d8325
IM
1935static void
1936vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1937{
1938 /*
1939 * Patch in the VMCALL instruction:
1940 */
1941 hypercall[0] = 0x0f;
1942 hypercall[1] = 0x01;
1943 hypercall[2] = 0xc1;
102d8325
IM
1944}
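/*
 * 0f 01 c1 encodes VMCALL.  These bytes are patched over the guest's
 * generic hypercall instruction, so a guest stub on VMX ends up
 * executing roughly the following (an illustrative sketch of the
 * rax-in/rax-out convention, not the in-tree guest code):
 *
 *	static inline long hypercall0_sketch(unsigned int nr)
 *	{
 *		long ret;
 *		asm volatile("vmcall" : "=a"(ret) : "a"(nr) : "memory");
 *		return ret;
 *	}
 */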
1945
6aa8b732
AK
1946static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1947{
bfdaab09 1948 unsigned long exit_qualification;
6aa8b732
AK
1949 int cr;
1950 int reg;
1951
bfdaab09 1952 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
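	/*
	 * Exit-qualification layout for CR accesses, per the Intel
	 * SDM: bits 3:0 give the control register, bits 5:4 the
	 * access type (0 = mov to cr, 1 = mov from cr, 2 = clts,
	 * 3 = lmsw), bits 11:8 the gpr involved, and bits 31:16 the
	 * lmsw source data, matching the decoding below.
	 */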
6aa8b732
AK
1953 cr = exit_qualification & 15;
1954 reg = (exit_qualification >> 8) & 15;
1955 switch ((exit_qualification >> 4) & 3) {
1956 case 0: /* mov to cr */
1957 switch (cr) {
1958 case 0:
1959 vcpu_load_rsp_rip(vcpu);
ad312c7c 1960 set_cr0(vcpu, vcpu->arch.regs[reg]);
6aa8b732
AK
1961 skip_emulated_instruction(vcpu);
1962 return 1;
1963 case 3:
1964 vcpu_load_rsp_rip(vcpu);
ad312c7c 1965 set_cr3(vcpu, vcpu->arch.regs[reg]);
6aa8b732
AK
1966 skip_emulated_instruction(vcpu);
1967 return 1;
1968 case 4:
1969 vcpu_load_rsp_rip(vcpu);
ad312c7c 1970 set_cr4(vcpu, vcpu->arch.regs[reg]);
6aa8b732
AK
1971 skip_emulated_instruction(vcpu);
1972 return 1;
1973 case 8:
1974 vcpu_load_rsp_rip(vcpu);
ad312c7c 1975 set_cr8(vcpu, vcpu->arch.regs[reg]);
6aa8b732 1976 skip_emulated_instruction(vcpu);
e5314067
AK
1977 if (irqchip_in_kernel(vcpu->kvm))
1978 return 1;
253abdee
YS
1979 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
1980 return 0;
6aa8b732
AK
1981 };
1982 break;
25c4c276
AL
1983 case 2: /* clts */
1984 vcpu_load_rsp_rip(vcpu);
5fd86fcf 1985 vmx_fpu_deactivate(vcpu);
ad312c7c
ZX
1986 vcpu->arch.cr0 &= ~X86_CR0_TS;
1987 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
5fd86fcf 1988 vmx_fpu_activate(vcpu);
25c4c276
AL
1989 skip_emulated_instruction(vcpu);
1990 return 1;
6aa8b732
AK
1991 case 1: /* mov from cr */
1992 switch (cr) {
1993 case 3:
1994 vcpu_load_rsp_rip(vcpu);
ad312c7c 1995 vcpu->arch.regs[reg] = vcpu->arch.cr3;
6aa8b732
AK
1996 vcpu_put_rsp_rip(vcpu);
1997 skip_emulated_instruction(vcpu);
1998 return 1;
1999 case 8:
6aa8b732 2000 vcpu_load_rsp_rip(vcpu);
ad312c7c 2001 vcpu->arch.regs[reg] = get_cr8(vcpu);
6aa8b732
AK
2002 vcpu_put_rsp_rip(vcpu);
2003 skip_emulated_instruction(vcpu);
2004 return 1;
2005 }
2006 break;
2007 case 3: /* lmsw */
2008 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
2009
2010 skip_emulated_instruction(vcpu);
2011 return 1;
2012 default:
2013 break;
2014 }
2015 kvm_run->exit_reason = 0;
f0242478 2016 pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
6aa8b732
AK
2017 (int)(exit_qualification >> 4) & 3, cr);
2018 return 0;
2019}
2020
2021static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2022{
bfdaab09 2023 unsigned long exit_qualification;
6aa8b732
AK
2024 unsigned long val;
2025 int dr, reg;
2026
2027 /*
2028 * FIXME: this code assumes the host is debugging the guest.
2029 * need to deal with guest debugging itself too.
2030 */
bfdaab09 2031 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6aa8b732
AK
2032 dr = exit_qualification & 7;
2033 reg = (exit_qualification >> 8) & 15;
2034 vcpu_load_rsp_rip(vcpu);
2035 if (exit_qualification & 16) {
2036 /* mov from dr */
2037 switch (dr) {
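		/*
		 * With no real guest debug-register state to report,
		 * hand back the architectural reset values: DR6 reads
		 * as 0xffff0ff0 and DR7 as 0x400 (Intel SDM).
		 */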
2038 case 6:
2039 val = 0xffff0ff0;
2040 break;
2041 case 7:
2042 val = 0x400;
2043 break;
2044 default:
2045 val = 0;
2046 }
ad312c7c 2047 vcpu->arch.regs[reg] = val;
6aa8b732
AK
2048 } else {
2049 /* mov to dr */
2050 }
2051 vcpu_put_rsp_rip(vcpu);
2052 skip_emulated_instruction(vcpu);
2053 return 1;
2054}
2055
2056static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2057{
06465c5a
AK
2058 kvm_emulate_cpuid(vcpu);
2059 return 1;
6aa8b732
AK
2060}
2061
2062static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2063{
ad312c7c 2064 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
6aa8b732
AK
2065 u64 data;
2066
2067 if (vmx_get_msr(vcpu, ecx, &data)) {
c1a5d4f9 2068 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
2069 return 1;
2070 }
2071
2072 /* FIXME: handling of bits 32:63 of rax, rdx */
ad312c7c
ZX
2073 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
2074 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
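	/* e.g. data 0x1122334455667788 yields RAX 0x55667788, RDX 0x11223344 */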
6aa8b732
AK
2075 skip_emulated_instruction(vcpu);
2076 return 1;
2077}
2078
2079static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2080{
ad312c7c
ZX
2081 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
2082 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
2083 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
6aa8b732
AK
2084
2085 if (vmx_set_msr(vcpu, ecx, data) != 0) {
c1a5d4f9 2086 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
2087 return 1;
2088 }
2089
2090 skip_emulated_instruction(vcpu);
2091 return 1;
2092}
2093
6e5d865c
YS
2094static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
2095 struct kvm_run *kvm_run)
2096{
2097 return 1;
2098}
2099
6aa8b732
AK
2100static int handle_interrupt_window(struct kvm_vcpu *vcpu,
2101 struct kvm_run *kvm_run)
2102{
85f455f7
ED
2103 u32 cpu_based_vm_exec_control;
2104
2105 /* clear pending irq */
2106 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2107 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2108 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
c1150d8c
DL
2109 /*
2110 * If user space is waiting to inject interrupts, exit as soon as
2111 * possible.
2112 */
2113 if (kvm_run->request_interrupt_window &&
ad312c7c 2114 !vcpu->arch.irq_summary) {
c1150d8c 2115 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1165f5fe 2116 ++vcpu->stat.irq_window_exits;
c1150d8c
DL
2117 return 0;
2118 }
6aa8b732
AK
2119 return 1;
2120}
2121
2122static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2123{
2124 skip_emulated_instruction(vcpu);
d3bef15f 2125 return kvm_emulate_halt(vcpu);
6aa8b732
AK
2126}
2127
c21415e8
IM
2128static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2129{
510043da 2130 skip_emulated_instruction(vcpu);
7aa81cc0
AL
2131 kvm_emulate_hypercall(vcpu);
2132 return 1;
c21415e8
IM
2133}
2134
e5edaa01
ED
2135static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2136{
2137 skip_emulated_instruction(vcpu);
2138 /* TODO: Add support for VT-d/pass-through device */
2139 return 1;
2140}
2141
f78e0e2e
SY
2142static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2143{
2144 u64 exit_qualification;
2145 enum emulation_result er;
2146 unsigned long offset;
2147
2148 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
2149 offset = exit_qualification & 0xffful;
2150
2151 er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
2152
2153 if (er != EMULATE_DONE) {
2154 printk(KERN_ERR
2155 "Fail to handle apic access vmexit! Offset is 0x%lx\n",
2156 offset);
2157 return -ENOTSUPP;
2158 }
2159 return 1;
2160}
2161
6aa8b732
AK
2162/*
2163 * The exit handlers return 1 if the exit was handled fully and guest execution
2164 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
2165 * to be done to userspace and return 0.
2166 */
2167static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
2168 struct kvm_run *kvm_run) = {
2169 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
2170 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
988ad74f 2171 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
6aa8b732 2172 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6aa8b732
AK
2173 [EXIT_REASON_CR_ACCESS] = handle_cr,
2174 [EXIT_REASON_DR_ACCESS] = handle_dr,
2175 [EXIT_REASON_CPUID] = handle_cpuid,
2176 [EXIT_REASON_MSR_READ] = handle_rdmsr,
2177 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
2178 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
2179 [EXIT_REASON_HLT] = handle_halt,
c21415e8 2180 [EXIT_REASON_VMCALL] = handle_vmcall,
f78e0e2e
SY
2181 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
2182 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
e5edaa01 2183 [EXIT_REASON_WBINVD] = handle_wbinvd,
6aa8b732
AK
2184};
2185
2186static const int kvm_vmx_max_exit_handlers =
50a3485c 2187 ARRAY_SIZE(kvm_vmx_exit_handlers);
6aa8b732
AK
2188
2189/*
2190 * The guest has exited. See if we can fix it or if we need userspace
2191 * assistance.
2192 */
2193static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2194{
6aa8b732 2195 u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
29bd8a78 2196 struct vcpu_vmx *vmx = to_vmx(vcpu);
1155f76a 2197 u32 vectoring_info = vmx->idt_vectoring_info;
29bd8a78
AK
2198
2199 if (unlikely(vmx->fail)) {
2200 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2201 kvm_run->fail_entry.hardware_entry_failure_reason
2202 = vmcs_read32(VM_INSTRUCTION_ERROR);
2203 return 0;
2204 }
6aa8b732 2205
d77c26fc
MD
2206 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
2207 exit_reason != EXIT_REASON_EXCEPTION_NMI)
6aa8b732
AK
2208 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
2209 "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
6aa8b732
AK
2210 if (exit_reason < kvm_vmx_max_exit_handlers
2211 && kvm_vmx_exit_handlers[exit_reason])
2212 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
2213 else {
2214 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
2215 kvm_run->hw.hardware_exit_reason = exit_reason;
2216 }
2217 return 0;
2218}
2219
d9e368d6
AK
2220static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
2221{
d9e368d6
AK
2222}
2223
6e5d865c
YS
2224static void update_tpr_threshold(struct kvm_vcpu *vcpu)
2225{
2226 int max_irr, tpr;
2227
2228 if (!vm_need_tpr_shadow(vcpu->kvm))
2229 return;
2230
2231 if (!kvm_lapic_enabled(vcpu) ||
2232 ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
2233 vmcs_write32(TPR_THRESHOLD, 0);
2234 return;
2235 }
2236
2237 tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
2238 vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
2239}
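/*
 * TPR_THRESHOLD is compared by the CPU against bits 7:4 of the TPR
 * shadow; a TPR-below-threshold exit fires when the guest drops its
 * priority below it.  Worked example: cr8 == 5 gives a TPR of 0x50;
 * if the highest pending vector is 0x31 (priority class 3, currently
 * masked), the threshold is set to 3, so the exit fires exactly when
 * the guest lowers its TPR below 0x30 and vector 0x31 becomes
 * deliverable.
 */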
2240
85f455f7
ED
2241static void enable_irq_window(struct kvm_vcpu *vcpu)
2242{
2243 u32 cpu_based_vm_exec_control;
2244
2245 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2246 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2247 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2248}
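/*
 * CPU_BASED_VIRTUAL_INTR_PENDING is the "interrupt-window exiting"
 * control: while set, the CPU exits as soon as the guest can accept
 * interrupts (RFLAGS.IF set, no sti/mov-ss blocking), giving KVM a
 * hook to inject a pending interrupt at the earliest legal point.
 */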
2249
2250static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2251{
1155f76a 2252 struct vcpu_vmx *vmx = to_vmx(vcpu);
85f455f7
ED
2253 u32 idtv_info_field, intr_info_field;
2254 int has_ext_irq, interrupt_window_open;
1b9778da 2255 int vector;
85f455f7 2256
6e5d865c
YS
2257 update_tpr_threshold(vcpu);
2258
85f455f7
ED
2259 has_ext_irq = kvm_cpu_has_interrupt(vcpu);
2260 intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
1155f76a 2261 idtv_info_field = vmx->idt_vectoring_info;
85f455f7
ED
2262 if (intr_info_field & INTR_INFO_VALID_MASK) {
2263 if (idtv_info_field & INTR_INFO_VALID_MASK) {
2264 /* TODO: fault when IDT_Vectoring */
2265 printk(KERN_ERR "Fault when IDT_Vectoring\n");
2266 }
2267 if (has_ext_irq)
2268 enable_irq_window(vcpu);
2269 return;
2270 }
2271 if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
9c8cba37
AK
2272 if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
2273 == INTR_TYPE_EXT_INTR
ad312c7c 2274 && vcpu->arch.rmode.active) {
9c8cba37
AK
2275 u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
2276
2277 vmx_inject_irq(vcpu, vect);
2278 if (unlikely(has_ext_irq))
2279 enable_irq_window(vcpu);
2280 return;
2281 }
2282
85f455f7
ED
2283 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
2284 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2285 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
2286
2287 if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
2288 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2289 vmcs_read32(IDT_VECTORING_ERROR_CODE));
2290 if (unlikely(has_ext_irq))
2291 enable_irq_window(vcpu);
2292 return;
2293 }
2294 if (!has_ext_irq)
2295 return;
2296 interrupt_window_open =
2297 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2298 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1b9778da
ED
2299 if (interrupt_window_open) {
2300 vector = kvm_cpu_get_interrupt(vcpu);
2301 vmx_inject_irq(vcpu, vector);
2302 kvm_timer_intr_post(vcpu, vector);
2303 } else
85f455f7
ED
2304 enable_irq_window(vcpu);
2305}
2306
9c8cba37
AK
2307/*
2308 * Failure to inject an interrupt should give us the information
2309 * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs
2310 * when fetching the interrupt redirection bitmap in the real-mode
2311 * tss, this doesn't happen. So we do it ourselves.
2312 */
2313static void fixup_rmode_irq(struct vcpu_vmx *vmx)
2314{
2315 vmx->rmode.irq.pending = 0;
2316 if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
2317 return;
2318 vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
2319 if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
2320 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
2321 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
2322 return;
2323 }
2324 vmx->idt_vectoring_info =
2325 VECTORING_INFO_VALID_MASK
2326 | INTR_TYPE_EXT_INTR
2327 | vmx->rmode.irq.vector;
2328}
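/*
 * Worked example of the real-mode fixup: to inject vector 0x08 with
 * GUEST_RIP at 0x1234, vmx_inject_irq() programs a length-1 software
 * interrupt and rewinds GUEST_RIP to 0x1233.  If GUEST_RIP still
 * reads 0x1233 after the entry, the injection did not complete, so
 * the rewind is undone (RIP back to 0x1234) and idt_vectoring_info
 * is rebuilt as a valid external interrupt carrying the saved vector.
 */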
2329
04d2cc77 2330static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
6aa8b732 2331{
a2fa3e9f 2332 struct vcpu_vmx *vmx = to_vmx(vcpu);
1b6269db 2333 u32 intr_info;
e6adf283
AK
2334
2335 /*
2336 * Loading guest fpu may have cleared host cr0.ts
2337 */
2338 vmcs_writel(HOST_CR0, read_cr0());
2339
d77c26fc 2340 asm(
6aa8b732 2341 /* Store host registers */
05b3e0c2 2342#ifdef CONFIG_X86_64
c2036300 2343 "push %%rdx; push %%rbp;"
6aa8b732 2344 "push %%rcx \n\t"
6aa8b732 2345#else
ff593e5a
LV
2346 "push %%edx; push %%ebp;"
2347 "push %%ecx \n\t"
6aa8b732 2348#endif
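	/* vmwrite the current stack pointer into HOST_RSP (field
	   encoding preloaded into %edx/%rdx by the "d" constraint),
	   so the host resumes on this stack after a vmexit */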
c2036300 2349 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
6aa8b732 2350 /* Check if vmlaunch or vmresume is needed */
e08aa78a 2351 "cmpl $0, %c[launched](%0) \n\t"
6aa8b732 2352 /* Load guest registers. Don't clobber flags. */
05b3e0c2 2353#ifdef CONFIG_X86_64
e08aa78a 2354 "mov %c[cr2](%0), %%rax \n\t"
6aa8b732 2355 "mov %%rax, %%cr2 \n\t"
e08aa78a
AK
2356 "mov %c[rax](%0), %%rax \n\t"
2357 "mov %c[rbx](%0), %%rbx \n\t"
2358 "mov %c[rdx](%0), %%rdx \n\t"
2359 "mov %c[rsi](%0), %%rsi \n\t"
2360 "mov %c[rdi](%0), %%rdi \n\t"
2361 "mov %c[rbp](%0), %%rbp \n\t"
2362 "mov %c[r8](%0), %%r8 \n\t"
2363 "mov %c[r9](%0), %%r9 \n\t"
2364 "mov %c[r10](%0), %%r10 \n\t"
2365 "mov %c[r11](%0), %%r11 \n\t"
2366 "mov %c[r12](%0), %%r12 \n\t"
2367 "mov %c[r13](%0), %%r13 \n\t"
2368 "mov %c[r14](%0), %%r14 \n\t"
2369 "mov %c[r15](%0), %%r15 \n\t"
2370 "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */
6aa8b732 2371#else
e08aa78a 2372 "mov %c[cr2](%0), %%eax \n\t"
6aa8b732 2373 "mov %%eax, %%cr2 \n\t"
e08aa78a
AK
2374 "mov %c[rax](%0), %%eax \n\t"
2375 "mov %c[rbx](%0), %%ebx \n\t"
2376 "mov %c[rdx](%0), %%edx \n\t"
2377 "mov %c[rsi](%0), %%esi \n\t"
2378 "mov %c[rdi](%0), %%edi \n\t"
2379 "mov %c[rbp](%0), %%ebp \n\t"
2380 "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */
6aa8b732
AK
2381#endif
2382 /* Enter guest mode */
cd2276a7 2383 "jne .Llaunched \n\t"
6aa8b732 2384 ASM_VMX_VMLAUNCH "\n\t"
cd2276a7
AK
2385 "jmp .Lkvm_vmx_return \n\t"
2386 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
2387 ".Lkvm_vmx_return: "
6aa8b732 2388 /* Save guest registers, load host registers, keep flags */
05b3e0c2 2389#ifdef CONFIG_X86_64
e08aa78a
AK
2390 "xchg %0, (%%rsp) \n\t"
2391 "mov %%rax, %c[rax](%0) \n\t"
2392 "mov %%rbx, %c[rbx](%0) \n\t"
2393 "pushq (%%rsp); popq %c[rcx](%0) \n\t"
2394 "mov %%rdx, %c[rdx](%0) \n\t"
2395 "mov %%rsi, %c[rsi](%0) \n\t"
2396 "mov %%rdi, %c[rdi](%0) \n\t"
2397 "mov %%rbp, %c[rbp](%0) \n\t"
2398 "mov %%r8, %c[r8](%0) \n\t"
2399 "mov %%r9, %c[r9](%0) \n\t"
2400 "mov %%r10, %c[r10](%0) \n\t"
2401 "mov %%r11, %c[r11](%0) \n\t"
2402 "mov %%r12, %c[r12](%0) \n\t"
2403 "mov %%r13, %c[r13](%0) \n\t"
2404 "mov %%r14, %c[r14](%0) \n\t"
2405 "mov %%r15, %c[r15](%0) \n\t"
6aa8b732 2406 "mov %%cr2, %%rax \n\t"
e08aa78a 2407 "mov %%rax, %c[cr2](%0) \n\t"
6aa8b732 2408
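	/* the first pop discards the guest-%rcx slot left on the
	   stack; the next two restore the host %rbp and %rdx pushed
	   on entry */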
e08aa78a 2409 "pop %%rbp; pop %%rbp; pop %%rdx \n\t"
6aa8b732 2410#else
e08aa78a
AK
2411 "xchg %0, (%%esp) \n\t"
2412 "mov %%eax, %c[rax](%0) \n\t"
2413 "mov %%ebx, %c[rbx](%0) \n\t"
2414 "pushl (%%esp); popl %c[rcx](%0) \n\t"
2415 "mov %%edx, %c[rdx](%0) \n\t"
2416 "mov %%esi, %c[rsi](%0) \n\t"
2417 "mov %%edi, %c[rdi](%0) \n\t"
2418 "mov %%ebp, %c[rbp](%0) \n\t"
6aa8b732 2419 "mov %%cr2, %%eax \n\t"
e08aa78a 2420 "mov %%eax, %c[cr2](%0) \n\t"
6aa8b732 2421
e08aa78a 2422 "pop %%ebp; pop %%ebp; pop %%edx \n\t"
6aa8b732 2423#endif
e08aa78a
AK
2424 "setbe %c[fail](%0) \n\t"
2425 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
2426 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
2427 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
ad312c7c
ZX
2428 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
2429 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
2430 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
2431 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
2432 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
2433 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
2434 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
05b3e0c2 2435#ifdef CONFIG_X86_64
ad312c7c
ZX
2436 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
2437 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
2438 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
2439 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
2440 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
2441 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
2442 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
2443 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
6aa8b732 2444#endif
ad312c7c 2445 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
c2036300
LV
2446 : "cc", "memory"
2447#ifdef CONFIG_X86_64
2448 , "rbx", "rdi", "rsi"
2449 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
ff593e5a
LV
2450#else
2451 , "ebx", "edi", "rsi"
c2036300
LV
2452#endif
2453 );
6aa8b732 2454
1155f76a 2455 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
9c8cba37
AK
2456 if (vmx->rmode.irq.pending)
2457 fixup_rmode_irq(vmx);
1155f76a 2458
ad312c7c 2459 vcpu->arch.interrupt_window_open =
d77c26fc 2460 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
6aa8b732 2461
d77c26fc 2462 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
15ad7146 2463 vmx->launched = 1;
1b6269db
AK
2464
2465 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2466
2467 /* We need to handle NMIs before interrupts are enabled */
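	/*
	 * 0x200 is interruption type 2 (NMI) in bits 10:8 of the exit
	 * interruption info; "int $2" re-raises it so the host NMI
	 * handler runs while interrupts are still disabled.
	 */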
2468 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
2469 asm("int $2");
6aa8b732
AK
2470}
2471
6aa8b732
AK
2472static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2473{
a2fa3e9f
GH
2474 struct vcpu_vmx *vmx = to_vmx(vcpu);
2475
2476 if (vmx->vmcs) {
8b9cf98c 2477 on_each_cpu(__vcpu_clear, vmx, 0, 1);
a2fa3e9f
GH
2478 free_vmcs(vmx->vmcs);
2479 vmx->vmcs = NULL;
6aa8b732
AK
2480 }
2481}
2482
2483static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2484{
fb3f0f51
RR
2485 struct vcpu_vmx *vmx = to_vmx(vcpu);
2486
6aa8b732 2487 vmx_free_vmcs(vcpu);
fb3f0f51
RR
2488 kfree(vmx->host_msrs);
2489 kfree(vmx->guest_msrs);
2490 kvm_vcpu_uninit(vcpu);
a4770347 2491 kmem_cache_free(kvm_vcpu_cache, vmx);
6aa8b732
AK
2492}
2493
fb3f0f51 2494static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
6aa8b732 2495{
fb3f0f51 2496 int err;
c16f862d 2497 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
15ad7146 2498 int cpu;
6aa8b732 2499
a2fa3e9f 2500 if (!vmx)
fb3f0f51
RR
2501 return ERR_PTR(-ENOMEM);
2502
2503 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
2504 if (err)
2505 goto free_vcpu;
965b58a5 2506
a2fa3e9f 2507 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
fb3f0f51
RR
2508 if (!vmx->guest_msrs) {
2509 err = -ENOMEM;
2510 goto uninit_vcpu;
2511 }
965b58a5 2512
a2fa3e9f
GH
2513 vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2514 if (!vmx->host_msrs)
fb3f0f51 2515 goto free_guest_msrs;
965b58a5 2516
a2fa3e9f
GH
2517 vmx->vmcs = alloc_vmcs();
2518 if (!vmx->vmcs)
fb3f0f51 2519 goto free_msrs;
a2fa3e9f
GH
2520
2521 vmcs_clear(vmx->vmcs);
2522
15ad7146
AK
2523 cpu = get_cpu();
2524 vmx_vcpu_load(&vmx->vcpu, cpu);
8b9cf98c 2525 err = vmx_vcpu_setup(vmx);
fb3f0f51 2526 vmx_vcpu_put(&vmx->vcpu);
15ad7146 2527 put_cpu();
fb3f0f51
RR
2528 if (err)
2529 goto free_vmcs;
2530
2531 return &vmx->vcpu;
2532
2533free_vmcs:
2534 free_vmcs(vmx->vmcs);
2535free_msrs:
2536 kfree(vmx->host_msrs);
2537free_guest_msrs:
2538 kfree(vmx->guest_msrs);
2539uninit_vcpu:
2540 kvm_vcpu_uninit(&vmx->vcpu);
2541free_vcpu:
a4770347 2542 kmem_cache_free(kvm_vcpu_cache, vmx);
fb3f0f51 2543 return ERR_PTR(err);
6aa8b732
AK
2544}
2545
002c7f7c
YS
2546static void __init vmx_check_processor_compat(void *rtn)
2547{
2548 struct vmcs_config vmcs_conf;
2549
2550 *(int *)rtn = 0;
2551 if (setup_vmcs_config(&vmcs_conf) < 0)
2552 *(int *)rtn = -EIO;
2553 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
2554 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
2555 smp_processor_id());
2556 *(int *)rtn = -EIO;
2557 }
2558}
2559
cbdd1bea 2560static struct kvm_x86_ops vmx_x86_ops = {
6aa8b732
AK
2561 .cpu_has_kvm_support = cpu_has_kvm_support,
2562 .disabled_by_bios = vmx_disabled_by_bios,
2563 .hardware_setup = hardware_setup,
2564 .hardware_unsetup = hardware_unsetup,
002c7f7c 2565 .check_processor_compatibility = vmx_check_processor_compat,
6aa8b732
AK
2566 .hardware_enable = hardware_enable,
2567 .hardware_disable = hardware_disable,
2568
2569 .vcpu_create = vmx_create_vcpu,
2570 .vcpu_free = vmx_free_vcpu,
04d2cc77 2571 .vcpu_reset = vmx_vcpu_reset,
6aa8b732 2572
04d2cc77 2573 .prepare_guest_switch = vmx_save_host_state,
6aa8b732
AK
2574 .vcpu_load = vmx_vcpu_load,
2575 .vcpu_put = vmx_vcpu_put,
774c47f1 2576 .vcpu_decache = vmx_vcpu_decache,
6aa8b732
AK
2577
2578 .set_guest_debug = set_guest_debug,
04d2cc77 2579 .guest_debug_pre = kvm_guest_debug_pre,
6aa8b732
AK
2580 .get_msr = vmx_get_msr,
2581 .set_msr = vmx_set_msr,
2582 .get_segment_base = vmx_get_segment_base,
2583 .get_segment = vmx_get_segment,
2584 .set_segment = vmx_set_segment,
6aa8b732 2585 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
25c4c276 2586 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
6aa8b732 2587 .set_cr0 = vmx_set_cr0,
6aa8b732
AK
2588 .set_cr3 = vmx_set_cr3,
2589 .set_cr4 = vmx_set_cr4,
05b3e0c2 2590#ifdef CONFIG_X86_64
6aa8b732
AK
2591 .set_efer = vmx_set_efer,
2592#endif
2593 .get_idt = vmx_get_idt,
2594 .set_idt = vmx_set_idt,
2595 .get_gdt = vmx_get_gdt,
2596 .set_gdt = vmx_set_gdt,
2597 .cache_regs = vcpu_load_rsp_rip,
2598 .decache_regs = vcpu_put_rsp_rip,
2599 .get_rflags = vmx_get_rflags,
2600 .set_rflags = vmx_set_rflags,
2601
2602 .tlb_flush = vmx_flush_tlb,
6aa8b732 2603
6aa8b732 2604 .run = vmx_vcpu_run,
04d2cc77 2605 .handle_exit = kvm_handle_exit,
6aa8b732 2606 .skip_emulated_instruction = skip_emulated_instruction,
102d8325 2607 .patch_hypercall = vmx_patch_hypercall,
2a8067f1
ED
2608 .get_irq = vmx_get_irq,
2609 .set_irq = vmx_inject_irq,
298101da
AK
2610 .queue_exception = vmx_queue_exception,
2611 .exception_injected = vmx_exception_injected,
04d2cc77
AK
2612 .inject_pending_irq = vmx_intr_assist,
2613 .inject_pending_vectors = do_interrupt_requests,
cbc94022
IE
2614
2615 .set_tss_addr = vmx_set_tss_addr,
6aa8b732
AK
2616};
2617
2618static int __init vmx_init(void)
2619{
fdef3ad1
HQ
2620 void *iova;
2621 int r;
2622
2623 vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2624 if (!vmx_io_bitmap_a)
2625 return -ENOMEM;
2626
2627 vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2628 if (!vmx_io_bitmap_b) {
2629 r = -ENOMEM;
2630 goto out;
2631 }
2632
2633 /*
2634 * Allow direct access to the PC debug port (it is often used for I/O
2635 * delays, but the vmexits simply slow things down).
2636 */
2637 iova = kmap(vmx_io_bitmap_a);
2638 memset(iova, 0xff, PAGE_SIZE);
2639 clear_bit(0x80, iova);
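	/*
	 * A set bit in an I/O bitmap forces a vmexit for that port,
	 * so starting from all-ones and clearing bit 0x80 traps every
	 * port except the PC debug port.
	 */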
cd0536d7 2640 kunmap(vmx_io_bitmap_a);
fdef3ad1
HQ
2641
2642 iova = kmap(vmx_io_bitmap_b);
2643 memset(iova, 0xff, PAGE_SIZE);
cd0536d7 2644 kunmap(vmx_io_bitmap_b);
fdef3ad1 2645
cb498ea2 2646 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
fdef3ad1
HQ
2647 if (r)
2648 goto out1;
2649
c7addb90
AK
2650 if (bypass_guest_pf)
2651 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
2652
fdef3ad1
HQ
2653 return 0;
2654
2655out1:
2656 __free_page(vmx_io_bitmap_b);
2657out:
2658 __free_page(vmx_io_bitmap_a);
2659 return r;
6aa8b732
AK
2660}
2661
2662static void __exit vmx_exit(void)
2663{
fdef3ad1
HQ
2664 __free_page(vmx_io_bitmap_b);
2665 __free_page(vmx_io_bitmap_a);
2666
cb498ea2 2667 kvm_exit();
6aa8b732
AK
2668}
2669
2670module_init(vmx_init)
2671module_exit(vmx_exit)