KVM: x86: use kvm_get_gdt() and kvm_read_ldt()

arch/x86/kvm/vmx.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include "irq.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static int __read_mostly bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, S_IRUGO);

static int __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static int __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static int __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static int __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	struct list_head      local_vcpus_link;
	unsigned long         host_rsp;
	int                   launched;
	u8                    fail;
	u32                   idt_vectoring_info;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	int                   msr_offset_efer;
#ifdef CONFIG_X86_64
	int                   msr_offset_kernel_gs_base;
#endif
	struct vmcs          *vmcs;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
		int           guest_efer_loaded;
	} host_state;
	struct {
		int vm86_active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
		struct {
			bool pending;
			u8 vector;
			unsigned rip;
		} irq;
	} rmode;
	int vpid;
	bool emulation_required;
	enum emulation_result invalid_state_emulation_result;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;
#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}
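/*
 * load_msrs()/save_msrs() are plain wrmsrl/rdmsrl loops over an array
 * of kvm_msr_entry; the host-state code below uses them to swap the
 * guest and host values of the MSRs listed in vmx_msr_index[] around
 * guest entry and exit.
 */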
static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline int is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
static inline int cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline int cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}

static inline int cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
}

static inline int cpu_has_vmx_invept_individual_addr(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
}

static inline int cpu_has_vmx_invept_context(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
}

static inline int cpu_has_vmx_invept_global(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
}

static inline int cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline int cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled &&
		(cpu_has_vmx_virtualize_apic_accesses()) &&
		(irqchip_in_kernel(kvm));
}

static inline int cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline int cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}

static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}
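/*
 * __invvpid()/__invept() follow the INVVPID/INVEPT instruction format:
 * a 128-bit descriptor in memory (the struct built on the stack above,
 * passed via rax) plus a register operand selecting the invalidation
 * extent (passed via rcx).  The trailing "ja 1f ; ud2" turns the CF/ZF
 * failure indication into a trap instead of silently continuing with
 * stale TLB mappings.
 */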
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}
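/*
 * VMCLEAR writes any processor-cached VMCS state back to memory and
 * clears the launch state, so a later VMPTRLD + VMLAUNCH on any cpu
 * starts from a consistent image; "setna %0" captures the CF/ZF
 * failure indication of the instruction.
 */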
static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.arch.host_tsc);
	list_del(&vmx->local_vcpus_link);
	vmx->vcpu.cpu = -1;
	vmx->launched = 0;
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu == -1)
		return;
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
}

static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_individual_addr())
			__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
					eptp, gpa);
		else
			ept_sync_context(eptp);
	}
}
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}
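/*
 * On 32-bit hosts a 64-bit VMCS field is accessed as two 32-bit
 * halves: encoding 'field' selects the low word and 'field+1' the
 * high word, which is why vmcs_read64()/vmcs_write64() issue a second
 * access with field+1 when CONFIG_X86_64 is not set.  The empty
 * asm volatile ("") between the two writes is just a compiler barrier.
 */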
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			eb |= 1u << DB_VECTOR;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			eb |= 1u << BP_VECTOR;
	}
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct desc_struct *descs;

	kvm_get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}
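/*
 * reload_tss() must flip the GDT entry back to type 9 (available TSS)
 * before load_TR_desc(): LTR faults on a busy TSS descriptor, and the
 * descriptor was marked busy when TR was originally loaded.
 */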
static void load_transition_efer(struct vcpu_vmx *vmx)
{
	int efer_offset = vmx->msr_offset_efer;
	u64 host_efer;
	u64 guest_efer;
	u64 ignore_bits;

	if (efer_offset < 0)
		return;
	host_efer = vmx->host_msrs[efer_offset].data;
	guest_efer = vmx->guest_msrs[efer_offset].data;
	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
		return;

	vmx->host_state.guest_efer_loaded = 1;
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	wrmsrl(MSR_EFER, guest_efer);
	vmx->vcpu.stat.efer_reload++;
}

static void reload_host_efer(struct vcpu_vmx *vmx)
{
	if (vmx->host_state.guest_efer_loaded) {
		vmx->host_state.guest_efer_loaded = 0;
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
	}
}
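/*
 * Example of the ignore_bits logic above: if host and guest EFER
 * differ only in EFER_NX (which is emulated) or, outside long mode,
 * EFER_SCE, the masked values compare equal and the expensive
 * wrmsrl(MSR_EFER, ...) on every guest entry is skipped entirely.
 */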
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = kvm_read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	vmx->host_state.gs_sel = kvm_read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);

#endif
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_transition_efer(vmx);
}

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		kvm_load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);
	}
	reload_tss();
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	reload_host_efer(vmx);
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}
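/*
 * The "& 7" tests above check the selector's TI bit (bit 2, LDT vs
 * GDT) and RPL (bits 1:0) together: a host FS/GS selector may only be
 * kept across VM entry if it indexes the GDT with RPL 0, per the
 * host-state checks in SDM section 22.2.3 cited above; anything else
 * is zeroed in the VMCS and reloaded manually on the way out.
 */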
/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta, new_offset;

	if (vcpu->cpu != cpu) {
		vcpu_clear(vmx);
		kvm_migrate_timers(vcpu);
		vpid_sync_vcpu_all(vmx);
		local_irq_disable();
		list_add(&vmx->local_vcpus_link,
			 &per_cpu(vcpus_on_cpu, cpu));
		local_irq_enable();
	}

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
		kvm_get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonic.
		 */
		rdtscll(tsc_this);
		if (tsc_this < vcpu->arch.host_tsc) {
			delta = vcpu->arch.host_tsc - tsc_this;
			new_offset = vmcs_read64(TSC_OFFSET) + delta;
			vmcs_write64(TSC_OFFSET, new_offset);
		}
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->arch.cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (to_vmx(vcpu)->rmode.vm86_active)
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= X86_SHADOW_INT_MOV_SS;

	return ret & mask;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	if (mask & X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if ((interruptibility != interruptibility_old))
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);
}
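/*
 * GUEST_INTR_STATE_STI / GUEST_INTR_STATE_MOV_SS mirror the x86
 * "interrupt shadow": for one instruction after STI or MOV SS the CPU
 * inhibits interrupt delivery, so the emulator must be able to read
 * and rewrite this state, and must clear it whenever an instruction
 * is skipped, as skip_emulated_instruction() does above.
 */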
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		vmx->rmode.irq.pending = true;
		vmx->rmode.irq.vector = nr;
		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
		if (kvm_exception_is_soft(nr))
			vmx->rmode.irq.rip +=
				vmx->vcpu.arch.event_exit_inst_len;
		intr_info |= INTR_TYPE_SOFT_INTR;
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
		kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}
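/*
 * The vm86_active path above cannot use ordinary hardware exception
 * injection: it injects the event as a soft interrupt with a declared
 * instruction length of 1 and backs rip up by one, so the guest
 * resumes at the original rip after the IVT-style delivery.
 * vmx->rmode.irq records the event so that later exit handling can
 * undo this fixup if the injection does not complete.
 */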
/*
 * Swap MSR entry in host/guest MSR entry array.
 */
#ifdef CONFIG_X86_64
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;
}
#endif

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs;
	unsigned long *msr_bitmap;

	vmx_load_host_state(vmx);
	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		int index;

		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_K6_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	vmx->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
#endif
	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);

	if (cpu_has_vmx_msr_bitmap()) {
		if (is_long_mode(&vmx->vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;

		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
	}
}
/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
{
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}
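/*
 * Worked example: with host_tsc = 10000 and a desired guest_tsc of
 * 4000, guest_write_tsc() stores TSC_OFFSET = 4000 - 10000 = -6000;
 * a later guest rdtsc (or guest_read_tsc()) at host_tsc = 12000 then
 * returns 12000 + (-6000) = 6000, i.e. the guest clock keeps running
 * forward from the value that was written.
 */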
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct kvm_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
	case MSR_IA32_TSC:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		vmx_load_host_state(to_vmx(vcpu));
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;
	u64 host_tsc;
	int ret = 0;

	switch (msr_index) {
	case MSR_EFER:
		vmx_load_host_state(vmx);
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TSC:
		rdtscll(host_tsc);
		guest_write_tsc(data, host_tsc);
		break;
	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		/* Otherwise falls through to kvm_set_msr_common */
	default:
		vmx_load_host_state(vmx);
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			msr->data = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	default:
		break;
	}
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	int old_debug = vcpu->guest_debug;
	unsigned long flags;

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
	else
		vmcs_writel(GUEST_DR7, vcpu->arch.dr7);

	flags = vmcs_readl(GUEST_RFLAGS);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
	vmcs_writel(GUEST_RFLAGS, flags);

	update_exception_bitmap(vcpu);

	return 0;
}
static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (FEATURE_CONTROL_LOCKED |
		       FEATURE_CONTROL_VMXON_ENABLED))
	    == FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}
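/*
 * MSR_IA32_FEATURE_CONTROL is write-once per reset: once the BIOS sets
 * FEATURE_CONTROL_LOCKED, further writes fault.  "Locked but not
 * VMXON-enabled" therefore means VMX was disabled in the BIOS and
 * cannot be turned on here, which is what vmx_disabled_by_bios()
 * reports; hardware_enable() below sets and locks the bits itself
 * whenever the BIOS left the MSR unlocked.
 */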
static void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (FEATURE_CONTROL_LOCKED |
		    FEATURE_CONTROL_VMXON_ENABLED))
	    != (FEATURE_CONTROL_LOCKED |
		FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       FEATURE_CONTROL_LOCKED |
		       FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX
		      : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}

static void vmclear_local_vcpus(void)
{
	int cpu = raw_smp_processor_id();
	struct vcpu_vmx *vmx, *n;

	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
				 local_vcpus_link)
		__vcpu_clear(vmx);
}


/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
static void kvm_cpu_vmxoff(void)
{
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}

static void hardware_disable(void *garbage)
{
	vmclear_local_vcpus();
	kvm_cpu_vmxoff();
}
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}
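/*
 * Example: for MSR_IA32_VMX_PROCBASED_CTLS the low word has a bit set
 * for every control that must be 1 and the high word has a bit set
 * for every control that may be 1.  With ctl_min = CPU_BASED_HLT_EXITING
 * and ctl_opt = CPU_BASED_TPR_SHADOW, adjust_vmx_controls() ORs both
 * in, clears TPR_SHADOW again if the high word does not allow it, and
 * fails with -EIO only if HLT exiting itself is unsupported.
 */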
static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_INVLPG_EXITING;
	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif
	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when
		   EPT is enabled */
		min &= ~(CPU_BASED_CR3_LOAD_EXITING |
			 CPU_BASED_CR3_STORE_EXITING |
			 CPU_BASED_INVLPG_EXITING);
		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
					&_cpu_based_exec_control) < 0)
			return -EIO;
		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
		      vmx_capability.ept, vmx_capability.vpid);
	}

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = 0;
	opt = VM_ENTRY_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl         = _vmexit_control;
	vmcs_conf->vmentry_ctrl        = _vmentry_control;

	return 0;
}
static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (!cpu_has_vmx_vpid())
		enable_vpid = 0;

	if (!cpu_has_vmx_ept()) {
		enable_ept = 0;
		enable_unrestricted_guest = 0;
	}

	if (!cpu_has_vmx_unrestricted_guest())
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 0;

	vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		return;

	fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

static gva_t rmode_tss_base(struct kvm *kvm)
{
	if (!kvm->arch.tss_addr) {
		gfn_t base_gfn = kvm->memslots[0].base_gfn +
				 kvm->memslots[0].npages - 3;
		return base_gfn << PAGE_SHIFT;
	}
	return kvm->arch.tss_addr;
}
static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, save->base >> 4);
	vmcs_write32(sf->base, save->base & 0xfffff);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_unrestricted_guest)
		return;

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 1;

	vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vmx->rmode.save_iopl
		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		goto continue_rmode;

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);

continue_rmode:
	kvm_mmu_reset_context(vcpu);
	init_rmode(vcpu->kvm);
}
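/*
 * The >> 4 / << 4 arithmetic above is just real-mode segmentation:
 * a vm86 segment with selector S addresses base S * 16, so e.g. a
 * protected-mode base of 0xf0000 becomes selector 0xf000.
 * fix_rmode_seg() can therefore only fake bases below 1MB, hence
 * the "save->base & 0xfffff" mask.
 */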
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	vcpu->arch.shadow_efer = efer;
	if (!msr)
		return;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_IA32E_MODE);
		msr->data = efer;
	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vmx);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
		       __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}
	vcpu->arch.shadow_efer |= EFER_LMA;
	vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);
}

#endif
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	vpid_sync_vcpu_all(to_vmx(vcpu));
	if (enable_ept)
		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
}

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_dirty))
		return;

	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
		vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
		vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
		vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
	}
}

static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
		vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
		vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
		vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
	}

	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);

static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
					unsigned long cr0,
					struct kvm_vcpu *vcpu)
{
	if (!(cr0 & X86_CR0_PG)) {
		/* From paging/starting to nonpaging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
			     (CPU_BASED_CR3_LOAD_EXITING |
			      CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, vcpu->arch.cr4);
		*hw_cr0 &= ~X86_CR0_WP;
	} else if (!is_paging(vcpu)) {
		/* From nonpaging to paging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
			     ~(CPU_BASED_CR3_LOAD_EXITING |
			       CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, vcpu->arch.cr4);
		if (!(vcpu->arch.cr0 & X86_CR0_WP))
			*hw_cr0 &= ~X86_CR0_WP;
	}
}

static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
					struct kvm_vcpu *vcpu)
{
	if (!is_paging(vcpu)) {
		*hw_cr4 &= ~X86_CR4_PAE;
		*hw_cr4 |= X86_CR4_PSE;
	} else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
		*hw_cr4 &= ~X86_CR4_PAE;
}
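/*
 * A PAE guest loads its four page-directory-pointer entries when CR3
 * is written; with EPT enabled the CPU instead takes them from the
 * GUEST_PDPTR0..3 VMCS fields, so ept_load_pdptrs()/ept_save_pdptrs()
 * keep those fields and vcpu->arch.pdptrs[] in sync, using the
 * regs_avail/regs_dirty bits to avoid redundant VMCS accesses.
 */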
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long hw_cr0;

	if (enable_unrestricted_guest)
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
			| KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
	else
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;

	vmx_fpu_deactivate(vcpu);

	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	if (enable_ept)
		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0, hw_cr0);
	vcpu->arch.cr0 = cr0;

	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
		vmx_fpu_activate(vcpu);
}

static u64 construct_eptp(unsigned long root_hpa)
{
	u64 eptp;

	/* TODO write the value reading from MSR */
	eptp = VMX_EPT_DEFAULT_MT |
		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
	eptp |= (root_hpa & PAGE_MASK);

	return eptp;
}
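/*
 * EPTP layout produced by construct_eptp(): bits 2:0 hold the EPT
 * paging-structure memory type (6 = write-back), bits 5:3 the
 * page-walk length minus one, and bits 63:12 the physical address of
 * the EPT PML4 table; VMX_EPT_DEFAULT_MT and VMX_EPT_DEFAULT_GAW
 * supply the first two.  The TODO above refers to deriving these from
 * MSR_IA32_VMX_EPT_VPID_CAP instead of using fixed defaults.
 */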
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	unsigned long guest_cr3;
	u64 eptp;

	guest_cr3 = cr3;
	if (enable_ept) {
		eptp = construct_eptp(cr3);
		vmcs_write64(EPT_POINTER, eptp);
		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
			VMX_EPT_IDENTITY_PAGETABLE_ADDR;
	}

	vmx_flush_tlb(vcpu);
	vmcs_writel(GUEST_CR3, guest_cr3);
	if (vcpu->arch.cr0 & X86_CR0_PE)
		vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

	vcpu->arch.cr4 = cr4;
	if (enable_ept)
		ept_update_paging_mode_cr4(&hw_cr4, vcpu);

	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, hw_cr4);
}
6aa8b732
AK
1744static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1745{
1746 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1747
1748 return vmcs_readl(sf->base);
1749}
1750
1751static void vmx_get_segment(struct kvm_vcpu *vcpu,
1752 struct kvm_segment *var, int seg)
1753{
1754 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1755 u32 ar;
1756
1757 var->base = vmcs_readl(sf->base);
1758 var->limit = vmcs_read32(sf->limit);
1759 var->selector = vmcs_read16(sf->selector);
1760 ar = vmcs_read32(sf->ar_bytes);
9fd4a3b7 1761 if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
6aa8b732
AK
1762 ar = 0;
1763 var->type = ar & 15;
1764 var->s = (ar >> 4) & 1;
1765 var->dpl = (ar >> 5) & 3;
1766 var->present = (ar >> 7) & 1;
1767 var->avl = (ar >> 12) & 1;
1768 var->l = (ar >> 13) & 1;
1769 var->db = (ar >> 14) & 1;
1770 var->g = (ar >> 15) & 1;
1771 var->unusable = (ar >> 16) & 1;
1772}
1773
2e4d2653
IE
1774static int vmx_get_cpl(struct kvm_vcpu *vcpu)
1775{
1776 struct kvm_segment kvm_seg;
1777
1778 if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
1779 return 0;
1780
1781 if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
1782 return 3;
1783
1784 vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
1785 return kvm_seg.selector & 3;
1786}
1787
653e3108 1788static u32 vmx_segment_access_rights(struct kvm_segment *var)
6aa8b732 1789{
6aa8b732
AK
1790 u32 ar;
1791
653e3108 1792 if (var->unusable)
6aa8b732
AK
1793 ar = 1 << 16;
1794 else {
1795 ar = var->type & 15;
1796 ar |= (var->s & 1) << 4;
1797 ar |= (var->dpl & 3) << 5;
1798 ar |= (var->present & 1) << 7;
1799 ar |= (var->avl & 1) << 12;
1800 ar |= (var->l & 1) << 13;
1801 ar |= (var->db & 1) << 14;
1802 ar |= (var->g & 1) << 15;
1803 }
f7fbf1fd
UL
1804 if (ar == 0) /* a 0 value means unusable */
1805 ar = AR_UNUSABLE_MASK;
653e3108
AK
1806
1807 return ar;
1808}
1809
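/*
 * Illustrative sketch, not part of the original file: packing a flat
 * 32-bit code segment (type 0xb, s=1, dpl=0, present, db=1, g=1)
 * through vmx_segment_access_rights() yields the familiar AR value
 * 0xc09b. The helper name below is hypothetical.
 */
static void __maybe_unused segment_ar_example(void)
{
	struct kvm_segment cs = {
		.type = 0xb, .s = 1, .dpl = 0, .present = 1,
		.db = 1, .g = 1,
	};

	/* 0xb | 1 << 4 | 1 << 7 | 1 << 14 | 1 << 15 == 0xc09b */
	WARN_ON(vmx_segment_access_rights(&cs) != 0xc09b);
}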
1810static void vmx_set_segment(struct kvm_vcpu *vcpu,
1811 struct kvm_segment *var, int seg)
1812{
7ffd92c5 1813 struct vcpu_vmx *vmx = to_vmx(vcpu);
653e3108
AK
1814 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1815 u32 ar;
1816
7ffd92c5
AK
1817 if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
1818 vmx->rmode.tr.selector = var->selector;
1819 vmx->rmode.tr.base = var->base;
1820 vmx->rmode.tr.limit = var->limit;
1821 vmx->rmode.tr.ar = vmx_segment_access_rights(var);
653e3108
AK
1822 return;
1823 }
1824 vmcs_writel(sf->base, var->base);
1825 vmcs_write32(sf->limit, var->limit);
1826 vmcs_write16(sf->selector, var->selector);
7ffd92c5 1827 if (vmx->rmode.vm86_active && var->s) {
653e3108
AK
1828 /*
1829 * Hack real-mode segments into vm86 compatibility.
1830 */
1831 if (var->base == 0xffff0000 && var->selector == 0xf000)
1832 vmcs_writel(sf->base, 0xf0000);
1833 ar = 0xf3;
1834 } else
1835 ar = vmx_segment_access_rights(var);
3a624e29
NK
1836
1837 /*
1838 * Fix the "Accessed" bit in the AR field of segment registers for
1839 * older qemu binaries.
1840 * The IA-32 architecture specifies that at processor reset the
1841 * "Accessed" bit in the AR field of segment registers is 1, but qemu
1842 * sets it to 0 in its userland code. This causes an invalid-guest-
1843 * state vmexit when "unrestricted guest" mode is turned on.
1844 * A fix for this setup issue in cpu_reset is being pushed into the
1845 * qemu tree. Newer qemu binaries with that fix will not need this
1846 * kvm hack.
1847 */
1848 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
1849 ar |= 0x1; /* Accessed */
1850
6aa8b732
AK
1851 vmcs_write32(sf->ar_bytes, ar);
1852}
1853
6aa8b732
AK
1854static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1855{
1856 u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1857
1858 *db = (ar >> 14) & 1;
1859 *l = (ar >> 13) & 1;
1860}
1861
1862static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1863{
1864 dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1865 dt->base = vmcs_readl(GUEST_IDTR_BASE);
1866}
1867
1868static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1869{
1870 vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1871 vmcs_writel(GUEST_IDTR_BASE, dt->base);
1872}
1873
1874static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1875{
1876 dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1877 dt->base = vmcs_readl(GUEST_GDTR_BASE);
1878}
1879
1880static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1881{
1882 vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1883 vmcs_writel(GUEST_GDTR_BASE, dt->base);
1884}
1885
648dfaa7
MG
1886static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
1887{
1888 struct kvm_segment var;
1889 u32 ar;
1890
1891 vmx_get_segment(vcpu, &var, seg);
1892 ar = vmx_segment_access_rights(&var);
1893
1894 if (var.base != (var.selector << 4))
1895 return false;
1896 if (var.limit != 0xffff)
1897 return false;
1898 if (ar != 0xf3)
1899 return false;
1900
1901 return true;
1902}
1903
1904static bool code_segment_valid(struct kvm_vcpu *vcpu)
1905{
1906 struct kvm_segment cs;
1907 unsigned int cs_rpl;
1908
1909 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
1910 cs_rpl = cs.selector & SELECTOR_RPL_MASK;
1911
1872a3f4
AK
1912 if (cs.unusable)
1913 return false;
648dfaa7
MG
1914 if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
1915 return false;
1916 if (!cs.s)
1917 return false;
1872a3f4 1918 if (cs.type & AR_TYPE_WRITEABLE_MASK) {
648dfaa7
MG
1919 if (cs.dpl > cs_rpl)
1920 return false;
1872a3f4 1921 } else {
648dfaa7
MG
1922 if (cs.dpl != cs_rpl)
1923 return false;
1924 }
1925 if (!cs.present)
1926 return false;
1927
1928 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
1929 return true;
1930}
1931
1932static bool stack_segment_valid(struct kvm_vcpu *vcpu)
1933{
1934 struct kvm_segment ss;
1935 unsigned int ss_rpl;
1936
1937 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
1938 ss_rpl = ss.selector & SELECTOR_RPL_MASK;
1939
1872a3f4
AK
1940 if (ss.unusable)
1941 return true;
1942 if (ss.type != 3 && ss.type != 7)
648dfaa7
MG
1943 return false;
1944 if (!ss.s)
1945 return false;
1946 if (ss.dpl != ss_rpl) /* DPL != RPL */
1947 return false;
1948 if (!ss.present)
1949 return false;
1950
1951 return true;
1952}
1953
1954static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
1955{
1956 struct kvm_segment var;
1957 unsigned int rpl;
1958
1959 vmx_get_segment(vcpu, &var, seg);
1960 rpl = var.selector & SELECTOR_RPL_MASK;
1961
1872a3f4
AK
1962 if (var.unusable)
1963 return true;
648dfaa7
MG
1964 if (!var.s)
1965 return false;
1966 if (!var.present)
1967 return false;
1968 if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
1969 if (var.dpl < rpl) /* DPL < RPL */
1970 return false;
1971 }
1972
1973 /* TODO: Add other members to kvm_segment_field to allow checking for other access
1974 * rights flags
1975 */
1976 return true;
1977}
1978
1979static bool tr_valid(struct kvm_vcpu *vcpu)
1980{
1981 struct kvm_segment tr;
1982
1983 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
1984
1872a3f4
AK
1985 if (tr.unusable)
1986 return false;
648dfaa7
MG
1987 if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
1988 return false;
1872a3f4 1989 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
648dfaa7
MG
1990 return false;
1991 if (!tr.present)
1992 return false;
1993
1994 return true;
1995}
1996
1997static bool ldtr_valid(struct kvm_vcpu *vcpu)
1998{
1999 struct kvm_segment ldtr;
2000
2001 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
2002
1872a3f4
AK
2003 if (ldtr.unusable)
2004 return true;
648dfaa7
MG
2005 if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
2006 return false;
2007 if (ldtr.type != 2)
2008 return false;
2009 if (!ldtr.present)
2010 return false;
2011
2012 return true;
2013}
2014
2015static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
2016{
2017 struct kvm_segment cs, ss;
2018
2019 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
2020 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
2021
2022 return ((cs.selector & SELECTOR_RPL_MASK) ==
2023 (ss.selector & SELECTOR_RPL_MASK));
2024}
2025
2026/*
2027 * Check if guest state is valid. Returns true if valid, false if
2028 * not.
2029 * We assume that registers are always usable.
2030 */
2031static bool guest_state_valid(struct kvm_vcpu *vcpu)
2032{
2033 /* real mode guest state checks */
2034 if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
2035 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
2036 return false;
2037 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
2038 return false;
2039 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
2040 return false;
2041 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
2042 return false;
2043 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
2044 return false;
2045 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
2046 return false;
2047 } else {
2048 /* protected mode guest state checks */
2049 if (!cs_ss_rpl_check(vcpu))
2050 return false;
2051 if (!code_segment_valid(vcpu))
2052 return false;
2053 if (!stack_segment_valid(vcpu))
2054 return false;
2055 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
2056 return false;
2057 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
2058 return false;
2059 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
2060 return false;
2061 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
2062 return false;
2063 if (!tr_valid(vcpu))
2064 return false;
2065 if (!ldtr_valid(vcpu))
2066 return false;
2067 }
2068 /* TODO:
2069 * - Add checks on RIP
2070 * - Add checks on RFLAGS
2071 */
2072
2073 return true;
2074}
2075
d77c26fc 2076static int init_rmode_tss(struct kvm *kvm)
6aa8b732 2077{
6aa8b732 2078 gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
195aefde 2079 u16 data = 0;
10589a46 2080 int ret = 0;
195aefde 2081 int r;
6aa8b732 2082
195aefde
IE
2083 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2084 if (r < 0)
10589a46 2085 goto out;
195aefde 2086 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
464d17c8
SY
2087 r = kvm_write_guest_page(kvm, fn++, &data,
2088 TSS_IOPB_BASE_OFFSET, sizeof(u16));
195aefde 2089 if (r < 0)
10589a46 2090 goto out;
195aefde
IE
2091 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
2092 if (r < 0)
10589a46 2093 goto out;
195aefde
IE
2094 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2095 if (r < 0)
10589a46 2096 goto out;
195aefde 2097 data = ~0;
10589a46
MT
2098 r = kvm_write_guest_page(kvm, fn, &data,
2099 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
2100 sizeof(u8));
195aefde 2101 if (r < 0)
10589a46
MT
2102 goto out;
2103
2104 ret = 1;
2105out:
10589a46 2106 return ret;
6aa8b732
AK
2107}
2108
b7ebfb05
SY
2109static int init_rmode_identity_map(struct kvm *kvm)
2110{
2111 int i, r, ret;
2112 pfn_t identity_map_pfn;
2113 u32 tmp;
2114
089d034e 2115 if (!enable_ept)
b7ebfb05
SY
2116 return 1;
2117 if (unlikely(!kvm->arch.ept_identity_pagetable)) {
2118 printk(KERN_ERR "EPT: identity-mapping pagetable "
2119 "hasn't been allocated!\n");
2120 return 0;
2121 }
2122 if (likely(kvm->arch.ept_identity_pagetable_done))
2123 return 1;
2124 ret = 0;
2125 identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT;
2126 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
2127 if (r < 0)
2128 goto out;
2129 /* Set up identity-mapping pagetable for EPT in real mode */
2130 for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
2131 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
2132 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
2133 r = kvm_write_guest_page(kvm, identity_map_pfn,
2134 &tmp, i * sizeof(tmp), sizeof(tmp));
2135 if (r < 0)
2136 goto out;
2137 }
2138 kvm->arch.ept_identity_pagetable_done = true;
2139 ret = 1;
2140out:
2141 return ret;
2142}
2143
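/*
 * Worked example (added for illustration): each of the 1024 entries
 * written above is a 4MB PSE mapping, so entry i covers guest
 * physical addresses [i << 22, (i + 1) << 22). With the standard x86
 * flag values the constant part is 0xe7; entry 1, for instance,
 * becomes 0x004000e7, and the full page identity-maps the guest's
 * first 4GB.
 */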
6aa8b732
AK
2144static void seg_setup(int seg)
2145{
2146 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3a624e29 2147 unsigned int ar;
6aa8b732
AK
2148
2149 vmcs_write16(sf->selector, 0);
2150 vmcs_writel(sf->base, 0);
2151 vmcs_write32(sf->limit, 0xffff);
3a624e29
NK
2152 if (enable_unrestricted_guest) {
2153 ar = 0x93;
2154 if (seg == VCPU_SREG_CS)
2155 ar |= 0x08; /* code segment */
2156 } else
2157 ar = 0xf3;
2158
2159 vmcs_write32(sf->ar_bytes, ar);
6aa8b732
AK
2160}
2161
f78e0e2e
SY
2162static int alloc_apic_access_page(struct kvm *kvm)
2163{
2164 struct kvm_userspace_memory_region kvm_userspace_mem;
2165 int r = 0;
2166
72dc67a6 2167 down_write(&kvm->slots_lock);
bfc6d222 2168 if (kvm->arch.apic_access_page)
f78e0e2e
SY
2169 goto out;
2170 kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
2171 kvm_userspace_mem.flags = 0;
2172 kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
2173 kvm_userspace_mem.memory_size = PAGE_SIZE;
2174 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2175 if (r)
2176 goto out;
72dc67a6 2177
bfc6d222 2178 kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
f78e0e2e 2179out:
72dc67a6 2180 up_write(&kvm->slots_lock);
f78e0e2e
SY
2181 return r;
2182}
2183
b7ebfb05
SY
2184static int alloc_identity_pagetable(struct kvm *kvm)
2185{
2186 struct kvm_userspace_memory_region kvm_userspace_mem;
2187 int r = 0;
2188
2189 down_write(&kvm->slots_lock);
2190 if (kvm->arch.ept_identity_pagetable)
2191 goto out;
2192 kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
2193 kvm_userspace_mem.flags = 0;
2194 kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
2195 kvm_userspace_mem.memory_size = PAGE_SIZE;
2196 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2197 if (r)
2198 goto out;
2199
b7ebfb05
SY
2200 kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
2201 VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
b7ebfb05
SY
2202out:
2203 up_write(&kvm->slots_lock);
2204 return r;
2205}
2206
2384d2b3
SY
2207static void allocate_vpid(struct vcpu_vmx *vmx)
2208{
2209 int vpid;
2210
2211 vmx->vpid = 0;
919818ab 2212 if (!enable_vpid)
2384d2b3
SY
2213 return;
2214 spin_lock(&vmx_vpid_lock);
2215 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
2216 if (vpid < VMX_NR_VPIDS) {
2217 vmx->vpid = vpid;
2218 __set_bit(vpid, vmx_vpid_bitmap);
2219 }
2220 spin_unlock(&vmx_vpid_lock);
2221}
2222
5897297b 2223static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
25c5f225 2224{
3e7c73e9 2225 int f = sizeof(unsigned long);
25c5f225
SY
2226
2227 if (!cpu_has_vmx_msr_bitmap())
2228 return;
2229
2230 /*
2231 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
2232 * have the write-low and read-high bitmap offsets the wrong way round.
2233 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
2234 */
25c5f225 2235 if (msr <= 0x1fff) {
3e7c73e9
AK
2236 __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
2237 __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
25c5f225
SY
2238 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2239 msr &= 0x1fff;
3e7c73e9
AK
2240 __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
2241 __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
25c5f225 2242 }
25c5f225
SY
2243}
2244
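/*
 * Illustrative sketch, not part of the original file: MSR_FS_BASE is
 * 0xc0000100, which falls in the high range, so the call below clears
 * bit 0x100 both in the read-high area (byte offset 0x400) and in the
 * write-high area (byte offset 0xc00). The wrapper name is
 * hypothetical.
 */
static void __maybe_unused msr_bitmap_example(unsigned long *msr_bitmap)
{
	__vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE);
}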
5897297b
AK
2245static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
2246{
2247 if (!longmode_only)
2248 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
2249 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
2250}
2251
6aa8b732
AK
2252/*
2253 * Sets up the vmcs for emulated real mode.
2254 */
8b9cf98c 2255static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
6aa8b732 2256{
468d472f 2257 u32 host_sysenter_cs, msr_low, msr_high;
6aa8b732 2258 u32 junk;
53f658b3 2259 u64 host_pat, tsc_this, tsc_base;
6aa8b732
AK
2260 unsigned long a;
2261 struct descriptor_table dt;
2262 int i;
cd2276a7 2263 unsigned long kvm_vmx_return;
6e5d865c 2264 u32 exec_control;
6aa8b732 2265
6aa8b732 2266 /* I/O */
3e7c73e9
AK
2267 vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
2268 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
6aa8b732 2269
25c5f225 2270 if (cpu_has_vmx_msr_bitmap())
5897297b 2271 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
25c5f225 2272
6aa8b732
AK
2273 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
2274
6aa8b732 2275 /* Control */
1c3d14fe
YS
2276 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
2277 vmcs_config.pin_based_exec_ctrl);
6e5d865c
YS
2278
2279 exec_control = vmcs_config.cpu_based_exec_ctrl;
2280 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
2281 exec_control &= ~CPU_BASED_TPR_SHADOW;
2282#ifdef CONFIG_X86_64
2283 exec_control |= CPU_BASED_CR8_STORE_EXITING |
2284 CPU_BASED_CR8_LOAD_EXITING;
2285#endif
2286 }
089d034e 2287 if (!enable_ept)
d56f546d 2288 exec_control |= CPU_BASED_CR3_STORE_EXITING |
83dbc83a
MT
2289 CPU_BASED_CR3_LOAD_EXITING |
2290 CPU_BASED_INVLPG_EXITING;
6e5d865c 2291 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
6aa8b732 2292
83ff3b9d
SY
2293 if (cpu_has_secondary_exec_ctrls()) {
2294 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
2295 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2296 exec_control &=
2297 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
2384d2b3
SY
2298 if (vmx->vpid == 0)
2299 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
089d034e 2300 if (!enable_ept)
d56f546d 2301 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
3a624e29
NK
2302 if (!enable_unrestricted_guest)
2303 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
83ff3b9d
SY
2304 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2305 }
f78e0e2e 2306
c7addb90
AK
2307 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
2308 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
6aa8b732
AK
2309 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
2310
2311 vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
2312 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
2313 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
2314
2315 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
2316 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2317 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
d6e88aec
AK
2318 vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
2319 vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
6aa8b732 2320 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
05b3e0c2 2321#ifdef CONFIG_X86_64
6aa8b732
AK
2322 rdmsrl(MSR_FS_BASE, a);
2323 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
2324 rdmsrl(MSR_GS_BASE, a);
2325 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
2326#else
2327 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
2328 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
2329#endif
2330
2331 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
2332
d6e88aec 2333 kvm_get_idt(&dt);
6aa8b732
AK
2334 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
2335
d77c26fc 2336 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
cd2276a7 2337 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
2cc51560
ED
2338 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
2339 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2340 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
6aa8b732
AK
2341
2342 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
2343 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
2344 rdmsrl(MSR_IA32_SYSENTER_ESP, a);
2345 vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
2346 rdmsrl(MSR_IA32_SYSENTER_EIP, a);
2347 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
2348
468d472f
SY
2349 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
2350 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2351 host_pat = msr_low | ((u64) msr_high << 32);
2352 vmcs_write64(HOST_IA32_PAT, host_pat);
2353 }
2354 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2355 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2356 host_pat = msr_low | ((u64) msr_high << 32);
2357 /* Write the default value, following the host PAT */
2358 vmcs_write64(GUEST_IA32_PAT, host_pat);
2359 /* Keep arch.pat in sync with GUEST_IA32_PAT */
2360 vmx->vcpu.arch.pat = host_pat;
2361 }
2362
6aa8b732
AK
2363 for (i = 0; i < NR_VMX_MSR; ++i) {
2364 u32 index = vmx_msr_index[i];
2365 u32 data_low, data_high;
2366 u64 data;
a2fa3e9f 2367 int j = vmx->nmsrs;
6aa8b732
AK
2368
2369 if (rdmsr_safe(index, &data_low, &data_high) < 0)
2370 continue;
432bd6cb
AK
2371 if (wrmsr_safe(index, data_low, data_high) < 0)
2372 continue;
6aa8b732 2373 data = data_low | ((u64)data_high << 32);
a2fa3e9f
GH
2374 vmx->host_msrs[j].index = index;
2375 vmx->host_msrs[j].reserved = 0;
2376 vmx->host_msrs[j].data = data;
2377 vmx->guest_msrs[j] = vmx->host_msrs[j];
2378 ++vmx->nmsrs;
6aa8b732 2379 }
6aa8b732 2380
1c3d14fe 2381 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
6aa8b732
AK
2382
2383 /* 22.2.1, 20.8.1 */
1c3d14fe
YS
2384 vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
2385
e00c8cf2
AK
2386 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2387 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
2388
53f658b3
MT
2389 tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
2390 rdtscll(tsc_this);
2391 if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
2392 tsc_base = tsc_this;
2393
2394 guest_write_tsc(0, tsc_base);
f78e0e2e 2395
e00c8cf2
AK
2396 return 0;
2397}
2398
b7ebfb05
SY
2399static int init_rmode(struct kvm *kvm)
2400{
2401 if (!init_rmode_tss(kvm))
2402 return 0;
2403 if (!init_rmode_identity_map(kvm))
2404 return 0;
2405 return 1;
2406}
2407
e00c8cf2
AK
2408static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2409{
2410 struct vcpu_vmx *vmx = to_vmx(vcpu);
2411 u64 msr;
2412 int ret;
2413
5fdbf976 2414 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
3200f405 2415 down_read(&vcpu->kvm->slots_lock);
b7ebfb05 2416 if (!init_rmode(vmx->vcpu.kvm)) {
e00c8cf2
AK
2417 ret = -ENOMEM;
2418 goto out;
2419 }
2420
7ffd92c5 2421 vmx->rmode.vm86_active = 0;
e00c8cf2 2422
3b86cd99
JK
2423 vmx->soft_vnmi_blocked = 0;
2424
ad312c7c 2425 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2d3ad1f4 2426 kvm_set_cr8(&vmx->vcpu, 0);
e00c8cf2 2427 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
c5af89b6 2428 if (kvm_vcpu_is_bsp(&vmx->vcpu))
e00c8cf2
AK
2429 msr |= MSR_IA32_APICBASE_BSP;
2430 kvm_set_apic_base(&vmx->vcpu, msr);
2431
2432 fx_init(&vmx->vcpu);
2433
5706be0d 2434 seg_setup(VCPU_SREG_CS);
e00c8cf2
AK
2435 /*
2436 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
2437 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
2438 */
c5af89b6 2439 if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
e00c8cf2
AK
2440 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
2441 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
2442 } else {
ad312c7c
ZX
2443 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
2444 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
e00c8cf2 2445 }
e00c8cf2
AK
2446
2447 seg_setup(VCPU_SREG_DS);
2448 seg_setup(VCPU_SREG_ES);
2449 seg_setup(VCPU_SREG_FS);
2450 seg_setup(VCPU_SREG_GS);
2451 seg_setup(VCPU_SREG_SS);
2452
2453 vmcs_write16(GUEST_TR_SELECTOR, 0);
2454 vmcs_writel(GUEST_TR_BASE, 0);
2455 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
2456 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2457
2458 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
2459 vmcs_writel(GUEST_LDTR_BASE, 0);
2460 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
2461 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
2462
2463 vmcs_write32(GUEST_SYSENTER_CS, 0);
2464 vmcs_writel(GUEST_SYSENTER_ESP, 0);
2465 vmcs_writel(GUEST_SYSENTER_EIP, 0);
2466
2467 vmcs_writel(GUEST_RFLAGS, 0x02);
c5af89b6 2468 if (kvm_vcpu_is_bsp(&vmx->vcpu))
5fdbf976 2469 kvm_rip_write(vcpu, 0xfff0);
e00c8cf2 2470 else
5fdbf976
MT
2471 kvm_rip_write(vcpu, 0);
2472 kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
e00c8cf2 2473
e00c8cf2
AK
2474 vmcs_writel(GUEST_DR7, 0x400);
2475
2476 vmcs_writel(GUEST_GDTR_BASE, 0);
2477 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
2478
2479 vmcs_writel(GUEST_IDTR_BASE, 0);
2480 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
2481
2482 vmcs_write32(GUEST_ACTIVITY_STATE, 0);
2483 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
2484 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
2485
e00c8cf2
AK
2486 /* Special registers */
2487 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
2488
2489 setup_msrs(vmx);
2490
6aa8b732
AK
2491 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
2492
f78e0e2e
SY
2493 if (cpu_has_vmx_tpr_shadow()) {
2494 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
2495 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
2496 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
ad312c7c 2497 page_to_phys(vmx->vcpu.arch.apic->regs_page));
f78e0e2e
SY
2498 vmcs_write32(TPR_THRESHOLD, 0);
2499 }
2500
2501 if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2502 vmcs_write64(APIC_ACCESS_ADDR,
bfc6d222 2503 page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
6aa8b732 2504
2384d2b3
SY
2505 if (vmx->vpid != 0)
2506 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2507
ad312c7c
ZX
2508 vmx->vcpu.arch.cr0 = 0x60000010;
2509 vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
8b9cf98c 2510 vmx_set_cr4(&vmx->vcpu, 0);
8b9cf98c 2511 vmx_set_efer(&vmx->vcpu, 0);
8b9cf98c
RR
2512 vmx_fpu_activate(&vmx->vcpu);
2513 update_exception_bitmap(&vmx->vcpu);
6aa8b732 2514
2384d2b3
SY
2515 vpid_sync_vcpu_all(vmx);
2516
3200f405 2517 ret = 0;
6aa8b732 2518
a89a8fb9
MG
2519 /* HACK: Don't enable emulation on guest boot/reset */
2520 vmx->emulation_required = 0;
2521
6aa8b732 2522out:
3200f405 2523 up_read(&vcpu->kvm->slots_lock);
6aa8b732
AK
2524 return ret;
2525}
2526
3b86cd99
JK
2527static void enable_irq_window(struct kvm_vcpu *vcpu)
2528{
2529 u32 cpu_based_vm_exec_control;
2530
2531 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2532 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2533 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2534}
2535
2536static void enable_nmi_window(struct kvm_vcpu *vcpu)
2537{
2538 u32 cpu_based_vm_exec_control;
2539
2540 if (!cpu_has_virtual_nmis()) {
2541 enable_irq_window(vcpu);
2542 return;
2543 }
2544
2545 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2546 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
2547 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2548}
2549
66fd3f7f 2550static void vmx_inject_irq(struct kvm_vcpu *vcpu)
85f455f7 2551{
9c8cba37 2552 struct vcpu_vmx *vmx = to_vmx(vcpu);
66fd3f7f
GN
2553 uint32_t intr;
2554 int irq = vcpu->arch.interrupt.nr;
9c8cba37 2555
229456fc 2556 trace_kvm_inj_virq(irq);
2714d1d3 2557
fa89a817 2558 ++vcpu->stat.irq_injections;
7ffd92c5 2559 if (vmx->rmode.vm86_active) {
9c8cba37
AK
2560 vmx->rmode.irq.pending = true;
2561 vmx->rmode.irq.vector = irq;
5fdbf976 2562 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
ae0bb3e0
GN
2563 if (vcpu->arch.interrupt.soft)
2564 vmx->rmode.irq.rip +=
2565 vmx->vcpu.arch.event_exit_inst_len;
9c5623e3
AK
2566 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2567 irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
2568 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
5fdbf976 2569 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
85f455f7
ED
2570 return;
2571 }
66fd3f7f
GN
2572 intr = irq | INTR_INFO_VALID_MASK;
2573 if (vcpu->arch.interrupt.soft) {
2574 intr |= INTR_TYPE_SOFT_INTR;
2575 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2576 vmx->vcpu.arch.event_exit_inst_len);
2577 } else
2578 intr |= INTR_TYPE_EXT_INTR;
2579 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
85f455f7
ED
2580}
2581
f08864b4
SY
2582static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2583{
66a5a347
JK
2584 struct vcpu_vmx *vmx = to_vmx(vcpu);
2585
3b86cd99
JK
2586 if (!cpu_has_virtual_nmis()) {
2587 /*
2588 * Tracking the NMI-blocked state in software is built upon
2589 * finding the next open IRQ window. This, in turn, depends on
2590 * well-behaving guests: They have to keep IRQs disabled at
2591 * least as long as the NMI handler runs. Otherwise we may
2592 * cause NMI nesting, maybe breaking the guest. But as this is
2593 * highly unlikely, we can live with the residual risk.
2594 */
2595 vmx->soft_vnmi_blocked = 1;
2596 vmx->vnmi_blocked_time = 0;
2597 }
2598
487b391d 2599 ++vcpu->stat.nmi_injections;
7ffd92c5 2600 if (vmx->rmode.vm86_active) {
66a5a347
JK
2601 vmx->rmode.irq.pending = true;
2602 vmx->rmode.irq.vector = NMI_VECTOR;
2603 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2604 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2605 NMI_VECTOR | INTR_TYPE_SOFT_INTR |
2606 INTR_INFO_VALID_MASK);
2607 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2608 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2609 return;
2610 }
f08864b4
SY
2611 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2612 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
f08864b4
SY
2613}
2614
c4282df9 2615static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
33f089ca 2616{
3b86cd99 2617 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
c4282df9 2618 return 0;
33f089ca 2619
c4282df9
GN
2620 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2621 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
2622 GUEST_INTR_STATE_NMI));
33f089ca
JK
2623}
2624
78646121
GN
2625static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
2626{
c4282df9
GN
2627 return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2628 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2629 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
78646121
GN
2630}
2631
cbc94022
IE
2632static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
2633{
2634 int ret;
2635 struct kvm_userspace_memory_region tss_mem = {
6fe63979 2636 .slot = TSS_PRIVATE_MEMSLOT,
cbc94022
IE
2637 .guest_phys_addr = addr,
2638 .memory_size = PAGE_SIZE * 3,
2639 .flags = 0,
2640 };
2641
2642 ret = kvm_set_memory_region(kvm, &tss_mem, 0);
2643 if (ret)
2644 return ret;
bfc6d222 2645 kvm->arch.tss_addr = addr;
cbc94022
IE
2646 return 0;
2647}
2648
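/*
 * Hedged userspace-side sketch, not part of this file: the handler
 * above backs the KVM_SET_TSS_ADDR vm ioctl, which a VMM issues once,
 * before running vcpus, pointing at three unused pages below 4GB.
 * The address shown is the one qemu conventionally picks, not
 * something this code mandates.
 *
 *	if (ioctl(vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000UL) < 0)
 *		perror("KVM_SET_TSS_ADDR");
 */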
6aa8b732
AK
2649static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2650 int vec, u32 err_code)
2651{
b3f37707
NK
2652 /*
2653 * An instruction with the address-size override prefix (opcode 0x67)
2654 * causes a #SS fault with error code 0 in VM86 mode.
2655 */
2656 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
3427318f 2657 if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
6aa8b732 2658 return 1;
77ab6db0
JK
2659 /*
2660 * Forward all other exceptions that are valid in real mode.
2661 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
2662 * the required debugging infrastructure rework.
2663 */
2664 switch (vec) {
77ab6db0 2665 case DB_VECTOR:
d0bfb940
JK
2666 if (vcpu->guest_debug &
2667 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
2668 return 0;
2669 kvm_queue_exception(vcpu, vec);
2670 return 1;
77ab6db0 2671 case BP_VECTOR:
d0bfb940
JK
2672 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2673 return 0;
2674 /* fall through */
2675 case DE_VECTOR:
77ab6db0
JK
2676 case OF_VECTOR:
2677 case BR_VECTOR:
2678 case UD_VECTOR:
2679 case DF_VECTOR:
2680 case SS_VECTOR:
2681 case GP_VECTOR:
2682 case MF_VECTOR:
2683 kvm_queue_exception(vcpu, vec);
2684 return 1;
2685 }
6aa8b732
AK
2686 return 0;
2687}
2688
a0861c02
AK
2689/*
2690 * Trigger machine check on the host. We assume all the MSRs are already set up
2691 * by the CPU and that we still run on the same CPU as the MCE occurred on.
2692 * We pass a fake environment to the machine check handler because we want
2693 * the guest to be always treated like user space, no matter what context
2694 * it used internally.
2695 */
2696static void kvm_machine_check(void)
2697{
2698#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
2699 struct pt_regs regs = {
2700 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
2701 .flags = X86_EFLAGS_IF,
2702 };
2703
2704 do_machine_check(&regs, 0);
2705#endif
2706}
2707
2708static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2709{
2710 /* already handled by vcpu_run */
2711 return 1;
2712}
2713
6aa8b732
AK
2714static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2715{
1155f76a 2716 struct vcpu_vmx *vmx = to_vmx(vcpu);
d0bfb940 2717 u32 intr_info, ex_no, error_code;
42dbaa5a 2718 unsigned long cr2, rip, dr6;
6aa8b732
AK
2719 u32 vect_info;
2720 enum emulation_result er;
2721
1155f76a 2722 vect_info = vmx->idt_vectoring_info;
6aa8b732
AK
2723 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2724
a0861c02
AK
2725 if (is_machine_check(intr_info))
2726 return handle_machine_check(vcpu, kvm_run);
2727
6aa8b732 2728 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
d77c26fc 2729 !is_page_fault(intr_info))
6aa8b732 2730 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
b8688d51 2731 "intr info 0x%x\n", __func__, vect_info, intr_info);
6aa8b732 2732
e4a41889 2733 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
1b6269db 2734 return 1; /* already handled by vmx_vcpu_run() */
2ab455cc
AL
2735
2736 if (is_no_device(intr_info)) {
5fd86fcf 2737 vmx_fpu_activate(vcpu);
2ab455cc
AL
2738 return 1;
2739 }
2740
7aa81cc0 2741 if (is_invalid_opcode(intr_info)) {
571008da 2742 er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
7aa81cc0 2743 if (er != EMULATE_DONE)
7ee5d940 2744 kvm_queue_exception(vcpu, UD_VECTOR);
7aa81cc0
AL
2745 return 1;
2746 }
2747
6aa8b732 2748 error_code = 0;
5fdbf976 2749 rip = kvm_rip_read(vcpu);
2e11384c 2750 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
6aa8b732
AK
2751 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
2752 if (is_page_fault(intr_info)) {
1439442c 2753 /* EPT won't cause page fault directly */
089d034e 2754 if (enable_ept)
1439442c 2755 BUG();
6aa8b732 2756 cr2 = vmcs_readl(EXIT_QUALIFICATION);
229456fc
MT
2757 trace_kvm_page_fault(cr2, error_code);
2758
3298b75c 2759 if (kvm_event_needs_reinjection(vcpu))
577bdc49 2760 kvm_mmu_unprotect_page_virt(vcpu, cr2);
3067714c 2761 return kvm_mmu_page_fault(vcpu, cr2, error_code);
6aa8b732
AK
2762 }
2763
7ffd92c5 2764 if (vmx->rmode.vm86_active &&
6aa8b732 2765 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
72d6e5a0 2766 error_code)) {
ad312c7c
ZX
2767 if (vcpu->arch.halt_request) {
2768 vcpu->arch.halt_request = 0;
72d6e5a0
AK
2769 return kvm_emulate_halt(vcpu);
2770 }
6aa8b732 2771 return 1;
72d6e5a0 2772 }
6aa8b732 2773
d0bfb940 2774 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
42dbaa5a
JK
2775 switch (ex_no) {
2776 case DB_VECTOR:
2777 dr6 = vmcs_readl(EXIT_QUALIFICATION);
2778 if (!(vcpu->guest_debug &
2779 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
2780 vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
2781 kvm_queue_exception(vcpu, DB_VECTOR);
2782 return 1;
2783 }
2784 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
2785 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
2786 /* fall through */
2787 case BP_VECTOR:
6aa8b732 2788 kvm_run->exit_reason = KVM_EXIT_DEBUG;
d0bfb940
JK
2789 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
2790 kvm_run->debug.arch.exception = ex_no;
42dbaa5a
JK
2791 break;
2792 default:
d0bfb940
JK
2793 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
2794 kvm_run->ex.exception = ex_no;
2795 kvm_run->ex.error_code = error_code;
42dbaa5a 2796 break;
6aa8b732 2797 }
6aa8b732
AK
2798 return 0;
2799}
2800
2801static int handle_external_interrupt(struct kvm_vcpu *vcpu,
2802 struct kvm_run *kvm_run)
2803{
1165f5fe 2804 ++vcpu->stat.irq_exits;
6aa8b732
AK
2805 return 1;
2806}
2807
988ad74f
AK
2808static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2809{
2810 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2811 return 0;
2812}
6aa8b732 2813
6aa8b732
AK
2814static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2815{
bfdaab09 2816 unsigned long exit_qualification;
34c33d16 2817 int size, in, string;
039576c0 2818 unsigned port;
6aa8b732 2819
1165f5fe 2820 ++vcpu->stat.io_exits;
bfdaab09 2821 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
039576c0 2822 string = (exit_qualification & 16) != 0;
e70669ab
LV
2823
2824 if (string) {
3427318f
LV
2825 if (emulate_instruction(vcpu,
2826 kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
e70669ab
LV
2827 return 0;
2828 return 1;
2829 }
2830
2831 size = (exit_qualification & 7) + 1;
2832 in = (exit_qualification & 8) != 0;
039576c0 2833 port = exit_qualification >> 16;
e70669ab 2834
e93f36bc 2835 skip_emulated_instruction(vcpu);
3090dd73 2836 return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
6aa8b732
AK
2837}
2838
102d8325
IM
2839static void
2840vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2841{
2842 /*
2843 * Patch in the VMCALL instruction:
2844 */
2845 hypercall[0] = 0x0f;
2846 hypercall[1] = 0x01;
2847 hypercall[2] = 0xc1;
102d8325
IM
2848}
2849
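/*
 * Worked example (added for illustration): 0x0f 0x01 0xc1 is the
 * three-byte VMCALL opcode, so once the guest's hypercall page is
 * patched, executing it lands in kvm_emulate_hypercall() instead of
 * faulting. The SVM counterpart patches in VMMCALL (0x0f 0x01 0xd9)
 * the same way.
 */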
6aa8b732
AK
2850static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2851{
229456fc 2852 unsigned long exit_qualification, val;
6aa8b732
AK
2853 int cr;
2854 int reg;
2855
bfdaab09 2856 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6aa8b732
AK
2857 cr = exit_qualification & 15;
2858 reg = (exit_qualification >> 8) & 15;
2859 switch ((exit_qualification >> 4) & 3) {
2860 case 0: /* mov to cr */
229456fc
MT
2861 val = kvm_register_read(vcpu, reg);
2862 trace_kvm_cr_write(cr, val);
6aa8b732
AK
2863 switch (cr) {
2864 case 0:
229456fc 2865 kvm_set_cr0(vcpu, val);
6aa8b732
AK
2866 skip_emulated_instruction(vcpu);
2867 return 1;
2868 case 3:
229456fc 2869 kvm_set_cr3(vcpu, val);
6aa8b732
AK
2870 skip_emulated_instruction(vcpu);
2871 return 1;
2872 case 4:
229456fc 2873 kvm_set_cr4(vcpu, val);
6aa8b732
AK
2874 skip_emulated_instruction(vcpu);
2875 return 1;
0a5fff19
GN
2876 case 8: {
2877 u8 cr8_prev = kvm_get_cr8(vcpu);
2878 u8 cr8 = kvm_register_read(vcpu, reg);
2879 kvm_set_cr8(vcpu, cr8);
2880 skip_emulated_instruction(vcpu);
2881 if (irqchip_in_kernel(vcpu->kvm))
2882 return 1;
2883 if (cr8_prev <= cr8)
2884 return 1;
2885 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2886 return 0;
2887 }
6aa8b732
AK
2888 };
2889 break;
25c4c276 2890 case 2: /* clts */
5fd86fcf 2891 vmx_fpu_deactivate(vcpu);
ad312c7c
ZX
2892 vcpu->arch.cr0 &= ~X86_CR0_TS;
2893 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
5fd86fcf 2894 vmx_fpu_activate(vcpu);
25c4c276
AL
2895 skip_emulated_instruction(vcpu);
2896 return 1;
6aa8b732
AK
2897 case 1: /*mov from cr*/
2898 switch (cr) {
2899 case 3:
5fdbf976 2900 kvm_register_write(vcpu, reg, vcpu->arch.cr3);
229456fc 2901 trace_kvm_cr_read(cr, vcpu->arch.cr3);
6aa8b732
AK
2902 skip_emulated_instruction(vcpu);
2903 return 1;
2904 case 8:
229456fc
MT
2905 val = kvm_get_cr8(vcpu);
2906 kvm_register_write(vcpu, reg, val);
2907 trace_kvm_cr_read(cr, val);
6aa8b732
AK
2908 skip_emulated_instruction(vcpu);
2909 return 1;
2910 }
2911 break;
2912 case 3: /* lmsw */
2d3ad1f4 2913 kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
6aa8b732
AK
2914
2915 skip_emulated_instruction(vcpu);
2916 return 1;
2917 default:
2918 break;
2919 }
2920 kvm_run->exit_reason = 0;
f0242478 2921 pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
6aa8b732
AK
2922 (int)(exit_qualification >> 4) & 3, cr);
2923 return 0;
2924}
2925
2926static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2927{
bfdaab09 2928 unsigned long exit_qualification;
6aa8b732
AK
2929 unsigned long val;
2930 int dr, reg;
2931
42dbaa5a
JK
2932 dr = vmcs_readl(GUEST_DR7);
2933 if (dr & DR7_GD) {
2934 /*
2935 * As the vm-exit takes precedence over the debug trap, we
2936 * need to emulate the latter, either for the host or the
2937 * guest debugging itself.
2938 */
2939 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
2940 kvm_run->debug.arch.dr6 = vcpu->arch.dr6;
2941 kvm_run->debug.arch.dr7 = dr;
2942 kvm_run->debug.arch.pc =
2943 vmcs_readl(GUEST_CS_BASE) +
2944 vmcs_readl(GUEST_RIP);
2945 kvm_run->debug.arch.exception = DB_VECTOR;
2946 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2947 return 0;
2948 } else {
2949 vcpu->arch.dr7 &= ~DR7_GD;
2950 vcpu->arch.dr6 |= DR6_BD;
2951 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2952 kvm_queue_exception(vcpu, DB_VECTOR);
2953 return 1;
2954 }
2955 }
2956
bfdaab09 2957 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
42dbaa5a
JK
2958 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
2959 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
2960 if (exit_qualification & TYPE_MOV_FROM_DR) {
6aa8b732 2961 switch (dr) {
42dbaa5a
JK
2962 case 0 ... 3:
2963 val = vcpu->arch.db[dr];
2964 break;
6aa8b732 2965 case 6:
42dbaa5a 2966 val = vcpu->arch.dr6;
6aa8b732
AK
2967 break;
2968 case 7:
42dbaa5a 2969 val = vcpu->arch.dr7;
6aa8b732
AK
2970 break;
2971 default:
2972 val = 0;
2973 }
5fdbf976 2974 kvm_register_write(vcpu, reg, val);
6aa8b732 2975 } else {
42dbaa5a
JK
2976 val = vcpu->arch.regs[reg];
2977 switch (dr) {
2978 case 0 ... 3:
2979 vcpu->arch.db[dr] = val;
2980 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2981 vcpu->arch.eff_db[dr] = val;
2982 break;
2983 case 4 ... 5:
2984 if (vcpu->arch.cr4 & X86_CR4_DE)
2985 kvm_queue_exception(vcpu, UD_VECTOR);
2986 break;
2987 case 6:
2988 if (val & 0xffffffff00000000ULL) {
2989 kvm_queue_exception(vcpu, GP_VECTOR);
2990 break;
2991 }
2992 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
2993 break;
2994 case 7:
2995 if (val & 0xffffffff00000000ULL) {
2996 kvm_queue_exception(vcpu, GP_VECTOR);
2997 break;
2998 }
2999 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
3000 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
3001 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
3002 vcpu->arch.switch_db_regs =
3003 (val & DR7_BP_EN_MASK);
3004 }
3005 break;
3006 }
6aa8b732 3007 }
6aa8b732
AK
3008 skip_emulated_instruction(vcpu);
3009 return 1;
3010}
3011
3012static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3013{
06465c5a
AK
3014 kvm_emulate_cpuid(vcpu);
3015 return 1;
6aa8b732
AK
3016}
3017
3018static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3019{
ad312c7c 3020 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
6aa8b732
AK
3021 u64 data;
3022
3023 if (vmx_get_msr(vcpu, ecx, &data)) {
c1a5d4f9 3024 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
3025 return 1;
3026 }
3027
229456fc 3028 trace_kvm_msr_read(ecx, data);
2714d1d3 3029
6aa8b732 3030 /* FIXME: handling of bits 32:63 of rax, rdx */
ad312c7c
ZX
3031 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
3032 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
6aa8b732
AK
3033 skip_emulated_instruction(vcpu);
3034 return 1;
3035}
3036
3037static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3038{
ad312c7c
ZX
3039 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3040 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
3041 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
6aa8b732 3042
229456fc 3043 trace_kvm_msr_write(ecx, data);
2714d1d3 3044
6aa8b732 3045 if (vmx_set_msr(vcpu, ecx, data) != 0) {
c1a5d4f9 3046 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
3047 return 1;
3048 }
3049
3050 skip_emulated_instruction(vcpu);
3051 return 1;
3052}
3053
6e5d865c
YS
3054static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
3055 struct kvm_run *kvm_run)
3056{
3057 return 1;
3058}
3059
6aa8b732
AK
3060static int handle_interrupt_window(struct kvm_vcpu *vcpu,
3061 struct kvm_run *kvm_run)
3062{
85f455f7
ED
3063 u32 cpu_based_vm_exec_control;
3064
3065 /* clear pending irq */
3066 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3067 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
3068 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2714d1d3 3069
a26bf12a 3070 ++vcpu->stat.irq_window_exits;
2714d1d3 3071
c1150d8c
DL
3072 /*
3073 * If user space is waiting to inject interrupts, exit as soon as
3074 * possible.
3075 */
8061823a
GN
3076 if (!irqchip_in_kernel(vcpu->kvm) &&
3077 kvm_run->request_interrupt_window &&
3078 !kvm_cpu_has_interrupt(vcpu)) {
c1150d8c 3079 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
c1150d8c
DL
3080 return 0;
3081 }
6aa8b732
AK
3082 return 1;
3083}
3084
3085static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3086{
3087 skip_emulated_instruction(vcpu);
d3bef15f 3088 return kvm_emulate_halt(vcpu);
6aa8b732
AK
3089}
3090
c21415e8
IM
3091static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3092{
510043da 3093 skip_emulated_instruction(vcpu);
7aa81cc0
AL
3094 kvm_emulate_hypercall(vcpu);
3095 return 1;
c21415e8
IM
3096}
3097
e3c7cb6a
AK
3098static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3099{
3100 kvm_queue_exception(vcpu, UD_VECTOR);
3101 return 1;
3102}
3103
a7052897
MT
3104static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3105{
f9c617f6 3106 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
a7052897
MT
3107
3108 kvm_mmu_invlpg(vcpu, exit_qualification);
3109 skip_emulated_instruction(vcpu);
3110 return 1;
3111}
3112
e5edaa01
ED
3113static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3114{
3115 skip_emulated_instruction(vcpu);
3116 /* TODO: Add support for VT-d/pass-through device */
3117 return 1;
3118}
3119
f78e0e2e
SY
3120static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3121{
f9c617f6 3122 unsigned long exit_qualification;
f78e0e2e
SY
3123 enum emulation_result er;
3124 unsigned long offset;
3125
f9c617f6 3126 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
f78e0e2e
SY
3127 offset = exit_qualification & 0xffful;
3128
3129 er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
3130
3131 if (er != EMULATE_DONE) {
3132 printk(KERN_ERR
3133 "Fail to handle apic access vmexit! Offset is 0x%lx\n",
3134 offset);
3135 return -ENOTSUPP;
3136 }
3137 return 1;
3138}
3139
37817f29
IE
3140static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3141{
60637aac 3142 struct vcpu_vmx *vmx = to_vmx(vcpu);
37817f29
IE
3143 unsigned long exit_qualification;
3144 u16 tss_selector;
64a7ec06
GN
3145 int reason, type, idt_v;
3146
3147 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
3148 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
37817f29
IE
3149
3150 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3151
3152 reason = (u32)exit_qualification >> 30;
64a7ec06
GN
3153 if (reason == TASK_SWITCH_GATE && idt_v) {
3154 switch (type) {
3155 case INTR_TYPE_NMI_INTR:
3156 vcpu->arch.nmi_injected = false;
3157 if (cpu_has_virtual_nmis())
3158 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3159 GUEST_INTR_STATE_NMI);
3160 break;
3161 case INTR_TYPE_EXT_INTR:
66fd3f7f 3162 case INTR_TYPE_SOFT_INTR:
64a7ec06
GN
3163 kvm_clear_interrupt_queue(vcpu);
3164 break;
3165 case INTR_TYPE_HARD_EXCEPTION:
3166 case INTR_TYPE_SOFT_EXCEPTION:
3167 kvm_clear_exception_queue(vcpu);
3168 break;
3169 default:
3170 break;
3171 }
60637aac 3172 }
37817f29
IE
3173 tss_selector = exit_qualification;
3174
64a7ec06
GN
3175 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
3176 type != INTR_TYPE_EXT_INTR &&
3177 type != INTR_TYPE_NMI_INTR))
3178 skip_emulated_instruction(vcpu);
3179
42dbaa5a
JK
3180 if (!kvm_task_switch(vcpu, tss_selector, reason))
3181 return 0;
3182
3183 /* clear all local breakpoint enable flags */
3184 vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
3185
3186 /*
3187 * TODO: What about debug traps on TSS switch?
3188 * Are we supposed to inject them and update dr6?
3189 */
3190
3191 return 1;
37817f29
IE
3192}
3193
1439442c
SY
3194static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3195{
f9c617f6 3196 unsigned long exit_qualification;
1439442c 3197 gpa_t gpa;
1439442c 3198 int gla_validity;
1439442c 3199
f9c617f6 3200 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1439442c
SY
3201
3202 if (exit_qualification & (1 << 6)) {
3203 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
3204 return -ENOTSUPP;
3205 }
3206
3207 gla_validity = (exit_qualification >> 7) & 0x3;
3208 if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
3209 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
3210 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
3211 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
f9c617f6 3212 vmcs_readl(GUEST_LINEAR_ADDRESS));
1439442c
SY
3213 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3214 (long unsigned int)exit_qualification);
3215 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
596ae895
AK
3216 kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
3217 return 0;
1439442c
SY
3218 }
3219
3220 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
229456fc 3221 trace_kvm_page_fault(gpa, exit_qualification);
49cd7d22 3222 return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
1439442c
SY
3223}
3224
68f89400
MT
3225static u64 ept_rsvd_mask(u64 spte, int level)
3226{
3227 int i;
3228 u64 mask = 0;
3229
3230 for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
3231 mask |= (1ULL << i);
3232
3233 if (level > 2)
3234 /* bits 7:3 reserved */
3235 mask |= 0xf8;
3236 else if (level == 2) {
3237 if (spte & (1ULL << 7))
3238 /* 2MB ref, bits 20:12 reserved */
3239 mask |= 0x1ff000;
3240 else
3241 /* bits 6:3 reserved */
3242 mask |= 0x78;
3243 }
3244
3245 return mask;
3246}
3247
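/*
 * Worked example (added for illustration): on a CPU reporting 40
 * physical address bits, the loop above marks bits 51:41 reserved in
 * every entry, i.e. 0x000ffe0000000000. A level-2 entry with bit 7
 * set (a 2MB mapping) additionally reserves bits 20:12, so its full
 * mask is 0x000ffe0000000000 | 0x1ff000.
 */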
3248static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
3249 int level)
3250{
3251 printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
3252
3253 /* 010b (write-only) */
3254 WARN_ON((spte & 0x7) == 0x2);
3255
3256 /* 110b (write/execute) */
3257 WARN_ON((spte & 0x7) == 0x6);
3258
3259 /* 100b (execute-only) and value not supported by logical processor */
3260 if (!cpu_has_vmx_ept_execute_only())
3261 WARN_ON((spte & 0x7) == 0x4);
3262
3263 /* not 000b */
3264 if ((spte & 0x7)) {
3265 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
3266
3267 if (rsvd_bits != 0) {
3268 printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
3269 __func__, rsvd_bits);
3270 WARN_ON(1);
3271 }
3272
3273 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
3274 u64 ept_mem_type = (spte & 0x38) >> 3;
3275
3276 if (ept_mem_type == 2 || ept_mem_type == 3 ||
3277 ept_mem_type == 7) {
3278 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
3279 __func__, ept_mem_type);
3280 WARN_ON(1);
3281 }
3282 }
3283 }
3284}
3285
3286static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3287{
3288 u64 sptes[4];
3289 int nr_sptes, i;
3290 gpa_t gpa;
3291
3292 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3293
3294 printk(KERN_ERR "EPT: Misconfiguration.\n");
3295 printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
3296
3297 nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
3298
3299 for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
3300 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
3301
3302 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3303 kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
3304
3305 return 0;
3306}
3307
f08864b4
SY
3308static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3309{
3310 u32 cpu_based_vm_exec_control;
3311
3312 /* clear pending NMI */
3313 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3314 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
3315 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3316 ++vcpu->stat.nmi_window_exits;
3317
3318 return 1;
3319}
3320
ea953ef0
MG
3321static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
3322 struct kvm_run *kvm_run)
3323{
8b3079a5
AK
3324 struct vcpu_vmx *vmx = to_vmx(vcpu);
3325 enum emulation_result err = EMULATE_DONE;
ea953ef0 3326
ea953ef0 3327 local_irq_enable();
34f0c1ad 3328 preempt_enable();
ea953ef0
MG
3329
3330 while (!guest_state_valid(vcpu)) {
3331 err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
3332
1d5a4d9b
GT
3333 if (err == EMULATE_DO_MMIO)
3334 break;
3335
3336 if (err != EMULATE_DONE) {
3337 kvm_report_emulation_failure(vcpu, "emulation failure");
263799a3 3338 break;
ea953ef0
MG
3339 }
3340
3341 if (signal_pending(current))
3342 break;
3343 if (need_resched())
3344 schedule();
3345 }
3346
ea953ef0 3347 preempt_disable();
34f0c1ad 3348 local_irq_disable();
8b3079a5
AK
3349
3350 vmx->invalid_state_emulation_result = err;
ea953ef0
MG
3351}
3352
6aa8b732
AK
3353/*
3354 * The exit handlers return 1 if the exit was handled fully and guest execution
3355 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
3356 * to be done to userspace and return 0.
3357 */
3358static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
3359 struct kvm_run *kvm_run) = {
3360 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
3361 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
988ad74f 3362 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
f08864b4 3363 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
6aa8b732 3364 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6aa8b732
AK
3365 [EXIT_REASON_CR_ACCESS] = handle_cr,
3366 [EXIT_REASON_DR_ACCESS] = handle_dr,
3367 [EXIT_REASON_CPUID] = handle_cpuid,
3368 [EXIT_REASON_MSR_READ] = handle_rdmsr,
3369 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
3370 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
3371 [EXIT_REASON_HLT] = handle_halt,
a7052897 3372 [EXIT_REASON_INVLPG] = handle_invlpg,
c21415e8 3373 [EXIT_REASON_VMCALL] = handle_vmcall,
e3c7cb6a
AK
3374 [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
3375 [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
3376 [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
3377 [EXIT_REASON_VMPTRST] = handle_vmx_insn,
3378 [EXIT_REASON_VMREAD] = handle_vmx_insn,
3379 [EXIT_REASON_VMRESUME] = handle_vmx_insn,
3380 [EXIT_REASON_VMWRITE] = handle_vmx_insn,
3381 [EXIT_REASON_VMOFF] = handle_vmx_insn,
3382 [EXIT_REASON_VMON] = handle_vmx_insn,
f78e0e2e
SY
3383 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
3384 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
e5edaa01 3385 [EXIT_REASON_WBINVD] = handle_wbinvd,
37817f29 3386 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
a0861c02 3387 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
68f89400
MT
3388 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
3389 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
6aa8b732
AK
3390};
3391
3392static const int kvm_vmx_max_exit_handlers =
50a3485c 3393 ARRAY_SIZE(kvm_vmx_exit_handlers);
6aa8b732
AK
3394
3395/*
3396 * The guest has exited. See if we can fix it or if we need userspace
3397 * assistance.
3398 */
6062d012 3399static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
6aa8b732 3400{
29bd8a78 3401 struct vcpu_vmx *vmx = to_vmx(vcpu);
a0861c02 3402 u32 exit_reason = vmx->exit_reason;
1155f76a 3403 u32 vectoring_info = vmx->idt_vectoring_info;
29bd8a78 3404
229456fc 3405 trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
2714d1d3 3406
1d5a4d9b
GT
3407 /* If we need to emulate an MMIO access from handle_invalid_guest_state,
3408 * we just return 0. */
10f32d84
AK
3409 if (vmx->emulation_required && emulate_invalid_guest_state) {
3410 if (guest_state_valid(vcpu))
3411 vmx->emulation_required = 0;
8b3079a5 3412 return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO;
10f32d84 3413 }
1d5a4d9b 3414
1439442c
SY
3415 /* CR3 accesses don't cause a VM exit in paging mode, so we need
3416 * to sync with the guest's real CR3. */
6de4f3ad 3417 if (enable_ept && is_paging(vcpu))
1439442c 3418 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
1439442c 3419
29bd8a78
AK
3420 if (unlikely(vmx->fail)) {
3421 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3422 kvm_run->fail_entry.hardware_entry_failure_reason
3423 = vmcs_read32(VM_INSTRUCTION_ERROR);
3424 return 0;
3425 }
6aa8b732 3426
d77c26fc 3427 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
1439442c 3428 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
60637aac
JK
3429 exit_reason != EXIT_REASON_EPT_VIOLATION &&
3430 exit_reason != EXIT_REASON_TASK_SWITCH))
3431 printk(KERN_WARNING "%s: unexpected, valid vectoring info "
3432 "(0x%x) and exit reason is 0x%x\n",
3433 __func__, vectoring_info, exit_reason);
3b86cd99
JK
3434
3435 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
c4282df9 3436 if (vmx_interrupt_allowed(vcpu)) {
3b86cd99 3437 vmx->soft_vnmi_blocked = 0;
3b86cd99 3438 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
4531220b 3439 vcpu->arch.nmi_pending) {
3b86cd99
JK
3440 /*
3441 * This CPU doesn't help us find the end of an
3442 * NMI-blocked window if the guest runs with IRQs
3443 * disabled. So we pull the trigger after 1 s of
3444 * futile waiting, but inform the user about this.
3445 */
3446 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
3447 "state on VCPU %d after 1 s timeout\n",
3448 __func__, vcpu->vcpu_id);
3449 vmx->soft_vnmi_blocked = 0;
3b86cd99 3450 }
3b86cd99
JK
3451 }
3452
6aa8b732
AK
3453 if (exit_reason < kvm_vmx_max_exit_handlers
3454 && kvm_vmx_exit_handlers[exit_reason])
3455 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
3456 else {
3457 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3458 kvm_run->hw.hardware_exit_reason = exit_reason;
3459 }
3460 return 0;
3461}
3462
95ba8273 3463static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6e5d865c 3464{
95ba8273 3465 if (irr == -1 || tpr < irr) {
6e5d865c
YS
3466 vmcs_write32(TPR_THRESHOLD, 0);
3467 return;
3468 }
3469
95ba8273 3470 vmcs_write32(TPR_THRESHOLD, irr);
6e5d865c
YS
3471}
3472
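/*
 * Worked example (added for illustration): if the highest pending
 * interrupt has priority 5 while the guest's TPR is 8, delivery is
 * blocked, so the threshold above is armed at 5 and the guest exits
 * the moment it lowers its TPR below that. If the TPR is already
 * below the pending priority, or nothing is pending (irr == -1), the
 * threshold is parked at 0.
 */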
cf393f75
AK
3473static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3474{
3475 u32 exit_intr_info;
7b4a25cb 3476 u32 idt_vectoring_info = vmx->idt_vectoring_info;
cf393f75
AK
3477 bool unblock_nmi;
3478 u8 vector;
668f612f
AK
3479 int type;
3480 bool idtv_info_valid;
cf393f75
AK
3481
3482 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
20f65983 3483
a0861c02
AK
3484 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
3485
3486 /* Handle machine checks before interrupts are enabled */
3487 if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
3488 || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
3489 && is_machine_check(exit_intr_info)))
3490 kvm_machine_check();
3491
20f65983
GN
3492 /* We need to handle NMIs before interrupts are enabled */
3493 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
229456fc 3494 (exit_intr_info & INTR_INFO_VALID_MASK))
20f65983 3495 asm("int $2");
20f65983
GN
3496
3497 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
3498
cf393f75
AK
3499 if (cpu_has_virtual_nmis()) {
3500 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
3501 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
3502 /*
7b4a25cb 3503 * SDM 3: 27.7.1.2 (September 2008)
cf393f75
AK
3504 * Re-set bit "block by NMI" before VM entry if vmexit caused by
3505 * a guest IRET fault.
7b4a25cb
GN
3506 * SDM 3: 23.2.2 (September 2008)
3507 * Bit 12 is undefined in any of the following cases:
3508 * If the VM exit sets the valid bit in the IDT-vectoring
3509 * information field.
3510 * If the VM exit is due to a double fault.
cf393f75 3511 */
7b4a25cb
GN
3512 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
3513 vector != DF_VECTOR && !idtv_info_valid)
cf393f75
AK
3514 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3515 GUEST_INTR_STATE_NMI);
3b86cd99
JK
3516 } else if (unlikely(vmx->soft_vnmi_blocked))
3517 vmx->vnmi_blocked_time +=
3518 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
668f612f 3519
37b96e98
GN
3520 vmx->vcpu.arch.nmi_injected = false;
3521 kvm_clear_exception_queue(&vmx->vcpu);
3522 kvm_clear_interrupt_queue(&vmx->vcpu);
3523
3524 if (!idtv_info_valid)
3525 return;
3526
668f612f
AK
3527 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
3528 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
37b96e98 3529
64a7ec06 3530 switch (type) {
37b96e98
GN
3531 case INTR_TYPE_NMI_INTR:
3532 vmx->vcpu.arch.nmi_injected = true;
668f612f 3533 /*
7b4a25cb 3534 * SDM 3: 27.7.1.2 (September 2008)
37b96e98
GN
 3535 * Clear bit "block by NMI" before VM entry if an NMI
3536 * delivery faulted.
668f612f 3537 */
37b96e98
GN
3538 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3539 GUEST_INTR_STATE_NMI);
3540 break;
37b96e98 3541 case INTR_TYPE_SOFT_EXCEPTION:
66fd3f7f
GN
3542 vmx->vcpu.arch.event_exit_inst_len =
3543 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3544 /* fall through */
3545 case INTR_TYPE_HARD_EXCEPTION:
35920a35 3546 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
37b96e98
GN
3547 u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
3548 kvm_queue_exception_e(&vmx->vcpu, vector, err);
35920a35
AK
3549 } else
3550 kvm_queue_exception(&vmx->vcpu, vector);
37b96e98 3551 break;
66fd3f7f
GN
3552 case INTR_TYPE_SOFT_INTR:
3553 vmx->vcpu.arch.event_exit_inst_len =
3554 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3555 /* fall through */
37b96e98 3556 case INTR_TYPE_EXT_INTR:
66fd3f7f
GN
3557 kvm_queue_interrupt(&vmx->vcpu, vector,
3558 type == INTR_TYPE_SOFT_INTR);
37b96e98
GN
3559 break;
3560 default:
3561 break;
f7d9238f 3562 }
cf393f75
AK
3563}
3564
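/*
 * [Editor's sketch] vmx_complete_interrupts() above picks apart the
 * 32-bit IDT-vectoring information word with the VECTORING_INFO_*
 * masks.  The field layout (Intel SDM vol. 3) is shown explicitly
 * below; struct and function names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct idt_vectoring {
	uint8_t vector;     /* bits 7:0   vector number */
	uint8_t type;       /* bits 10:8  ext IRQ / NMI / exception / soft */
	bool deliver_code;  /* bit 11     error code is valid */
	bool valid;         /* bit 31     word contents are meaningful */
};

static struct idt_vectoring decode_idt_vectoring(uint32_t info)
{
	struct idt_vectoring v;

	v.vector       = info & 0xffu;        /* VECTORING_INFO_VECTOR_MASK */
	v.type         = (info >> 8) & 0x7u;  /* VECTORING_INFO_TYPE_MASK */
	v.deliver_code = info & (1u << 11);   /* ..._DELIVER_CODE_MASK */
	v.valid        = info & (1u << 31);   /* ..._VALID_MASK */
	return v;
}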
9c8cba37
AK
3565/*
3566 * Failure to inject an interrupt should give us the information
3567 * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs
3568 * when fetching the interrupt redirection bitmap in the real-mode
 3569 * TSS, the field is not filled in. So we fill it in ourselves.
3570 */
3571static void fixup_rmode_irq(struct vcpu_vmx *vmx)
3572{
3573 vmx->rmode.irq.pending = 0;
5fdbf976 3574 if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
9c8cba37 3575 return;
5fdbf976 3576 kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
9c8cba37
AK
3577 if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
3578 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
3579 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
3580 return;
3581 }
3582 vmx->idt_vectoring_info =
3583 VECTORING_INFO_VALID_MASK
3584 | INTR_TYPE_EXT_INTR
3585 | vmx->rmode.irq.vector;
3586}
3587
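/*
 * [Editor's sketch] fixup_rmode_irq() above hand-builds the vectoring
 * word the CPU failed to provide.  INTR_TYPE_EXT_INTR is type 0 in
 * bits 10:8, so only the valid bit and the vector are needed; the
 * constants and the function name below are illustrative mirrors of
 * the VECTORING_INFO_* / INTR_TYPE_* values used above.
 */
#include <stdint.h>

static uint32_t make_ext_intr_info(uint8_t vector)
{
	const uint32_t valid    = 1u << 31; /* VECTORING_INFO_VALID_MASK */
	const uint32_t ext_intr = 0u << 8;  /* INTR_TYPE_EXT_INTR */

	return valid | ext_intr | vector;
}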
c801949d
AK
3588#ifdef CONFIG_X86_64
3589#define R "r"
3590#define Q "q"
3591#else
3592#define R "e"
3593#define Q "l"
3594#endif
3595
04d2cc77 3596static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
6aa8b732 3597{
a2fa3e9f 3598 struct vcpu_vmx *vmx = to_vmx(vcpu);
e6adf283 3599
8f5d549f
AK
3600 if (enable_ept && is_paging(vcpu)) {
3601 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3602 ept_load_pdptrs(vcpu);
3603 }
3b86cd99
JK
3604 /* Record the guest's net vcpu time for enforced NMI injections. */
3605 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
3606 vmx->entry_time = ktime_get();
3607
a89a8fb9
MG
3608 /* Handle invalid guest state instead of entering VMX */
3609 if (vmx->emulation_required && emulate_invalid_guest_state) {
3610 handle_invalid_guest_state(vcpu, kvm_run);
3611 return;
3612 }
3613
5fdbf976
MT
3614 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
3615 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
3616 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
3617 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
3618
787ff736
GN
3619 /* When single-stepping over STI and MOV SS, we must clear the
3620 * corresponding interruptibility bits in the guest state. Otherwise
 3621 * vmentry fails, since it then expects bit 14 (BS) to be set in
 3622 * the pending debug exceptions field, which is not correct for the
 3623 * guest debugging case. */
3624 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3625 vmx_set_interrupt_shadow(vcpu, 0);
3626
e6adf283
AK
3627 /*
 3628 * Loading the guest FPU may have cleared host CR0.TS
3629 */
3630 vmcs_writel(HOST_CR0, read_cr0());
3631
42dbaa5a
JK
3632 set_debugreg(vcpu->arch.dr6, 6);
3633
d77c26fc 3634 asm(
6aa8b732 3635 /* Store host registers */
c801949d
AK
3636 "push %%"R"dx; push %%"R"bp;"
3637 "push %%"R"cx \n\t"
313dbd49
AK
3638 "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
3639 "je 1f \n\t"
3640 "mov %%"R"sp, %c[host_rsp](%0) \n\t"
4ecac3fd 3641 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
313dbd49 3642 "1: \n\t"
d3edefc0
AK
3643 /* Reload cr2 if changed */
3644 "mov %c[cr2](%0), %%"R"ax \n\t"
3645 "mov %%cr2, %%"R"dx \n\t"
3646 "cmp %%"R"ax, %%"R"dx \n\t"
3647 "je 2f \n\t"
3648 "mov %%"R"ax, %%cr2 \n\t"
3649 "2: \n\t"
6aa8b732 3650 /* Check if vmlaunch or vmresume is needed */
e08aa78a 3651 "cmpl $0, %c[launched](%0) \n\t"
6aa8b732 3652 /* Load guest registers. Don't clobber flags. */
c801949d
AK
3653 "mov %c[rax](%0), %%"R"ax \n\t"
3654 "mov %c[rbx](%0), %%"R"bx \n\t"
3655 "mov %c[rdx](%0), %%"R"dx \n\t"
3656 "mov %c[rsi](%0), %%"R"si \n\t"
3657 "mov %c[rdi](%0), %%"R"di \n\t"
3658 "mov %c[rbp](%0), %%"R"bp \n\t"
05b3e0c2 3659#ifdef CONFIG_X86_64
e08aa78a
AK
3660 "mov %c[r8](%0), %%r8 \n\t"
3661 "mov %c[r9](%0), %%r9 \n\t"
3662 "mov %c[r10](%0), %%r10 \n\t"
3663 "mov %c[r11](%0), %%r11 \n\t"
3664 "mov %c[r12](%0), %%r12 \n\t"
3665 "mov %c[r13](%0), %%r13 \n\t"
3666 "mov %c[r14](%0), %%r14 \n\t"
3667 "mov %c[r15](%0), %%r15 \n\t"
6aa8b732 3668#endif
c801949d
AK
3669 "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
3670
6aa8b732 3671 /* Enter guest mode */
cd2276a7 3672 "jne .Llaunched \n\t"
4ecac3fd 3673 __ex(ASM_VMX_VMLAUNCH) "\n\t"
cd2276a7 3674 "jmp .Lkvm_vmx_return \n\t"
4ecac3fd 3675 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
cd2276a7 3676 ".Lkvm_vmx_return: "
6aa8b732 3677 /* Save guest registers, load host registers, keep flags */
c801949d
AK
3678 "xchg %0, (%%"R"sp) \n\t"
3679 "mov %%"R"ax, %c[rax](%0) \n\t"
3680 "mov %%"R"bx, %c[rbx](%0) \n\t"
3681 "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
3682 "mov %%"R"dx, %c[rdx](%0) \n\t"
3683 "mov %%"R"si, %c[rsi](%0) \n\t"
3684 "mov %%"R"di, %c[rdi](%0) \n\t"
3685 "mov %%"R"bp, %c[rbp](%0) \n\t"
05b3e0c2 3686#ifdef CONFIG_X86_64
e08aa78a
AK
3687 "mov %%r8, %c[r8](%0) \n\t"
3688 "mov %%r9, %c[r9](%0) \n\t"
3689 "mov %%r10, %c[r10](%0) \n\t"
3690 "mov %%r11, %c[r11](%0) \n\t"
3691 "mov %%r12, %c[r12](%0) \n\t"
3692 "mov %%r13, %c[r13](%0) \n\t"
3693 "mov %%r14, %c[r14](%0) \n\t"
3694 "mov %%r15, %c[r15](%0) \n\t"
6aa8b732 3695#endif
c801949d
AK
3696 "mov %%cr2, %%"R"ax \n\t"
3697 "mov %%"R"ax, %c[cr2](%0) \n\t"
3698
3699 "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t"
e08aa78a
AK
3700 "setbe %c[fail](%0) \n\t"
3701 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
3702 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
3703 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
313dbd49 3704 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
ad312c7c
ZX
3705 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
3706 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
3707 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
3708 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
3709 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
3710 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
3711 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
05b3e0c2 3712#ifdef CONFIG_X86_64
ad312c7c
ZX
3713 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
3714 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
3715 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
3716 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
3717 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
3718 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
3719 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
3720 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
6aa8b732 3721#endif
ad312c7c 3722 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
c2036300 3723 : "cc", "memory"
c801949d 3724 , R"bx", R"di", R"si"
c2036300 3725#ifdef CONFIG_X86_64
c2036300
LV
3726 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
3727#endif
3728 );
6aa8b732 3729
6de4f3ad
AK
3730 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
3731 | (1 << VCPU_EXREG_PDPTR));
5fdbf976
MT
3732 vcpu->arch.regs_dirty = 0;
3733
42dbaa5a
JK
3734 get_debugreg(vcpu->arch.dr6, 6);
3735
1155f76a 3736 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
9c8cba37
AK
3737 if (vmx->rmode.irq.pending)
3738 fixup_rmode_irq(vmx);
1155f76a 3739
d77c26fc 3740 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
15ad7146 3741 vmx->launched = 1;
1b6269db 3742
cf393f75 3743 vmx_complete_interrupts(vmx);
6aa8b732
AK
3744}
3745
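/*
 * [Editor's sketch] vmx_vcpu_run() above only writes RSP/RIP into the
 * VMCS when their cached copies are dirty, and after the exit it marks
 * RIP, RSP and the PDPTRs as not-available so the next access re-reads
 * them from the VMCS.  A stripped-down model of that avail/dirty
 * protocol (enum, struct and helpers are illustrative, not the
 * kvm_cache_regs.h API):
 */
#include <stdbool.h>
#include <stdint.h>

enum creg { CREG_RSP, CREG_RIP, CREG_PDPTR, NR_CREGS };

struct reg_cache {
	uint64_t val[NR_CREGS];
	uint32_t avail;  /* bit set: cached value is current */
	uint32_t dirty;  /* bit set: cached value must be written back */
};

static void flush_dirty_before_entry(struct reg_cache *c,
				     void (*vmcs_write)(enum creg, uint64_t))
{
	int r;

	for (r = 0; r < NR_CREGS; r++)
		if (c->dirty & (1u << r))
			vmcs_write(r, c->val[r]);
	c->dirty = 0;
}

static void invalidate_after_exit(struct reg_cache *c)
{
	/* GPRs were saved by the exit path; these three live in the VMCS. */
	c->avail = ~((1u << CREG_RIP) | (1u << CREG_RSP) |
		     (1u << CREG_PDPTR));
	c->dirty = 0;
}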
c801949d
AK
3746#undef R
3747#undef Q
3748
6aa8b732
AK
3749static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
3750{
a2fa3e9f
GH
3751 struct vcpu_vmx *vmx = to_vmx(vcpu);
3752
3753 if (vmx->vmcs) {
543e4243 3754 vcpu_clear(vmx);
a2fa3e9f
GH
3755 free_vmcs(vmx->vmcs);
3756 vmx->vmcs = NULL;
6aa8b732
AK
3757 }
3758}
3759
3760static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
3761{
fb3f0f51
RR
3762 struct vcpu_vmx *vmx = to_vmx(vcpu);
3763
2384d2b3
SY
3764 spin_lock(&vmx_vpid_lock);
3765 if (vmx->vpid != 0)
3766 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3767 spin_unlock(&vmx_vpid_lock);
6aa8b732 3768 vmx_free_vmcs(vcpu);
fb3f0f51
RR
3769 kfree(vmx->host_msrs);
3770 kfree(vmx->guest_msrs);
3771 kvm_vcpu_uninit(vcpu);
a4770347 3772 kmem_cache_free(kvm_vcpu_cache, vmx);
6aa8b732
AK
3773}
3774
fb3f0f51 3775static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
6aa8b732 3776{
fb3f0f51 3777 int err;
c16f862d 3778 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
15ad7146 3779 int cpu;
6aa8b732 3780
a2fa3e9f 3781 if (!vmx)
fb3f0f51
RR
3782 return ERR_PTR(-ENOMEM);
3783
2384d2b3
SY
3784 allocate_vpid(vmx);
3785
fb3f0f51
RR
3786 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
3787 if (err)
3788 goto free_vcpu;
965b58a5 3789
a2fa3e9f 3790 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
fb3f0f51
RR
3791 if (!vmx->guest_msrs) {
3792 err = -ENOMEM;
3793 goto uninit_vcpu;
3794 }
965b58a5 3795
a2fa3e9f
GH
3796 vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
3797 if (!vmx->host_msrs)
fb3f0f51 3798 goto free_guest_msrs;
965b58a5 3799
a2fa3e9f
GH
3800 vmx->vmcs = alloc_vmcs();
3801 if (!vmx->vmcs)
fb3f0f51 3802 goto free_msrs;
a2fa3e9f
GH
3803
3804 vmcs_clear(vmx->vmcs);
3805
15ad7146
AK
3806 cpu = get_cpu();
3807 vmx_vcpu_load(&vmx->vcpu, cpu);
8b9cf98c 3808 err = vmx_vcpu_setup(vmx);
fb3f0f51 3809 vmx_vcpu_put(&vmx->vcpu);
15ad7146 3810 put_cpu();
fb3f0f51
RR
3811 if (err)
3812 goto free_vmcs;
5e4a0b3c
MT
3813 if (vm_need_virtualize_apic_accesses(kvm))
3814 if (alloc_apic_access_page(kvm) != 0)
3815 goto free_vmcs;
fb3f0f51 3816
089d034e 3817 if (enable_ept)
b7ebfb05
SY
3818 if (alloc_identity_pagetable(kvm) != 0)
3819 goto free_vmcs;
3820
fb3f0f51
RR
3821 return &vmx->vcpu;
3822
3823free_vmcs:
3824 free_vmcs(vmx->vmcs);
3825free_msrs:
3826 kfree(vmx->host_msrs);
3827free_guest_msrs:
3828 kfree(vmx->guest_msrs);
3829uninit_vcpu:
3830 kvm_vcpu_uninit(&vmx->vcpu);
3831free_vcpu:
a4770347 3832 kmem_cache_free(kvm_vcpu_cache, vmx);
fb3f0f51 3833 return ERR_PTR(err);
6aa8b732
AK
3834}
3835
002c7f7c
YS
3836static void __init vmx_check_processor_compat(void *rtn)
3837{
3838 struct vmcs_config vmcs_conf;
3839
3840 *(int *)rtn = 0;
3841 if (setup_vmcs_config(&vmcs_conf) < 0)
3842 *(int *)rtn = -EIO;
3843 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
3844 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
3845 smp_processor_id());
3846 *(int *)rtn = -EIO;
3847 }
3848}
3849
67253af5
SY
3850static int get_ept_level(void)
3851{
3852 return VMX_EPT_DEFAULT_GAW + 1;
3853}
3854
4b12f0de 3855static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
64d4d521 3856{
4b12f0de
SY
3857 u64 ret;
3858
522c68c4
SY
 3859 /* For the VT-d and EPT combination:
 3860 * 1. MMIO: always map as UC.
 3861 * 2. EPT with VT-d:
 3862 * a. VT-d without the snooping control feature: we can't
 3863 * guarantee the result, so try to trust the guest.
 3864 * b. VT-d with the snooping control feature: snooping control
 3865 * guarantees cache correctness, so set the type to WB to stay
 3866 * consistent with the host. Same as item 3.
 3867 * 3. EPT without VT-d: always map as WB and set IGMT=1 to stay
 3868 * consistent with the host MTRRs.
3869 */
4b12f0de
SY
3870 if (is_mmio)
3871 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
522c68c4
SY
3872 else if (vcpu->kvm->arch.iommu_domain &&
3873 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
3874 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
3875 VMX_EPT_MT_EPTE_SHIFT;
4b12f0de 3876 else
522c68c4
SY
3877 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
3878 | VMX_EPT_IGMT_BIT;
4b12f0de
SY
3879
3880 return ret;
64d4d521
SY
3881}
3882
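/*
 * [Editor's sketch] A standalone restatement of the memory-type policy
 * implemented by vmx_get_mt_mask() above.  The numeric values mirror
 * the x86 MTRR types (UC=0, WB=6), the EPT memory-type field at bits
 * 5:3 of an EPT PTE (VMX_EPT_MT_EPTE_SHIFT == 3) and the IGMT bit
 * (bit 6), which makes the CPU ignore the guest PAT; parameter and
 * macro names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define EPT_MT_SHIFT  3
#define EPT_IGMT_BIT  (1ull << 6)
#define MT_UC         0ull
#define MT_WB         6ull

static uint64_t ept_memory_type(bool is_mmio, bool have_iommu,
				bool iommu_snoops, uint64_t guest_type)
{
	if (is_mmio)				/* 1. MMIO: always UC */
		return MT_UC << EPT_MT_SHIFT;
	if (have_iommu && !iommu_snoops)	/* 2a. trust the guest */
		return guest_type << EPT_MT_SHIFT;
	/* 2b./3. snooping IOMMU, or no IOMMU at all: WB + ignore PAT */
	return (MT_WB << EPT_MT_SHIFT) | EPT_IGMT_BIT;
}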
229456fc
MT
3883static const struct trace_print_flags vmx_exit_reasons_str[] = {
3884 { EXIT_REASON_EXCEPTION_NMI, "exception" },
3885 { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
3886 { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
3887 { EXIT_REASON_NMI_WINDOW, "nmi_window" },
3888 { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
3889 { EXIT_REASON_CR_ACCESS, "cr_access" },
3890 { EXIT_REASON_DR_ACCESS, "dr_access" },
3891 { EXIT_REASON_CPUID, "cpuid" },
3892 { EXIT_REASON_MSR_READ, "rdmsr" },
3893 { EXIT_REASON_MSR_WRITE, "wrmsr" },
3894 { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
3895 { EXIT_REASON_HLT, "halt" },
3896 { EXIT_REASON_INVLPG, "invlpg" },
3897 { EXIT_REASON_VMCALL, "hypercall" },
3898 { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
3899 { EXIT_REASON_APIC_ACCESS, "apic_access" },
3900 { EXIT_REASON_WBINVD, "wbinvd" },
3901 { EXIT_REASON_TASK_SWITCH, "task_switch" },
3902 { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
3903 { -1, NULL }
3904};
3905
cbdd1bea 3906static struct kvm_x86_ops vmx_x86_ops = {
6aa8b732
AK
3907 .cpu_has_kvm_support = cpu_has_kvm_support,
3908 .disabled_by_bios = vmx_disabled_by_bios,
3909 .hardware_setup = hardware_setup,
3910 .hardware_unsetup = hardware_unsetup,
002c7f7c 3911 .check_processor_compatibility = vmx_check_processor_compat,
6aa8b732
AK
3912 .hardware_enable = hardware_enable,
3913 .hardware_disable = hardware_disable,
04547156 3914 .cpu_has_accelerated_tpr = report_flexpriority,
6aa8b732
AK
3915
3916 .vcpu_create = vmx_create_vcpu,
3917 .vcpu_free = vmx_free_vcpu,
04d2cc77 3918 .vcpu_reset = vmx_vcpu_reset,
6aa8b732 3919
04d2cc77 3920 .prepare_guest_switch = vmx_save_host_state,
6aa8b732
AK
3921 .vcpu_load = vmx_vcpu_load,
3922 .vcpu_put = vmx_vcpu_put,
3923
3924 .set_guest_debug = set_guest_debug,
3925 .get_msr = vmx_get_msr,
3926 .set_msr = vmx_set_msr,
3927 .get_segment_base = vmx_get_segment_base,
3928 .get_segment = vmx_get_segment,
3929 .set_segment = vmx_set_segment,
2e4d2653 3930 .get_cpl = vmx_get_cpl,
6aa8b732 3931 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
25c4c276 3932 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
6aa8b732 3933 .set_cr0 = vmx_set_cr0,
6aa8b732
AK
3934 .set_cr3 = vmx_set_cr3,
3935 .set_cr4 = vmx_set_cr4,
6aa8b732 3936 .set_efer = vmx_set_efer,
6aa8b732
AK
3937 .get_idt = vmx_get_idt,
3938 .set_idt = vmx_set_idt,
3939 .get_gdt = vmx_get_gdt,
3940 .set_gdt = vmx_set_gdt,
5fdbf976 3941 .cache_reg = vmx_cache_reg,
6aa8b732
AK
3942 .get_rflags = vmx_get_rflags,
3943 .set_rflags = vmx_set_rflags,
3944
3945 .tlb_flush = vmx_flush_tlb,
6aa8b732 3946
6aa8b732 3947 .run = vmx_vcpu_run,
6062d012 3948 .handle_exit = vmx_handle_exit,
6aa8b732 3949 .skip_emulated_instruction = skip_emulated_instruction,
2809f5d2
GC
3950 .set_interrupt_shadow = vmx_set_interrupt_shadow,
3951 .get_interrupt_shadow = vmx_get_interrupt_shadow,
102d8325 3952 .patch_hypercall = vmx_patch_hypercall,
2a8067f1 3953 .set_irq = vmx_inject_irq,
95ba8273 3954 .set_nmi = vmx_inject_nmi,
298101da 3955 .queue_exception = vmx_queue_exception,
78646121 3956 .interrupt_allowed = vmx_interrupt_allowed,
95ba8273
GN
3957 .nmi_allowed = vmx_nmi_allowed,
3958 .enable_nmi_window = enable_nmi_window,
3959 .enable_irq_window = enable_irq_window,
3960 .update_cr8_intercept = update_cr8_intercept,
95ba8273 3961
cbc94022 3962 .set_tss_addr = vmx_set_tss_addr,
67253af5 3963 .get_tdp_level = get_ept_level,
4b12f0de 3964 .get_mt_mask = vmx_get_mt_mask,
229456fc
MT
3965
3966 .exit_reasons_str = vmx_exit_reasons_str,
6aa8b732
AK
3967};
3968
3969static int __init vmx_init(void)
3970{
fdef3ad1
HQ
3971 int r;
3972
3e7c73e9 3973 vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
fdef3ad1
HQ
3974 if (!vmx_io_bitmap_a)
3975 return -ENOMEM;
3976
3e7c73e9 3977 vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
fdef3ad1
HQ
3978 if (!vmx_io_bitmap_b) {
3979 r = -ENOMEM;
3980 goto out;
3981 }
3982
5897297b
AK
3983 vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
3984 if (!vmx_msr_bitmap_legacy) {
25c5f225
SY
3985 r = -ENOMEM;
3986 goto out1;
3987 }
3988
5897297b
AK
3989 vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
3990 if (!vmx_msr_bitmap_longmode) {
3991 r = -ENOMEM;
3992 goto out2;
3993 }
3994
fdef3ad1
HQ
3995 /*
3996 * Allow direct access to the PC debug port (it is often used for I/O
3997 * delays, but the vmexits simply slow things down).
3998 */
3e7c73e9
AK
3999 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
4000 clear_bit(0x80, vmx_io_bitmap_a);
fdef3ad1 4001
3e7c73e9 4002 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
fdef3ad1 4003
5897297b
AK
4004 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
4005 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
25c5f225 4006
2384d2b3
SY
4007 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
4008
cb498ea2 4009 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
fdef3ad1 4010 if (r)
5897297b 4011 goto out3;
25c5f225 4012
5897297b
AK
4013 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
4014 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
4015 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
4016 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
4017 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
4018 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
fdef3ad1 4019
089d034e 4020 if (enable_ept) {
1439442c 4021 bypass_guest_pf = 0;
5fdbcb9d 4022 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
2aaf69dc 4023 VMX_EPT_WRITABLE_MASK);
534e38b4 4024 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
4b12f0de 4025 VMX_EPT_EXECUTABLE_MASK);
5fdbcb9d
SY
4026 kvm_enable_tdp();
4027 } else
4028 kvm_disable_tdp();
1439442c 4029
c7addb90
AK
4030 if (bypass_guest_pf)
4031 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
4032
1439442c
SY
4033 ept_sync_global();
4034
fdef3ad1
HQ
4035 return 0;
4036
5897297b
AK
4037out3:
4038 free_page((unsigned long)vmx_msr_bitmap_longmode);
25c5f225 4039out2:
5897297b 4040 free_page((unsigned long)vmx_msr_bitmap_legacy);
fdef3ad1 4041out1:
3e7c73e9 4042 free_page((unsigned long)vmx_io_bitmap_b);
fdef3ad1 4043out:
3e7c73e9 4044 free_page((unsigned long)vmx_io_bitmap_a);
fdef3ad1 4045 return r;
6aa8b732
AK
4046}
4047
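/*
 * [Editor's sketch] vmx_init() above uses the kernel's standard
 * goto-ladder unwind: each allocation adds one label, and a failure
 * jumps to the label that frees everything allocated so far, in
 * reverse order.  A stripped-down illustration with hypothetical
 * buffers:
 */
#include <stdlib.h>

static int init_three_buffers(void **a, void **b, void **c)
{
	int r = -1;	/* stands in for -ENOMEM */

	*a = malloc(4096);
	if (!*a)
		return r;
	*b = malloc(4096);
	if (!*b)
		goto out_a;
	*c = malloc(4096);
	if (!*c)
		goto out_b;
	return 0;	/* success: caller owns a, b and c */

out_b:
	free(*b);
out_a:
	free(*a);
	return r;
}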
4048static void __exit vmx_exit(void)
4049{
5897297b
AK
4050 free_page((unsigned long)vmx_msr_bitmap_legacy);
4051 free_page((unsigned long)vmx_msr_bitmap_longmode);
3e7c73e9
AK
4052 free_page((unsigned long)vmx_io_bitmap_b);
4053 free_page((unsigned long)vmx_io_bitmap_a);
fdef3ad1 4054
cb498ea2 4055 kvm_exit();
6aa8b732
AK
4056}
4057
4058module_init(vmx_init)
4059module_exit(vmx_exit)