KVM: Add fpu get/set operations
drivers/kvm/svm.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <asm/desc.h>

#include "kvm_svm.h"
#include "x86_emulate.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DB_VECTOR 1
#define UD_VECTOR 6
#define GP_VECTOR 13

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)
#define CR4_DE_MASK (1UL << 3)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

unsigned long iopm_base;
unsigned long msrpm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	uint64_t asid_generation;
	uint32_t max_asid;
	uint32_t next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

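/*
 * Effective address size of the guest, in bytes: 2 in real mode or
 * vm86 mode, otherwise 8 or 4 depending on the L (long mode) and D/B
 * (default size) bits of the CS attributes.
 */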
static unsigned get_addr_size(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
	u16 cs_attrib;

	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
		return 2;

	cs_attrib = sa->cs.attrib;

	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
		(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
}

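/*
 * Pending interrupts live in a two-level bitmap: irq_pending[] holds
 * one bit per vector, irq_summary one bit per irq_pending word.
 * pop_irq() removes and returns the lowest pending vector; push_irq()
 * queues one back.
 */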
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}

static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	vcpu->svm->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}

static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     GP_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = error_code;
}

static void inject_ud(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_TYPE_EXEPT |
					     UD_VECTOR;
}

static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

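/*
 * Advance the guest rip past the instruction that just trapped, using
 * the next_rip value recorded for the intercept.  This also clears
 * the interrupt shadow, so the interrupt window is open afterwards.
 */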
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > MAX_INST_SIZE) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->next_rip);
	}

	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->interrupt_window_open = 1;
}

static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}

static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

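/*
 * The MSR permission map has two intercept bits per MSR (bit 0: read,
 * bit 1: write; a set bit intercepts).  MSRs are grouped into the
 * ranges listed in msrpm_ranges[], each range covering MSRS_IN_RANGE
 * registers.  Returns 1 if the MSR was found in a range, 0 otherwise.
 */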
static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;
	memset(page_address(iopm_pages), 0xff,
	       PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

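/*
 * Set a fresh VMCB up to resemble the x86 reset state: intercepts for
 * the control and debug registers, page faults and the various
 * virtualization-sensitive instructions, plus the real-mode segment
 * and control register values a CPU has after INIT.
 */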
static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;

	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
		/*
		 * selective cr0 intercept bug?
		 *	0:   0f 22 d8                mov    %eax,%cr3
		 *	3:   0f 20 c0                mov    %cr0,%eax
		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
		 *	b:   0f 22 c0                mov    %eax,%cr0
		 * set cr3 -> interception
		 * get cr0 -> interception
		 * set cr0 -> no interception
		 */
		/*	     (1ULL << INTERCEPT_SELECTIVE_CR0) | */
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT) |
			     (1ULL << INTERCEPT_MONITOR) |
			     (1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * The cr0 value on cpu init should be 0x60000010; we enable the cpu
	 * cache by default.  The orderly way would be to enable the cache
	 * in the BIOS.
	 */
	save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
	save->cr4 = CR4_PAE_MASK;
	/* rdx = ?? */
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int r;

	r = -ENOMEM;
	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
	if (!vcpu->svm)
		goto out1;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto out2;

	vcpu->svm->vmcb = page_address(page);
	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	vcpu->svm->asid_generation = 0;
	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
	init_vmcb(vcpu->svm->vmcb);

	fx_init(vcpu);
	vcpu->apic_base = 0xfee00000 |
			/* for vcpu 0 */ MSR_IA32_APICBASE_BSP |
			MSR_IA32_APICBASE_ENABLE;

	return 0;

out2:
	kfree(vcpu->svm);
out1:
	return r;
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->svm)
		return;
	if (vcpu->svm->vmcb)
		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
	kfree(vcpu->svm);
}

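/*
 * The TSC keeps running when a vcpu migrates to another CPU, and the
 * two CPUs' counters need not match.  Adjust tsc_offset by the
 * difference so the guest never observes the TSC going backwards.
 */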
static void svm_vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	cpu = get_cpu();
	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		vcpu->svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
	}
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	rdtscll(vcpu->host_tsc);
	put_cpu();
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}

static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
	vcpu->rip = vcpu->svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	vcpu->svm->vmcb->save.rip = vcpu->rip;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return vcpu->svm->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vcpu->svm->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &vcpu->svm->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.idtr.limit;
	dt->base = vcpu->svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.idtr.limit = dt->limit;
	vcpu->svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
	dt->base = vcpu->svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
	vcpu->svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	vcpu->cr0 = cr0;
	cr0 |= CR0_PG_MASK | CR0_WP_MASK;
	cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
	vcpu->svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		vcpu->svm->vmcb->save.cpl
			= (vcpu->svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

	vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_HOST_SAVE_MSRS; i++)
		wrmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_HOST_SAVE_MSRS; i++)
		rdmsrl(host_save_msrs[i], vcpu->svm->host_msrs[i]);
}

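/*
 * Hand the vcpu the next free ASID on this CPU.  When the ASID space
 * is exhausted, bump the generation, ask the hardware to flush all
 * ASIDs on the next VMRUN, and start handing them out again from 1.
 */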
static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	vcpu->svm->asid_generation = svm_data->asid_generation;
	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
}

static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	invlpga(address, vcpu->svm->vmcb->control.asid); /* is needed? */
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return vcpu->svm->db_regs[dr];
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	*exception = 0;

	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		vcpu->svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & CR4_DE_MASK) {
			*exception = UD_VECTOR;
			return;
		}
		/* fall through */
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	spin_lock(&vcpu->kvm->lock);

	fault_address = vcpu->svm->vmcb->control.exit_info_2;
	error_code = vcpu->svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
	if (r < 0) {
		spin_unlock(&vcpu->kvm->lock);
		return r;
	}
	if (!r) {
		spin_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	spin_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++kvm_stat.mmio_exits;
		kvm_run->exit_reason = KVM_EXIT_MMIO;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
	init_vmcb(vcpu->svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

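/*
 * Fetch the trapping instruction and scan its prefix bytes for a
 * segment override (returned through *seg) and the 0x67 address-size
 * override (returned through *addr_override).  Returns 0 on fetch or
 * decode failure, 1 once the first non-prefix byte is reached.
 */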
static int io_get_override(struct kvm_vcpu *vcpu,
			   struct vmcb_seg **seg,
			   int *addr_override)
{
	u8 inst[MAX_INST_SIZE];
	unsigned ins_length;
	gva_t rip;
	int i;

	rip = vcpu->svm->vmcb->save.rip;
	ins_length = vcpu->svm->next_rip - rip;
	rip += vcpu->svm->vmcb->save.cs.base;

	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
		       vcpu->svm->vmcb->save.cs.base,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->svm->vmcb->control.exit_info_2,
		       ins_length);

	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
		/* #PF */
		return 0;

	*addr_override = 0;
	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x66:
			continue;
		case 0x67:
			*addr_override = 1;
			continue;
		case 0x2e:
			*seg = &vcpu->svm->vmcb->save.cs;
			continue;
		case 0x36:
			*seg = &vcpu->svm->vmcb->save.ss;
			continue;
		case 0x3e:
			*seg = &vcpu->svm->vmcb->save.ds;
			continue;
		case 0x26:
			*seg = &vcpu->svm->vmcb->save.es;
			continue;
		case 0x64:
			*seg = &vcpu->svm->vmcb->save.fs;
			continue;
		case 0x65:
			*seg = &vcpu->svm->vmcb->save.gs;
			continue;
		default:
			return 1;
		}
	printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
	return 0;
}

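/*
 * Compute the linear address and address mask for a string I/O
 * instruction: INS always targets es:rdi, while OUTS reads ds:rsi
 * unless a segment override prefix says otherwise.  Returns the
 * address mask, or 0 on failure.
 */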
static unsigned long io_address(struct kvm_vcpu *vcpu, int ins, gva_t *address)
{
	unsigned long addr_mask;
	unsigned long *reg;
	struct vmcb_seg *seg;
	int addr_override;
	struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
	u16 cs_attrib = save_area->cs.attrib;
	unsigned addr_size = get_addr_size(vcpu);

	if (!io_get_override(vcpu, &seg, &addr_override))
		return 0;

	if (addr_override)
		addr_size = (addr_size == 2) ? 4 : (addr_size >> 1);

	if (ins) {
		reg = &vcpu->regs[VCPU_REGS_RDI];
		seg = &vcpu->svm->vmcb->save.es;
	} else {
		reg = &vcpu->regs[VCPU_REGS_RSI];
		seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
	}

	addr_mask = ~0ULL >> (64 - (addr_size * 8));

	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
	    !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
		*address = (*reg & addr_mask);
		return addr_mask;
	}

	if (!(seg->attrib & SVM_SELECTOR_P_MASK)) {
		svm_inject_gp(vcpu, 0);
		return 0;
	}

	*address = (*reg & addr_mask) + seg->base;
	return addr_mask;
}

static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;
	unsigned long count;
	gva_t address = 0;

	++kvm_stat.io_exits;

	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	count = 1;
	down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	if (string) {
		unsigned addr_mask;

		addr_mask = io_address(vcpu, in, &address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n",
			       __FUNCTION__);
			return 1;
		}

		if (rep)
			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
	}
	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
			     address, rep, port);
}

static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
	skip_emulated_instruction(vcpu);
	if (vcpu->irq_summary)
		return 1;

	kvm_run->exit_reason = KVM_EXIT_HLT;
	++kvm_stat.halt_exits;
	return 0;
}

static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}

static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}

static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	printk(KERN_DEBUG "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(vcpu);
	return 1;
}

static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}

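/*
 * Guest MSR reads are served from the VMCB save area, where SVM keeps
 * the syscall/sysenter MSRs, and from the stored tsc_offset for the
 * TSC; everything else is deferred to the generic kvm MSR code.
 */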
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = vcpu->svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = vcpu->svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = vcpu->svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = vcpu->svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = vcpu->svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = vcpu->svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = vcpu->svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = vcpu->svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		vcpu->svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		vcpu->svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		vcpu->svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		vcpu->svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		vcpu->svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		vcpu->svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vcpu->svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		vcpu->svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		vcpu->svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
		skip_emulated_instruction(vcpu);
	return 1;
}

static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (vcpu->svm->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
}

static int interrupt_window_interception(struct kvm_vcpu *vcpu,
					 struct kvm_run *kvm_run)
{
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		++kvm_stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

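/*
 * Dispatch table indexed by SVM exit code.  Exits that need no work
 * map to nop_on_interception; privileged SVM instructions that the
 * guest may not use map to invalid_op_interception.
 */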
static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_INTR]				= nop_on_interception,
	[SVM_EXIT_NMI]				= nop_on_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= emulate_on_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
};

static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 exit_code = vcpu->svm->vmcb->control.exit_code;

	if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
		       __FUNCTION__,
		       exit_code,
		       vcpu->svm->vmcb->save.rip,
		       vcpu->cr0,
		       vcpu->svm->vmcb->save.rflags);
		return 0;
	}

	return svm_exit_handlers[exit_code](vcpu, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (vcpu->cpu != cpu ||
	    vcpu->svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
}

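/*
 * Virtual interrupt injection: pop a pending irq and program it into
 * int_vector/V_IRQ.  kvm_reput_irq() undoes an injection the guest
 * never took, and do_interrupt_requests() arms the VINTR intercept
 * when an irq is pending but the window is closed by rflags.IF or an
 * interrupt shadow.
 */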
static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	control = &vcpu->svm->vmcb->control;
	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void kvm_reput_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(vcpu, control->int_vector);
	}

	vcpu->interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;

	vcpu->interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		kvm_do_inject_irq(vcpu);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	} else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
	kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
}

static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

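/*
 * The main world switch: with GIF cleared, save the host's cr2,
 * dr6/dr7, FPU state and MSRs, load the guest's, run the guest via
 * VMLOAD/VMRUN/VMSAVE, then restore host state and handle the exit.
 * The asm block spills and reloads every general-purpose register
 * around VMRUN, since the VMCB only holds rax, rsp and rip.
 */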
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;
	int r;

again:
	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	clgi();

	pre_svm_run(vcpu);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	vcpu->svm->host_cr2 = kvm_read_cr2();
	vcpu->svm->host_dr6 = read_dr6();
	vcpu->svm->host_dr7 = read_dr7();
	vcpu->svm->vmcb->save.cr2 = vcpu->cr2;

	if (vcpu->svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(vcpu->svm->host_db_regs);
		load_db_regs(vcpu->svm->db_regs);
	}

	fx_save(vcpu->host_fx_image);
	fx_restore(vcpu->guest_fx_image);

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8; push %%r9; push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[vcpu]), %%rbx \n\t"
		"mov %c[rcx](%[vcpu]), %%rcx \n\t"
		"mov %c[rdx](%[vcpu]), %%rdx \n\t"
		"mov %c[rsi](%[vcpu]), %%rsi \n\t"
		"mov %c[rdi](%[vcpu]), %%rdi \n\t"
		"mov %c[rbp](%[vcpu]), %%rbp \n\t"
		"mov %c[r8](%[vcpu]), %%r8 \n\t"
		"mov %c[r9](%[vcpu]), %%r9 \n\t"
		"mov %c[r10](%[vcpu]), %%r10 \n\t"
		"mov %c[r11](%[vcpu]), %%r11 \n\t"
		"mov %c[r12](%[vcpu]), %%r12 \n\t"
		"mov %c[r13](%[vcpu]), %%r13 \n\t"
		"mov %c[r14](%[vcpu]), %%r14 \n\t"
		"mov %c[r15](%[vcpu]), %%r15 \n\t"
#else
		"mov %c[rbx](%[vcpu]), %%ebx \n\t"
		"mov %c[rcx](%[vcpu]), %%ecx \n\t"
		"mov %c[rdx](%[vcpu]), %%edx \n\t"
		"mov %c[rsi](%[vcpu]), %%esi \n\t"
		"mov %c[rdi](%[vcpu]), %%edi \n\t"
		"mov %c[rbp](%[vcpu]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[svm](%[vcpu]), %%rax \n\t"
		"mov %c[vmcb](%%rax), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[svm](%[vcpu]), %%eax \n\t"
		"mov %c[vmcb](%%eax), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
		"mov %%rsi, %c[rsi](%[vcpu]) \n\t"
		"mov %%rdi, %c[rdi](%[vcpu]) \n\t"
		"mov %%rbp, %c[rbp](%[vcpu]) \n\t"
		"mov %%r8, %c[r8](%[vcpu]) \n\t"
		"mov %%r9, %c[r9](%[vcpu]) \n\t"
		"mov %%r10, %c[r10](%[vcpu]) \n\t"
		"mov %%r11, %c[r11](%[vcpu]) \n\t"
		"mov %%r12, %c[r12](%[vcpu]) \n\t"
		"mov %%r13, %c[r13](%[vcpu]) \n\t"
		"mov %%r14, %c[r14](%[vcpu]) \n\t"
		"mov %%r15, %c[r15](%[vcpu]) \n\t"

		"pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[vcpu]) \n\t"
		"mov %%ecx, %c[rcx](%[vcpu]) \n\t"
		"mov %%edx, %c[rdx](%[vcpu]) \n\t"
		"mov %%esi, %c[rsi](%[vcpu]) \n\t"
		"mov %%edi, %c[rdi](%[vcpu]) \n\t"
		"mov %%ebp, %c[rbp](%[vcpu]) \n\t"

		"pop %%ebp; pop %%edi; pop %%esi;"
		"pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
		:
		: [vcpu]"a"(vcpu),
		  [svm]"i"(offsetof(struct kvm_vcpu, svm)),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");

	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
		load_db_regs(vcpu->svm->host_db_regs);

	vcpu->cr2 = vcpu->svm->vmcb->save.cr2;

	write_dr6(vcpu->svm->host_dr6);
	write_dr7(vcpu->svm->host_dr7);
	kvm_write_cr2(vcpu->svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING,
			(void *)(unsigned long)vcpu->svm->vmcb->save.rip);

	stgi();

	kvm_reput_irq(vcpu);

	vcpu->svm->next_rip = 0;

	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= vcpu->svm->vmcb->control.exit_code;
		post_kvm_run_save(vcpu, kvm_run);
		return 0;
	}

	r = handle_exit(vcpu, kvm_run);
	if (r > 0) {
		if (signal_pending(current)) {
			++kvm_stat.signal_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			++kvm_stat.request_irq_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}
		kvm_resched(vcpu);
		goto again;
	}
	post_kvm_run_save(vcpu, kvm_run);
	return r;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	vcpu->svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);
}

static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;

	++kvm_stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		vcpu->svm->vmcb->control.event_inj_err = 0;
		vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						     SVM_EVTINJ_VALID_ERR |
						     SVM_EVTINJ_TYPE_EXEPT |
						     DF_VECTOR;
		return;
	}
	vcpu->cr2 = addr;
	vcpu->svm->vmcb->save.cr2 = addr;
	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					     SVM_EVTINJ_VALID_ERR |
					     SVM_EVTINJ_TYPE_EXEPT |
					     PF_VECTOR;
	vcpu->svm->vmcb->control.event_inj_err = err_code;
}

static int is_disabled(void)
{
	return 0;
}

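/*
 * 0f 01 d9 encodes the VMMCALL instruction; the trailing c3 (ret) is
 * presumably there so the patched sequence can be called like a
 * function by the guest.
 */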
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3;
}

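/*
 * Wire the SVM implementations into the arch-neutral kvm core; the
 * vmx counterpart fills in the same table for Intel hardware.
 */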
static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,

	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.vcpu_setup = svm_vcpu_setup,
	.patch_hypercall = svm_patch_hypercall,
};

static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)