drivers/kvm/kvm.h

#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include "vmx.h"
#include <linux/kvm.h>
#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
	 | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
#define KVM_GUEST_CR4_MASK \
	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 1
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 256

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
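
/*
 * Illustrative sketch (not part of the original header; helper name
 * hypothetical): fx_buf is sized for two FXSAVE images plus alignment
 * slack, so a 16-byte-aligned image pointer can be carved out of it.
 */
static inline char *fx_image_align(char *buf)
{
	return (char *)(((unsigned long)buf + FX_IMAGE_ALIGN - 1)
			& ~(FX_IMAGE_ALIGN - 1UL));
}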

#define DE_VECTOR 0
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef unsigned long hfn_t;

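/*
 * Illustrative conversions (sketch, not part of the original header;
 * helper names hypothetical): a physical address splits into a frame
 * number and a page offset with the usual shifts.
 */
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t hfn_to_hpa(hfn_t hfn)
{
	return (hpa_t)hfn << PAGE_SHIFT;
}
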
struct kvm_mmu_page {
	struct list_head link;
	hpa_t page_hpa;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int global;                /* Set if all ptes in this page are global */
	u64 *parent_pte;
};

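/*
 * Usage sketch (illustrative): when a pte mapping one of a slot's pages
 * is added to this shadow page, the slot's bit is set, e.g.
 *
 *	__set_bit(memslot_id(kvm, slot), &page->slot_bitmap);
 */
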
struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level PAE with 64-bit
 * ptes, and 2-level 32-bit).  The kvm_mmu structure abstracts the
 * details of the current mmu mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*inval_page)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
};

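/*
 * Usage sketch (illustrative, not part of the original header): an exit
 * handler dispatches through whichever mode's callbacks are installed,
 * e.g.
 *
 *	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
 */
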
struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

struct kvm_vcpu {
	struct kvm *kvm;
	union {
		struct vmcs *vmcs;
		struct vcpu_svm *svm;
	};
	struct mutex mutex;
	int cpu;
	int launched;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
	unsigned long irq_pending[NR_IRQ_WORDS];
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;                /* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u64 shadow_efer;
	u64 apic_base;
	int nmsrs;
	struct vmx_msr_entry *guest_msrs;
	struct vmx_msr_entry *host_msrs;

	struct list_head free_pages;
	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
	struct kvm_mmu mmu;

	struct kvm_guest_debug guest_debug;

	char fx_buf[FX_BUF_SIZE];
	char *host_fx_image;
	char *guest_fx_image;

	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
};

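/*
 * Illustrative sketch (helper name hypothetical): queuing an interrupt
 * sets the vector's bit in irq_pending and the summary bit for the word
 * that contains it.  Real code would use atomic set_bit(); plain
 * assignment keeps the sketch self-contained.
 */
static inline void queue_irq_sketch(struct kvm_vcpu *vcpu, int irq)
{
	vcpu->irq_pending[irq / BITS_PER_LONG] |= 1UL << (irq % BITS_PER_LONG);
	vcpu->irq_summary |= 1UL << (irq / BITS_PER_LONG);
}
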
struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	struct page **phys_mem;
	unsigned long *dirty_bitmap;
};

struct kvm {
	spinlock_t lock; /* protects everything except vcpus */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	struct list_head active_mmu_pages;
	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
	int memory_config_version;
	int busy;
};

struct kvm_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_exits;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

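/*
 * descriptor_table is packed because sgdt/sidt store a raw 6-byte image
 * (10 bytes on x86_64): a 16-bit limit immediately followed by the
 * base, with no padding.  Usage sketch (illustrative):
 *
 *	struct descriptor_table dt;
 *	get_idt(&dt);
 */
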
struct kvm_arch_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */

	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);

	struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*is_long_mode)(struct kvm_vcpu *vcpu);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
				      unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
};

extern struct kvm_stat kvm_stat;
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_init(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);

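/*
 * Usage sketch (illustrative): translations are checked against the
 * high-bit error pattern before being used, e.g.
 *
 *	hpa_t hpa = gpa_to_hpa(vcpu, gpa);
 *	if (is_error_hpa(hpa))
 *		return 0;
 */
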
void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->phys_mem[gfn - slot->base_gfn];
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
	EMULATE_DONE,       /* no further processing */
	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
	EMULATE_FAIL,       /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code);
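
/*
 * Illustrative caller (sketch; the function name and return convention
 * are hypothetical): exit handlers switch on the emulation_result
 * values above.
 */
static inline int handle_emulation_sketch(struct kvm_vcpu *vcpu,
					  struct kvm_run *run,
					  unsigned long cr2, u16 error_code)
{
	switch (emulate_instruction(vcpu, run, cr2, error_code)) {
	case EMULATE_DONE:
		return 1;       /* resume the guest */
	case EMULATE_DO_MMIO:
		return 0;       /* let userspace complete the mmio */
	default:
		return -1;      /* EMULATE_FAIL: give up */
	}
}
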
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);

struct x86_emulate_ctxt;

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

#ifdef CONFIG_X86_64
void set_efer(struct kvm_vcpu *vcpu, u64 efer);
#endif

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);

int kvm_read_guest(struct kvm_vcpu *vcpu,
		   gva_t addr,
		   unsigned long size,
		   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
		    gva_t addr,
		    unsigned long size,
		    void *data);

unsigned long segment_base(u16 selector);

static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page->private;
}

static inline u16 read_fs(void)
{
	u16 seg;
	asm ("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm ("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm ("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm ("lldt %0" : : "g"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm ("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(void *image)
{
	asm ("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(void *image)
{
	asm ("fxrstor (%0)" : : "r"(image));
}

static inline void fpu_init(void)
{
	asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

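/*
 * The byte strings above hand-encode VMX instructions for assemblers
 * that predate VMX support.  Usage sketch (illustrative):
 *
 *	asm volatile (ASM_VMX_VMXOFF : : : "cc");
 */
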
#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
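/* the "+ 1" is the byte past the I/O bitmap, which must be all ones */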

#ifdef CONFIG_X86_64

/*
 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.  Therefore
 * we need to allocate shadow page tables in the first 4GB of memory, which
 * happens to fit the DMA32 zone.
 */
#define GFP_KVM_MMU (GFP_KERNEL | __GFP_DMA32)

#else

#define GFP_KVM_MMU GFP_KERNEL

#endif

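/*
 * Illustrative allocation (sketch; helper name hypothetical): shadow
 * page table pages are allocated with the mask above so that, on
 * x86_64, they land below 4GB and their physical addresses fit in a
 * 32-bit cr3.
 */
static inline struct page *alloc_mmu_page_sketch(void)
{
	return alloc_page(GFP_KVM_MMU);
}
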
#endif