/******************************************************************************
 * arch-x86_32.h
 *
 * Guest OS interface to x86 Xen.
 *
 * Copyright (c) 2004, K A Fraser
 */

#ifndef ASM_X86__XEN__INTERFACE_H
#define ASM_X86__XEN__INTERFACE_H

#ifdef __XEN__
#define __DEFINE_GUEST_HANDLE(name, type) \
	typedef struct { type *p; } __guest_handle_ ## name
#else
#define __DEFINE_GUEST_HANDLE(name, type) \
	typedef type * __guest_handle_ ## name
#endif

#define DEFINE_GUEST_HANDLE_STRUCT(name) \
	__DEFINE_GUEST_HANDLE(name, struct name)
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
#define GUEST_HANDLE(name) __guest_handle_ ## name

#ifdef __XEN__
#if defined(__i386__)
#define set_xen_guest_handle(hnd, val)			\
	do {						\
		if (sizeof(hnd) == 8)			\
			*(uint64_t *)&(hnd) = 0;	\
		(hnd).p = val;				\
	} while (0)
#elif defined(__x86_64__)
#define set_xen_guest_handle(hnd, val)	do { (hnd).p = val; } while (0)
#endif
#else
#if defined(__i386__)
#define set_xen_guest_handle(hnd, val)			\
	do {						\
		if (sizeof(hnd) == 8)			\
			*(uint64_t *)&(hnd) = 0;	\
		(hnd) = val;				\
	} while (0)
#elif defined(__x86_64__)
#define set_xen_guest_handle(hnd, val)	do { (hnd) = val; } while (0)
#endif
#endif

#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
__DEFINE_GUEST_HANDLE(ulong, unsigned long);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
#endif
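/*
 * Usage sketch (illustrative only, not part of the interface): a guest
 * handle is embedded in a hypercall argument structure and then pointed
 * at guest memory with set_xen_guest_handle().  The structure and
 * variable names below are hypothetical.
 *
 *	struct xen_extent_sketch {
 *		GUEST_HANDLE(ulong) extent_start;  // unsigned long * on the guest side
 *		unsigned int nr_extents;
 *	};
 *
 *	static unsigned long frame_list[16];
 *	struct xen_extent_sketch arg;
 *
 *	set_xen_guest_handle(arg.extent_start, frame_list);
 *	arg.nr_extents = 16;
 */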

#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#endif

#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
#endif
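/*
 * Illustrative sketch only: machine_to_phys_mapping is the hypervisor's
 * M2P table, indexed by machine frame number (MFN) and yielding the
 * guest's pseudo-physical frame number (PFN), so a raw lookup is a plain
 * array access.  mfn_to_pfn_sketch() is a hypothetical helper; a real
 * wrapper would typically add feature and range checks around it.
 *
 *	static inline unsigned long mfn_to_pfn_sketch(unsigned long mfn)
 *	{
 *		return machine_to_phys_mapping[mfn];
 *	}
 */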

/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32

/*
 * SEGMENT DESCRIPTOR TABLES
 */
/*
 * A number of GDT entries are reserved by Xen. These are not situated at the
 * start of the GDT because some stupid OSes export hard-coded selector values
 * in their ABI. These hard-coded values are always near the start of the GDT,
 * so Xen places itself out of the way, at the far end of the GDT.
 */
#define FIRST_RESERVED_GDT_PAGE  14
#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
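/*
 * Worked out with 4096-byte GDT pages and 8-byte descriptors:
 * FIRST_RESERVED_GDT_BYTE  = 14 * 4096 = 57344
 * FIRST_RESERVED_GDT_ENTRY = 57344 / 8 = 7168
 * i.e. Xen's reserved range starts at entry 7168; entries below that
 * remain available to the guest OS.
 */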

/*
 * Send an array of these to HYPERVISOR_set_trap_table().
 * The privilege level specifies which modes may enter a trap via a software
 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
 * privilege levels as follows:
 *  Level == 0: No one may enter
 *  Level == 1: Kernel may enter
 *  Level == 2: Kernel may enter
 *  Level == 3: Everyone may enter
 */
#define TI_GET_DPL(_ti)		((_ti)->flags & 3)
#define TI_GET_IF(_ti)		((_ti)->flags & 4)
#define TI_SET_DPL(_ti, _dpl)	((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti, _if)	((_ti)->flags |= ((!!(_if))<<2))

#ifndef __ASSEMBLY__
struct trap_info {
	uint8_t vector;		/* exception vector */
	uint8_t flags;		/* 0-3: privilege level; 4: clear event enable? */
	uint16_t cs;		/* code selector */
	unsigned long address;	/* code offset */
};
DEFINE_GUEST_HANDLE_STRUCT(trap_info);
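/*
 * Illustrative sketch only: filling one virtual IDT slot with the macros
 * above and handing it to HYPERVISOR_set_trap_table() (declared by the
 * hypercall wrappers, not by this header).  The selector (__KERNEL_CS),
 * the handler symbol and the all-zero terminator convention are
 * assumptions for the example.
 *
 *	struct trap_info traps[2] = { };
 *
 *	traps[0].vector  = 3;				// #BP, for example
 *	traps[0].cs      = __KERNEL_CS;
 *	traps[0].address = (unsigned long)int3_handler;
 *	TI_SET_DPL(&traps[0], 1);	// guest kernel (ring 1) may enter
 *	TI_SET_IF(&traps[0], 1);	// set the "clear event enable" bit
 *
 *	HYPERVISOR_set_trap_table(traps);
 */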

struct arch_shared_info {
	unsigned long max_pfn;	/* max pfn that appears in table */
	/* Frame containing list of mfns containing list of mfns containing p2m. */
	unsigned long pfn_to_mfn_frame_list_list;
	unsigned long nmi_reason;
};
#endif	/* !__ASSEMBLY__ */

#ifdef CONFIG_X86_32
#include "interface_32.h"
#else
#include "interface_64.h"
#endif

#ifndef __ASSEMBLY__
/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
struct vcpu_guest_context {
	/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
	struct { char x[512]; } fpu_ctxt;	/* User-level FPU registers */
#define VGCF_I387_VALID	(1<<0)
#define VGCF_HVM_GUEST	(1<<1)
#define VGCF_IN_KERNEL	(1<<2)
	unsigned long flags;			/* VGCF_* flags */
	struct cpu_user_regs user_regs;		/* User-level CPU registers */
	struct trap_info trap_ctxt[256];	/* Virtual IDT */
	unsigned long ldt_base, ldt_ents;	/* LDT (linear address, # ents) */
	unsigned long gdt_frames[16], gdt_ents;	/* GDT (machine frames, # ents) */
	unsigned long kernel_ss, kernel_sp;	/* Virtual TSS (only SS1/SP1) */
	/* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
	unsigned long ctrlreg[8];		/* CR0-CR7 (control registers) */
	unsigned long debugreg[8];		/* DB0-DB7 (debug registers) */
#ifdef __i386__
	unsigned long event_callback_cs;	/* CS:EIP of event callback */
	unsigned long event_callback_eip;
	unsigned long failsafe_callback_cs;	/* CS:EIP of failsafe callback */
	unsigned long failsafe_callback_eip;
#else
	unsigned long event_callback_eip;
	unsigned long failsafe_callback_eip;
	unsigned long syscall_callback_eip;
#endif
	unsigned long vm_assist;		/* VMASST_TYPE_* bitmap */
#ifdef __x86_64__
	/* Segment base addresses. */
	uint64_t fs_base;
	uint64_t gs_base_kernel;
	uint64_t gs_base_user;
#endif
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
#endif	/* !__ASSEMBLY__ */
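/*
 * Illustrative sketch only: the typical consumer of vcpu_guest_context is
 * secondary-vCPU bringup, which zeroes a context, fills in the entry point,
 * stack and page-table base, and hands it to the hypervisor.  The helpers
 * and symbols used here (HYPERVISOR_vcpu_op, VCPUOP_initialise,
 * xen_pfn_to_cr3, virt_to_mfn, swapper_pg_dir, start_secondary, stack_top,
 * cpu) come from other headers or are placeholders, not definitions from
 * this file.
 *
 *	struct vcpu_guest_context ctxt;
 *
 *	memset(&ctxt, 0, sizeof(ctxt));
 *	ctxt.flags = VGCF_IN_KERNEL;
 *	ctxt.user_regs.eip = (unsigned long)start_secondary;
 *	ctxt.user_regs.esp = stack_top;
 *	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
 *	HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
 */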

/*
 * Prefix forces emulation of some non-trapping instructions.
 * Currently only CPUID.
 */
#ifdef __ASSEMBLY__
#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
#else
#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
#endif
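/*
 * Illustrative sketch only: from C the prefixed opcode is emitted via
 * inline assembly, so that Xen can intercept the CPUID and filter the
 * values the guest sees.  xen_cpuid_sketch() is a hypothetical wrapper,
 * not an interface defined here.
 *
 *	static inline void xen_cpuid_sketch(unsigned int *ax, unsigned int *bx,
 *					    unsigned int *cx, unsigned int *dx)
 *	{
 *		asm volatile(XEN_CPUID
 *			     : "=a" (*ax), "=b" (*bx), "=c" (*cx), "=d" (*dx)
 *			     : "0" (*ax), "2" (*cx));
 *	}
 */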

#endif /* ASM_X86__XEN__INTERFACE_H */