Merge branches 'pxa' and 'orion-fixes1'
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / asm-x86 / xen / interface.h
1 /******************************************************************************
2 * arch-x86_32.h
3 *
4 * Guest OS interface to x86 32-bit Xen.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9 #ifndef __XEN_PUBLIC_ARCH_X86_32_H__
10 #define __XEN_PUBLIC_ARCH_X86_32_H__
11
/*
 * Guest handles: opaque wrappers for guest pointers passed through the
 * hypercall ABI.  When building the hypervisor itself (__XEN__) a handle
 * is a one-member struct holding the pointer, so an accidental direct
 * dereference fails to compile; guest-side builds get a plain pointer
 * typedef with identical size and layout.
 */
#ifdef __XEN__
#define __DEFINE_GUEST_HANDLE(name, type) \
    typedef struct { type *p; } __guest_handle_ ## name
#else
#define __DEFINE_GUEST_HANDLE(name, type) \
    typedef type * __guest_handle_ ## name
#endif

/* Define a handle type for "struct name". */
#define DEFINE_GUEST_HANDLE_STRUCT(name) \
	__DEFINE_GUEST_HANDLE(name, struct name)
/* Define a handle type for a single-token type "name". */
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
/* Spell the typedef'd handle type generated for "name". */
#define GUEST_HANDLE(name) __guest_handle_ ## name
24
/*
 * set_xen_guest_handle(hnd, val): store pointer 'val' into guest handle
 * 'hnd'.
 *
 * On 32-bit (__i386__), a handle may occupy a 64-bit slot in a shared
 * ABI structure (sizeof(hnd) == 8, a compile-time constant fold); the
 * upper half must be zeroed before the 32-bit pointer is written so the
 * hypervisor never sees stale high bits.  The __XEN__ variants assign
 * through the .p member of the struct form of the handle; guest-side
 * variants assign the bare pointer.
 *
 * Hygiene fix: 'val' is now expanded parenthesized as '(val)' so that
 * a conditional-expression argument cannot rebind against '='.
 */
#ifdef __XEN__
#if defined(__i386__)
#define set_xen_guest_handle(hnd, val)			\
	do {						\
		if (sizeof(hnd) == 8)			\
			*(uint64_t *)&(hnd) = 0;	\
		(hnd).p = (val);			\
	} while (0)
#elif defined(__x86_64__)
#define set_xen_guest_handle(hnd, val) do { (hnd).p = (val); } while (0)
#endif
#else
#if defined(__i386__)
#define set_xen_guest_handle(hnd, val)			\
	do {						\
		if (sizeof(hnd) == 8)			\
			*(uint64_t *)&(hnd) = 0;	\
		(hnd) = (val);				\
	} while (0)
#elif defined(__x86_64__)
#define set_xen_guest_handle(hnd, val) do { (hnd) = (val); } while (0)
#endif
#endif
48
#ifndef __ASSEMBLY__
/* Guest handles for primitive C types.  The multi-word types use the
 * two-argument form because the handle's name must be a single token. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
__DEFINE_GUEST_HANDLE(ulong, unsigned long);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
#endif
59
/*
 * SEGMENT DESCRIPTOR TABLES
 */
/*
 * A number of GDT entries are reserved by Xen. These are not situated at the
 * start of the GDT because some stupid OSes export hard-coded selector values
 * in their ABI. These hard-coded values are always near the start of the GDT,
 * so Xen places itself out of the way, at the far end of the GDT.
 */
/* Xen-reserved GDT region: page 14 onward; 4096 bytes/page, 8 bytes/entry. */
#define FIRST_RESERVED_GDT_PAGE 14
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)

/*
 * These flat segments are in the Xen-private section of every GDT. Since these
 * are also present in the initial GDT, many OSes will be able to avoid
 * installing their own GDT.
 */
/* Ring-1 selectors (paravirtualized guest kernel). */
#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
/* Ring-3 selectors (guest user space). */
#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
#define FLAT_RING3_SS 0xe033 /* GDT index 262 */

/* Under Xen the guest kernel runs in ring 1, user space in ring 3. */
#define FLAT_KERNEL_CS FLAT_RING1_CS
#define FLAT_KERNEL_DS FLAT_RING1_DS
#define FLAT_KERNEL_SS FLAT_RING1_SS
#define FLAT_USER_CS FLAT_RING3_CS
#define FLAT_USER_DS FLAT_RING3_DS
#define FLAT_USER_SS FLAT_RING3_SS

/* And the trap vector is... */
/* Software-interrupt vector 0x82 enters the hypervisor (hypercall). */
#define TRAP_INSTR "int $0x82"

/*
 * Virtual addresses beyond this are not modifiable by guest OSes. The
 * machine->physical mapping table starts at this address, read-only.
 */
/* PAE builds give the hypervisor a larger slice of the address space. */
#ifdef CONFIG_X86_PAE
#define __HYPERVISOR_VIRT_START 0xF5800000
#else
#define __HYPERVISOR_VIRT_START 0xFC000000
#endif

#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#endif

/* Read-only machine-frame -> pseudo-physical-frame translation table,
 * mapped at the start of the hypervisor hole. */
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
#endif

/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 32
115
116 #ifndef __ASSEMBLY__
117
/*
 * Send an array of these to HYPERVISOR_set_trap_table()
 */
/*
 * Accessors for trap_info.flags: mask 3 (bits 0-1) holds the privilege
 * level (DPL values 0-3); mask 4 (bit 2) requests event-delivery disable
 * while the handler runs.
 * NOTE(review): the TI_SET_* macros only OR bits in -- they assume the
 * field starts out zero and cannot clear a previously-set DPL/IF bit.
 */
#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
#define TI_GET_IF(_ti) ((_ti)->flags & 4)
#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
/* One virtual IDT entry; guests install an array of these via
 * HYPERVISOR_set_trap_table().  Layout is part of the hypercall ABI. */
struct trap_info {
	uint8_t vector; /* exception vector */
	uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
	uint16_t cs; /* code selector */
	unsigned long address; /* code offset */
};
DEFINE_GUEST_HANDLE_STRUCT(trap_info);
133
/*
 * Saved register frame for a 32-bit vCPU.  Field order and the _pad*
 * members (which keep the 16-bit selectors in 32-bit slots) are part of
 * the guest/hypervisor ABI and must not be rearranged.
 */
struct cpu_user_regs {
	uint32_t ebx;
	uint32_t ecx;
	uint32_t edx;
	uint32_t esi;
	uint32_t edi;
	uint32_t ebp;
	uint32_t eax;
	uint16_t error_code; /* private */
	uint16_t entry_vector; /* private */
	uint32_t eip;
	uint16_t cs;
	uint8_t saved_upcall_mask; /* event mask at entry; see eflags note */
	uint8_t _pad0;
	uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
	uint32_t esp;
	uint16_t ss, _pad1;
	uint16_t es, _pad2;
	uint16_t ds, _pad3;
	uint16_t fs, _pad4;
	uint16_t gs, _pad5;
};
DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
157
/* 64-bit cycle-counter value as read with the RDTSC instruction. */
typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
159
/*
 * The following is all CPU context. Note that the fpu_ctxt block is filled
 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
 */
/* Complete architectural state for one virtual CPU. */
struct vcpu_guest_context {
	/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
	struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
#define VGCF_I387_VALID (1<<0) /* fpu_ctxt contains valid state */
#define VGCF_HVM_GUEST (1<<1)
#define VGCF_IN_KERNEL (1<<2)
	unsigned long flags; /* VGCF_* flags */
	struct cpu_user_regs user_regs; /* User-level CPU registers */
	struct trap_info trap_ctxt[256]; /* Virtual IDT */
	unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
	unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
	unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
	unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
	unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
	unsigned long event_callback_cs; /* CS:EIP of event callback */
	unsigned long event_callback_eip;
	unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
	unsigned long failsafe_callback_eip;
	unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
185
/* x86-specific portion of the Xen shared-info structure. */
struct arch_shared_info {
	unsigned long max_pfn; /* max pfn that appears in table */
	/* Frame containing list of mfns containing list of mfns containing p2m. */
	unsigned long pfn_to_mfn_frame_list_list;
	unsigned long nmi_reason; /* NOTE(review): NMI reason code -- encoding not visible here */
};
192
/* x86-specific per-vCPU state shared with the guest. */
struct arch_vcpu_info {
	unsigned long cr2; /* CR2: linear address of the last page fault */
	unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
};
197
/* Far-pointer (segment:offset) form of a guest callback entry point. */
struct xen_callback {
	unsigned long cs;  /* code segment selector */
	unsigned long eip; /* entry point offset */
};
202 #endif /* !__ASSEMBLY__ */
203
/*
 * Prefix forces emulation of some non-trapping instructions.
 * Currently only CPUID.
 */
/*
 * Marker bytes 0x0f,0x0b (the UD2 opcode) followed by 'x','e','n'
 * (0x78,0x65,0x6e).  Two spellings of the same sequence: raw assembler
 * directives for .S files, a string literal for C inline asm.
 */
#ifdef __ASSEMBLY__
#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
#else
#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
#endif
215
216 #endif