Merge branch 'x86/debug' into x86/cpu
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / asm-x86 / paravirt.h
1 #ifndef __ASM_PARAVIRT_H
2 #define __ASM_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4 * para-virtualization: those hooks are defined here. */
5
6 #ifdef CONFIG_PARAVIRT
7 #include <asm/page.h>
8 #include <asm/asm.h>
9
10 /* Bitmask of what can be clobbered: usually at least eax. */
11 #define CLBR_NONE 0
12 #define CLBR_EAX (1 << 0)
13 #define CLBR_ECX (1 << 1)
14 #define CLBR_EDX (1 << 2)
15
16 #ifdef CONFIG_X86_64
17 #define CLBR_RSI (1 << 3)
18 #define CLBR_RDI (1 << 4)
19 #define CLBR_R8 (1 << 5)
20 #define CLBR_R9 (1 << 6)
21 #define CLBR_R10 (1 << 7)
22 #define CLBR_R11 (1 << 8)
23 #define CLBR_ANY ((1 << 9) - 1)
24 #include <asm/desc_defs.h>
25 #else
26 /* CLBR_ANY should match all regs platform has. For i386, that's just it */
27 #define CLBR_ANY ((1 << 3) - 1)
28 #endif /* X86_64 */
29
30 #ifndef __ASSEMBLY__
31 #include <linux/types.h>
32 #include <linux/cpumask.h>
33 #include <asm/kmap_types.h>
34 #include <asm/desc_defs.h>
35
36 struct page;
37 struct thread_struct;
38 struct desc_ptr;
39 struct tss_struct;
40 struct mm_struct;
41 struct desc_struct;
42
43 /* general info */
44 struct pv_info {
45 unsigned int kernel_rpl;
46 int shared_kernel_pmd;
47 int paravirt_enabled;
48 const char *name;
49 };
50
51 struct pv_init_ops {
52 /*
53 * Patch may replace one of the defined code sequences with
54 * arbitrary code, subject to the same register constraints.
55 * This generally means the code is not free to clobber any
56 * registers other than EAX. The patch function should return
57 * the number of bytes of code generated, as we nop pad the
58 * rest in generic code.
59 */
60 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
61 unsigned long addr, unsigned len);
62
63 /* Basic arch-specific setup */
64 void (*arch_setup)(void);
65 char *(*memory_setup)(void);
66 void (*post_allocator_init)(void);
67
68 /* Print a banner to identify the environment */
69 void (*banner)(void);
70 };
71
72
73 struct pv_lazy_ops {
74 /* Set deferred update mode, used for batching operations. */
75 void (*enter)(void);
76 void (*leave)(void);
77 };
78
79 struct pv_time_ops {
80 void (*time_init)(void);
81
82 /* Set and set time of day */
83 unsigned long (*get_wallclock)(void);
84 int (*set_wallclock)(unsigned long);
85
86 unsigned long long (*sched_clock)(void);
87 unsigned long (*get_tsc_khz)(void);
88 };
89
90 struct pv_cpu_ops {
91 /* hooks for various privileged instructions */
92 unsigned long (*get_debugreg)(int regno);
93 void (*set_debugreg)(int regno, unsigned long value);
94
95 void (*clts)(void);
96
97 unsigned long (*read_cr0)(void);
98 void (*write_cr0)(unsigned long);
99
100 unsigned long (*read_cr4_safe)(void);
101 unsigned long (*read_cr4)(void);
102 void (*write_cr4)(unsigned long);
103
104 #ifdef CONFIG_X86_64
105 unsigned long (*read_cr8)(void);
106 void (*write_cr8)(unsigned long);
107 #endif
108
109 /* Segment descriptor handling */
110 void (*load_tr_desc)(void);
111 void (*load_gdt)(const struct desc_ptr *);
112 void (*load_idt)(const struct desc_ptr *);
113 void (*store_gdt)(struct desc_ptr *);
114 void (*store_idt)(struct desc_ptr *);
115 void (*set_ldt)(const void *desc, unsigned entries);
116 unsigned long (*store_tr)(void);
117 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
118 #ifdef CONFIG_X86_64
119 void (*load_gs_index)(unsigned int idx);
120 #endif
121 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
122 const void *desc);
123 void (*write_gdt_entry)(struct desc_struct *,
124 int entrynum, const void *desc, int size);
125 void (*write_idt_entry)(gate_desc *,
126 int entrynum, const gate_desc *gate);
127 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
128
129 void (*set_iopl_mask)(unsigned mask);
130
131 void (*wbinvd)(void);
132 void (*io_delay)(void);
133
134 /* cpuid emulation, mostly so that caps bits can be disabled */
135 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
136 unsigned int *ecx, unsigned int *edx);
137
138 /* MSR, PMC and TSR operations.
139 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
140 u64 (*read_msr_amd)(unsigned int msr, int *err);
141 u64 (*read_msr)(unsigned int msr, int *err);
142 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
143
144 u64 (*read_tsc)(void);
145 u64 (*read_pmc)(int counter);
146 unsigned long long (*read_tscp)(unsigned int *aux);
147
148 /*
149 * Atomically enable interrupts and return to userspace. This
150 * is only ever used to return to 32-bit processes; in a
151 * 64-bit kernel, it's used for 32-on-64 compat processes, but
152 * never native 64-bit processes. (Jump, not call.)
153 */
154 void (*irq_enable_sysexit)(void);
155
156 /*
157 * Switch to usermode gs and return to 64-bit usermode using
158 * sysret. Only used in 64-bit kernels to return to 64-bit
159 * processes. Usermode register state, including %rsp, must
160 * already be restored.
161 */
162 void (*usergs_sysret64)(void);
163
164 /*
165 * Switch to usermode gs and return to 32-bit usermode using
166 * sysret. Used to return to 32-on-64 compat processes.
167 * Other usermode register state, including %esp, must already
168 * be restored.
169 */
170 void (*usergs_sysret32)(void);
171
172 /* Normal iret. Jump to this with the standard iret stack
173 frame set up. */
174 void (*iret)(void);
175
176 void (*swapgs)(void);
177
178 struct pv_lazy_ops lazy_mode;
179 };
180
181 struct pv_irq_ops {
182 void (*init_IRQ)(void);
183
184 /*
185 * Get/set interrupt state. save_fl and restore_fl are only
186 * expected to use X86_EFLAGS_IF; all other bits
187 * returned from save_fl are undefined, and may be ignored by
188 * restore_fl.
189 */
190 unsigned long (*save_fl)(void);
191 void (*restore_fl)(unsigned long);
192 void (*irq_disable)(void);
193 void (*irq_enable)(void);
194 void (*safe_halt)(void);
195 void (*halt)(void);
196
197 #ifdef CONFIG_X86_64
198 void (*adjust_exception_frame)(void);
199 #endif
200 };
201
202 struct pv_apic_ops {
203 #ifdef CONFIG_X86_LOCAL_APIC
204 /*
205 * Direct APIC operations, principally for VMI. Ideally
206 * these shouldn't be in this interface.
207 */
208 void (*apic_write)(unsigned long reg, u32 v);
209 u32 (*apic_read)(unsigned long reg);
210 void (*setup_boot_clock)(void);
211 void (*setup_secondary_clock)(void);
212
213 void (*startup_ipi_hook)(int phys_apicid,
214 unsigned long start_eip,
215 unsigned long start_esp);
216 #endif
217 };
218
219 struct pv_mmu_ops {
220 /*
221 * Called before/after init_mm pagetable setup. setup_start
222 * may reset %cr3, and may pre-install parts of the pagetable;
223 * pagetable setup is expected to preserve any existing
224 * mapping.
225 */
226 void (*pagetable_setup_start)(pgd_t *pgd_base);
227 void (*pagetable_setup_done)(pgd_t *pgd_base);
228
229 unsigned long (*read_cr2)(void);
230 void (*write_cr2)(unsigned long);
231
232 unsigned long (*read_cr3)(void);
233 void (*write_cr3)(unsigned long);
234
235 /*
236 * Hooks for intercepting the creation/use/destruction of an
237 * mm_struct.
238 */
239 void (*activate_mm)(struct mm_struct *prev,
240 struct mm_struct *next);
241 void (*dup_mmap)(struct mm_struct *oldmm,
242 struct mm_struct *mm);
243 void (*exit_mmap)(struct mm_struct *mm);
244
245
246 /* TLB operations */
247 void (*flush_tlb_user)(void);
248 void (*flush_tlb_kernel)(void);
249 void (*flush_tlb_single)(unsigned long addr);
250 void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
251 unsigned long va);
252
253 /* Hooks for allocating and freeing a pagetable top-level */
254 int (*pgd_alloc)(struct mm_struct *mm);
255 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
256
257 /*
258 * Hooks for allocating/releasing pagetable pages when they're
259 * attached to a pagetable
260 */
261 void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
262 void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
263 void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
264 void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
265 void (*release_pte)(u32 pfn);
266 void (*release_pmd)(u32 pfn);
267 void (*release_pud)(u32 pfn);
268
269 /* Pagetable manipulation functions */
270 void (*set_pte)(pte_t *ptep, pte_t pteval);
271 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
272 pte_t *ptep, pte_t pteval);
273 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
274 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
275 pte_t *ptep);
276 void (*pte_update_defer)(struct mm_struct *mm,
277 unsigned long addr, pte_t *ptep);
278
279 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
280 pte_t *ptep);
281 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
282 pte_t *ptep, pte_t pte);
283
284 pteval_t (*pte_val)(pte_t);
285 pteval_t (*pte_flags)(pte_t);
286 pte_t (*make_pte)(pteval_t pte);
287
288 pgdval_t (*pgd_val)(pgd_t);
289 pgd_t (*make_pgd)(pgdval_t pgd);
290
291 #if PAGETABLE_LEVELS >= 3
292 #ifdef CONFIG_X86_PAE
293 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
294 void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
295 pte_t *ptep, pte_t pte);
296 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
297 pte_t *ptep);
298 void (*pmd_clear)(pmd_t *pmdp);
299
300 #endif /* CONFIG_X86_PAE */
301
302 void (*set_pud)(pud_t *pudp, pud_t pudval);
303
304 pmdval_t (*pmd_val)(pmd_t);
305 pmd_t (*make_pmd)(pmdval_t pmd);
306
307 #if PAGETABLE_LEVELS == 4
308 pudval_t (*pud_val)(pud_t);
309 pud_t (*make_pud)(pudval_t pud);
310
311 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
312 #endif /* PAGETABLE_LEVELS == 4 */
313 #endif /* PAGETABLE_LEVELS >= 3 */
314
315 #ifdef CONFIG_HIGHPTE
316 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
317 #endif
318
319 struct pv_lazy_ops lazy_mode;
320
321 /* dom0 ops */
322
323 /* Sometimes the physical address is a pfn, and sometimes its
324 an mfn. We can tell which is which from the index. */
325 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
326 unsigned long phys, pgprot_t flags);
327 };
328
329 struct raw_spinlock;
330 struct pv_lock_ops {
331 int (*spin_is_locked)(struct raw_spinlock *lock);
332 int (*spin_is_contended)(struct raw_spinlock *lock);
333 void (*spin_lock)(struct raw_spinlock *lock);
334 int (*spin_trylock)(struct raw_spinlock *lock);
335 void (*spin_unlock)(struct raw_spinlock *lock);
336 };
337
338 /* This contains all the paravirt structures: we get a convenient
339 * number for each function using the offset which we use to indicate
340 * what to patch. */
341 struct paravirt_patch_template {
342 struct pv_init_ops pv_init_ops;
343 struct pv_time_ops pv_time_ops;
344 struct pv_cpu_ops pv_cpu_ops;
345 struct pv_irq_ops pv_irq_ops;
346 struct pv_apic_ops pv_apic_ops;
347 struct pv_mmu_ops pv_mmu_ops;
348 struct pv_lock_ops pv_lock_ops;
349 };
350
351 extern struct pv_info pv_info;
352 extern struct pv_init_ops pv_init_ops;
353 extern struct pv_time_ops pv_time_ops;
354 extern struct pv_cpu_ops pv_cpu_ops;
355 extern struct pv_irq_ops pv_irq_ops;
356 extern struct pv_apic_ops pv_apic_ops;
357 extern struct pv_mmu_ops pv_mmu_ops;
358 extern struct pv_lock_ops pv_lock_ops;
359
360 #define PARAVIRT_PATCH(x) \
361 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
362
363 #define paravirt_type(op) \
364 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
365 [paravirt_opptr] "m" (op)
366 #define paravirt_clobber(clobber) \
367 [paravirt_clobber] "i" (clobber)
368
369 /*
370 * Generate some code, and mark it as patchable by the
371 * apply_paravirt() alternate instruction patcher.
372 */
373 #define _paravirt_alt(insn_string, type, clobber) \
374 "771:\n\t" insn_string "\n" "772:\n" \
375 ".pushsection .parainstructions,\"a\"\n" \
376 _ASM_ALIGN "\n" \
377 _ASM_PTR " 771b\n" \
378 " .byte " type "\n" \
379 " .byte 772b-771b\n" \
380 " .short " clobber "\n" \
381 ".popsection\n"
382
383 /* Generate patchable code, with the default asm parameters. */
384 #define paravirt_alt(insn_string) \
385 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
386
387 /* Simple instruction patching code. */
388 #define DEF_NATIVE(ops, name, code) \
389 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
390 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
391
392 unsigned paravirt_patch_nop(void);
393 unsigned paravirt_patch_ignore(unsigned len);
394 unsigned paravirt_patch_call(void *insnbuf,
395 const void *target, u16 tgt_clobbers,
396 unsigned long addr, u16 site_clobbers,
397 unsigned len);
398 unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
399 unsigned long addr, unsigned len);
400 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
401 unsigned long addr, unsigned len);
402
403 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
404 const char *start, const char *end);
405
406 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
407 unsigned long addr, unsigned len);
408
409 int paravirt_disable_iospace(void);
410
411 /*
412 * This generates an indirect call based on the operation type number.
413 * The type number, computed in PARAVIRT_PATCH, is derived from the
414 * offset into the paravirt_patch_template structure, and can therefore be
415 * freely converted back into a structure offset.
416 */
417 #define PARAVIRT_CALL "call *%[paravirt_opptr];"
418
419 /*
420 * These macros are intended to wrap calls through one of the paravirt
421 * ops structs, so that they can be later identified and patched at
422 * runtime.
423 *
424 * Normally, a call to a pv_op function is a simple indirect call:
425 * (pv_op_struct.operations)(args...).
426 *
427 * Unfortunately, this is a relatively slow operation for modern CPUs,
428 * because it cannot necessarily determine what the destination
429 * address is. In this case, the address is a runtime constant, so at
430 * the very least we can patch the call to e a simple direct call, or
431 * ideally, patch an inline implementation into the callsite. (Direct
432 * calls are essentially free, because the call and return addresses
433 * are completely predictable.)
434 *
435 * For i386, these macros rely on the standard gcc "regparm(3)" calling
436 * convention, in which the first three arguments are placed in %eax,
437 * %edx, %ecx (in that order), and the remaining arguments are placed
438 * on the stack. All caller-save registers (eax,edx,ecx) are expected
439 * to be modified (either clobbered or used for return values).
440 * X86_64, on the other hand, already specifies a register-based calling
441 * conventions, returning at %rax, with parameteres going on %rdi, %rsi,
442 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
443 * special handling for dealing with 4 arguments, unlike i386.
444 * However, x86_64 also have to clobber all caller saved registers, which
445 * unfortunately, are quite a bit (r8 - r11)
446 *
447 * The call instruction itself is marked by placing its start address
448 * and size into the .parainstructions section, so that
449 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
450 * appropriate patching under the control of the backend pv_init_ops
451 * implementation.
452 *
453 * Unfortunately there's no way to get gcc to generate the args setup
454 * for the call, and then allow the call itself to be generated by an
455 * inline asm. Because of this, we must do the complete arg setup and
456 * return value handling from within these macros. This is fairly
457 * cumbersome.
458 *
459 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
460 * It could be extended to more arguments, but there would be little
461 * to be gained from that. For each number of arguments, there are
462 * the two VCALL and CALL variants for void and non-void functions.
463 *
464 * When there is a return value, the invoker of the macro must specify
465 * the return type. The macro then uses sizeof() on that type to
466 * determine whether its a 32 or 64 bit value, and places the return
467 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
468 * 64-bit). For x86_64 machines, it just returns at %rax regardless of
469 * the return value size.
470 *
471 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
472 * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments
473 * in low,high order
474 *
475 * Small structures are passed and returned in registers. The macro
476 * calling convention can't directly deal with this, so the wrapper
477 * functions must do this.
478 *
479 * These PVOP_* macros are only defined within this header. This
480 * means that all uses must be wrapped in inline functions. This also
481 * makes sure the incoming and outgoing types are always correct.
482 */
483 #ifdef CONFIG_X86_32
484 #define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx
485 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS
486 #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
487 "=c" (__ecx)
488 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
489 #define EXTRA_CLOBBERS
490 #define VEXTRA_CLOBBERS
491 #else
492 #define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx
493 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
494 #define PVOP_VCALL_CLOBBERS "=D" (__edi), \
495 "=S" (__esi), "=d" (__edx), \
496 "=c" (__ecx)
497
498 #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
499
500 #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
501 #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
502 #endif
503
504 #ifdef CONFIG_PARAVIRT_DEBUG
505 #define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
506 #else
507 #define PVOP_TEST_NULL(op) ((void)op)
508 #endif
509
510 #define __PVOP_CALL(rettype, op, pre, post, ...) \
511 ({ \
512 rettype __ret; \
513 PVOP_CALL_ARGS; \
514 PVOP_TEST_NULL(op); \
515 /* This is 32-bit specific, but is okay in 64-bit */ \
516 /* since this condition will never hold */ \
517 if (sizeof(rettype) > sizeof(unsigned long)) { \
518 asm volatile(pre \
519 paravirt_alt(PARAVIRT_CALL) \
520 post \
521 : PVOP_CALL_CLOBBERS \
522 : paravirt_type(op), \
523 paravirt_clobber(CLBR_ANY), \
524 ##__VA_ARGS__ \
525 : "memory", "cc" EXTRA_CLOBBERS); \
526 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
527 } else { \
528 asm volatile(pre \
529 paravirt_alt(PARAVIRT_CALL) \
530 post \
531 : PVOP_CALL_CLOBBERS \
532 : paravirt_type(op), \
533 paravirt_clobber(CLBR_ANY), \
534 ##__VA_ARGS__ \
535 : "memory", "cc" EXTRA_CLOBBERS); \
536 __ret = (rettype)__eax; \
537 } \
538 __ret; \
539 })
540 #define __PVOP_VCALL(op, pre, post, ...) \
541 ({ \
542 PVOP_VCALL_ARGS; \
543 PVOP_TEST_NULL(op); \
544 asm volatile(pre \
545 paravirt_alt(PARAVIRT_CALL) \
546 post \
547 : PVOP_VCALL_CLOBBERS \
548 : paravirt_type(op), \
549 paravirt_clobber(CLBR_ANY), \
550 ##__VA_ARGS__ \
551 : "memory", "cc" VEXTRA_CLOBBERS); \
552 })
553
554 #define PVOP_CALL0(rettype, op) \
555 __PVOP_CALL(rettype, op, "", "")
556 #define PVOP_VCALL0(op) \
557 __PVOP_VCALL(op, "", "")
558
559 #define PVOP_CALL1(rettype, op, arg1) \
560 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
561 #define PVOP_VCALL1(op, arg1) \
562 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
563
564 #define PVOP_CALL2(rettype, op, arg1, arg2) \
565 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
566 "1" ((unsigned long)(arg2)))
567 #define PVOP_VCALL2(op, arg1, arg2) \
568 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
569 "1" ((unsigned long)(arg2)))
570
571 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
572 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
573 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
574 #define PVOP_VCALL3(op, arg1, arg2, arg3) \
575 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
576 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
577
578 /* This is the only difference in x86_64. We can make it much simpler */
579 #ifdef CONFIG_X86_32
580 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
581 __PVOP_CALL(rettype, op, \
582 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
583 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
584 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
585 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
586 __PVOP_VCALL(op, \
587 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
588 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
589 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
590 #else
591 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
592 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
593 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
594 "3"((unsigned long)(arg4)))
595 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
596 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
597 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
598 "3"((unsigned long)(arg4)))
599 #endif
600
601 static inline int paravirt_enabled(void)
602 {
603 return pv_info.paravirt_enabled;
604 }
605
606 static inline void load_sp0(struct tss_struct *tss,
607 struct thread_struct *thread)
608 {
609 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
610 }
611
612 #define ARCH_SETUP pv_init_ops.arch_setup();
613 static inline unsigned long get_wallclock(void)
614 {
615 return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
616 }
617
618 static inline int set_wallclock(unsigned long nowtime)
619 {
620 return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
621 }
622
623 static inline void (*choose_time_init(void))(void)
624 {
625 return pv_time_ops.time_init;
626 }
627
628 /* The paravirtualized CPUID instruction. */
629 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
630 unsigned int *ecx, unsigned int *edx)
631 {
632 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
633 }
634
635 /*
636 * These special macros can be used to get or set a debugging register
637 */
638 static inline unsigned long paravirt_get_debugreg(int reg)
639 {
640 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
641 }
642 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
643 static inline void set_debugreg(unsigned long val, int reg)
644 {
645 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
646 }
647
648 static inline void clts(void)
649 {
650 PVOP_VCALL0(pv_cpu_ops.clts);
651 }
652
653 static inline unsigned long read_cr0(void)
654 {
655 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
656 }
657
658 static inline void write_cr0(unsigned long x)
659 {
660 PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
661 }
662
663 static inline unsigned long read_cr2(void)
664 {
665 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
666 }
667
668 static inline void write_cr2(unsigned long x)
669 {
670 PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
671 }
672
673 static inline unsigned long read_cr3(void)
674 {
675 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
676 }
677
678 static inline void write_cr3(unsigned long x)
679 {
680 PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
681 }
682
683 static inline unsigned long read_cr4(void)
684 {
685 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
686 }
687 static inline unsigned long read_cr4_safe(void)
688 {
689 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
690 }
691
692 static inline void write_cr4(unsigned long x)
693 {
694 PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
695 }
696
697 #ifdef CONFIG_X86_64
698 static inline unsigned long read_cr8(void)
699 {
700 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
701 }
702
703 static inline void write_cr8(unsigned long x)
704 {
705 PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
706 }
707 #endif
708
709 static inline void raw_safe_halt(void)
710 {
711 PVOP_VCALL0(pv_irq_ops.safe_halt);
712 }
713
714 static inline void halt(void)
715 {
716 PVOP_VCALL0(pv_irq_ops.safe_halt);
717 }
718
719 static inline void wbinvd(void)
720 {
721 PVOP_VCALL0(pv_cpu_ops.wbinvd);
722 }
723
724 #define get_kernel_rpl() (pv_info.kernel_rpl)
725
726 static inline u64 paravirt_read_msr(unsigned msr, int *err)
727 {
728 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
729 }
730 static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
731 {
732 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
733 }
734 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
735 {
736 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
737 }
738
739 /* These should all do BUG_ON(_err), but our headers are too tangled. */
740 #define rdmsr(msr, val1, val2) \
741 do { \
742 int _err; \
743 u64 _l = paravirt_read_msr(msr, &_err); \
744 val1 = (u32)_l; \
745 val2 = _l >> 32; \
746 } while (0)
747
748 #define wrmsr(msr, val1, val2) \
749 do { \
750 paravirt_write_msr(msr, val1, val2); \
751 } while (0)
752
753 #define rdmsrl(msr, val) \
754 do { \
755 int _err; \
756 val = paravirt_read_msr(msr, &_err); \
757 } while (0)
758
759 #define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
760 #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
761
762 /* rdmsr with exception handling */
763 #define rdmsr_safe(msr, a, b) \
764 ({ \
765 int _err; \
766 u64 _l = paravirt_read_msr(msr, &_err); \
767 (*a) = (u32)_l; \
768 (*b) = _l >> 32; \
769 _err; \
770 })
771
772 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
773 {
774 int err;
775
776 *p = paravirt_read_msr(msr, &err);
777 return err;
778 }
779 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
780 {
781 int err;
782
783 *p = paravirt_read_msr_amd(msr, &err);
784 return err;
785 }
786
787 static inline u64 paravirt_read_tsc(void)
788 {
789 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
790 }
791
792 #define rdtscl(low) \
793 do { \
794 u64 _l = paravirt_read_tsc(); \
795 low = (int)_l; \
796 } while (0)
797
798 #define rdtscll(val) (val = paravirt_read_tsc())
799
800 static inline unsigned long long paravirt_sched_clock(void)
801 {
802 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
803 }
804 #define calibrate_tsc() (pv_time_ops.get_tsc_khz())
805
806 static inline unsigned long long paravirt_read_pmc(int counter)
807 {
808 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
809 }
810
811 #define rdpmc(counter, low, high) \
812 do { \
813 u64 _l = paravirt_read_pmc(counter); \
814 low = (u32)_l; \
815 high = _l >> 32; \
816 } while (0)
817
818 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
819 {
820 return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
821 }
822
823 #define rdtscp(low, high, aux) \
824 do { \
825 int __aux; \
826 unsigned long __val = paravirt_rdtscp(&__aux); \
827 (low) = (u32)__val; \
828 (high) = (u32)(__val >> 32); \
829 (aux) = __aux; \
830 } while (0)
831
832 #define rdtscpll(val, aux) \
833 do { \
834 unsigned long __aux; \
835 val = paravirt_rdtscp(&__aux); \
836 (aux) = __aux; \
837 } while (0)
838
839 static inline void load_TR_desc(void)
840 {
841 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
842 }
843 static inline void load_gdt(const struct desc_ptr *dtr)
844 {
845 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
846 }
847 static inline void load_idt(const struct desc_ptr *dtr)
848 {
849 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
850 }
851 static inline void set_ldt(const void *addr, unsigned entries)
852 {
853 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
854 }
855 static inline void store_gdt(struct desc_ptr *dtr)
856 {
857 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
858 }
859 static inline void store_idt(struct desc_ptr *dtr)
860 {
861 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
862 }
863 static inline unsigned long paravirt_store_tr(void)
864 {
865 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
866 }
867 #define store_tr(tr) ((tr) = paravirt_store_tr())
868 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
869 {
870 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
871 }
872
873 #ifdef CONFIG_X86_64
874 static inline void load_gs_index(unsigned int gs)
875 {
876 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
877 }
878 #endif
879
880 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
881 const void *desc)
882 {
883 PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
884 }
885
886 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
887 void *desc, int type)
888 {
889 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
890 }
891
892 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
893 {
894 PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
895 }
896 static inline void set_iopl_mask(unsigned mask)
897 {
898 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
899 }
900
901 /* The paravirtualized I/O functions */
902 static inline void slow_down_io(void)
903 {
904 pv_cpu_ops.io_delay();
905 #ifdef REALLY_SLOW_IO
906 pv_cpu_ops.io_delay();
907 pv_cpu_ops.io_delay();
908 pv_cpu_ops.io_delay();
909 #endif
910 }
911
912 #ifdef CONFIG_X86_LOCAL_APIC
913 /*
914 * Basic functions accessing APICs.
915 */
916 static inline void apic_write(unsigned long reg, u32 v)
917 {
918 PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
919 }
920
921 static inline u32 apic_read(unsigned long reg)
922 {
923 return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
924 }
925
926 static inline void setup_boot_clock(void)
927 {
928 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
929 }
930
931 static inline void setup_secondary_clock(void)
932 {
933 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
934 }
935 #endif
936
937 static inline void paravirt_post_allocator_init(void)
938 {
939 if (pv_init_ops.post_allocator_init)
940 (*pv_init_ops.post_allocator_init)();
941 }
942
943 static inline void paravirt_pagetable_setup_start(pgd_t *base)
944 {
945 (*pv_mmu_ops.pagetable_setup_start)(base);
946 }
947
948 static inline void paravirt_pagetable_setup_done(pgd_t *base)
949 {
950 (*pv_mmu_ops.pagetable_setup_done)(base);
951 }
952
953 #ifdef CONFIG_SMP
954 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
955 unsigned long start_esp)
956 {
957 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
958 phys_apicid, start_eip, start_esp);
959 }
960 #endif
961
962 static inline void paravirt_activate_mm(struct mm_struct *prev,
963 struct mm_struct *next)
964 {
965 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
966 }
967
968 static inline void arch_dup_mmap(struct mm_struct *oldmm,
969 struct mm_struct *mm)
970 {
971 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
972 }
973
974 static inline void arch_exit_mmap(struct mm_struct *mm)
975 {
976 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
977 }
978
979 static inline void __flush_tlb(void)
980 {
981 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
982 }
983 static inline void __flush_tlb_global(void)
984 {
985 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
986 }
987 static inline void __flush_tlb_single(unsigned long addr)
988 {
989 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
990 }
991
992 static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
993 unsigned long va)
994 {
995 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
996 }
997
998 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
999 {
1000 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
1001 }
1002
1003 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1004 {
1005 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1006 }
1007
1008 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
1009 {
1010 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
1011 }
1012 static inline void paravirt_release_pte(unsigned pfn)
1013 {
1014 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1015 }
1016
1017 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
1018 {
1019 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1020 }
1021
1022 static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
1023 unsigned start, unsigned count)
1024 {
1025 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1026 }
1027 static inline void paravirt_release_pmd(unsigned pfn)
1028 {
1029 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1030 }
1031
1032 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
1033 {
1034 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1035 }
1036 static inline void paravirt_release_pud(unsigned pfn)
1037 {
1038 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1039 }
1040
1041 #ifdef CONFIG_HIGHPTE
1042 static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1043 {
1044 unsigned long ret;
1045 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
1046 return (void *)ret;
1047 }
1048 #endif
1049
1050 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
1051 pte_t *ptep)
1052 {
1053 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
1054 }
1055
1056 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
1057 pte_t *ptep)
1058 {
1059 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
1060 }
1061
1062 static inline pte_t __pte(pteval_t val)
1063 {
1064 pteval_t ret;
1065
1066 if (sizeof(pteval_t) > sizeof(long))
1067 ret = PVOP_CALL2(pteval_t,
1068 pv_mmu_ops.make_pte,
1069 val, (u64)val >> 32);
1070 else
1071 ret = PVOP_CALL1(pteval_t,
1072 pv_mmu_ops.make_pte,
1073 val);
1074
1075 return (pte_t) { .pte = ret };
1076 }
1077
1078 static inline pteval_t pte_val(pte_t pte)
1079 {
1080 pteval_t ret;
1081
1082 if (sizeof(pteval_t) > sizeof(long))
1083 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
1084 pte.pte, (u64)pte.pte >> 32);
1085 else
1086 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1087 pte.pte);
1088
1089 return ret;
1090 }
1091
1092 static inline pteval_t pte_flags(pte_t pte)
1093 {
1094 pteval_t ret;
1095
1096 if (sizeof(pteval_t) > sizeof(long))
1097 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1098 pte.pte, (u64)pte.pte >> 32);
1099 else
1100 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
1101 pte.pte);
1102
1103 #ifdef CONFIG_PARAVIRT_DEBUG
1104 BUG_ON(ret & PTE_PFN_MASK);
1105 #endif
1106 return ret;
1107 }
1108
1109 static inline pgd_t __pgd(pgdval_t val)
1110 {
1111 pgdval_t ret;
1112
1113 if (sizeof(pgdval_t) > sizeof(long))
1114 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
1115 val, (u64)val >> 32);
1116 else
1117 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
1118 val);
1119
1120 return (pgd_t) { ret };
1121 }
1122
1123 static inline pgdval_t pgd_val(pgd_t pgd)
1124 {
1125 pgdval_t ret;
1126
1127 if (sizeof(pgdval_t) > sizeof(long))
1128 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
1129 pgd.pgd, (u64)pgd.pgd >> 32);
1130 else
1131 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
1132 pgd.pgd);
1133
1134 return ret;
1135 }
1136
1137 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1138 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1139 pte_t *ptep)
1140 {
1141 pteval_t ret;
1142
1143 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1144 mm, addr, ptep);
1145
1146 return (pte_t) { .pte = ret };
1147 }
1148
1149 static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1150 pte_t *ptep, pte_t pte)
1151 {
1152 if (sizeof(pteval_t) > sizeof(long))
1153 /* 5 arg words */
1154 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1155 else
1156 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1157 mm, addr, ptep, pte.pte);
1158 }
1159
1160 static inline void set_pte(pte_t *ptep, pte_t pte)
1161 {
1162 if (sizeof(pteval_t) > sizeof(long))
1163 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1164 pte.pte, (u64)pte.pte >> 32);
1165 else
1166 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1167 pte.pte);
1168 }
1169
1170 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1171 pte_t *ptep, pte_t pte)
1172 {
1173 if (sizeof(pteval_t) > sizeof(long))
1174 /* 5 arg words */
1175 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1176 else
1177 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1178 }
1179
1180 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1181 {
1182 pmdval_t val = native_pmd_val(pmd);
1183
1184 if (sizeof(pmdval_t) > sizeof(long))
1185 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1186 else
1187 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1188 }
1189
1190 #if PAGETABLE_LEVELS >= 3
1191 static inline pmd_t __pmd(pmdval_t val)
1192 {
1193 pmdval_t ret;
1194
1195 if (sizeof(pmdval_t) > sizeof(long))
1196 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
1197 val, (u64)val >> 32);
1198 else
1199 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
1200 val);
1201
1202 return (pmd_t) { ret };
1203 }
1204
1205 static inline pmdval_t pmd_val(pmd_t pmd)
1206 {
1207 pmdval_t ret;
1208
1209 if (sizeof(pmdval_t) > sizeof(long))
1210 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
1211 pmd.pmd, (u64)pmd.pmd >> 32);
1212 else
1213 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
1214 pmd.pmd);
1215
1216 return ret;
1217 }
1218
1219 static inline void set_pud(pud_t *pudp, pud_t pud)
1220 {
1221 pudval_t val = native_pud_val(pud);
1222
1223 if (sizeof(pudval_t) > sizeof(long))
1224 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1225 val, (u64)val >> 32);
1226 else
1227 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1228 val);
1229 }
1230 #if PAGETABLE_LEVELS == 4
1231 static inline pud_t __pud(pudval_t val)
1232 {
1233 pudval_t ret;
1234
1235 if (sizeof(pudval_t) > sizeof(long))
1236 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
1237 val, (u64)val >> 32);
1238 else
1239 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
1240 val);
1241
1242 return (pud_t) { ret };
1243 }
1244
1245 static inline pudval_t pud_val(pud_t pud)
1246 {
1247 pudval_t ret;
1248
1249 if (sizeof(pudval_t) > sizeof(long))
1250 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
1251 pud.pud, (u64)pud.pud >> 32);
1252 else
1253 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
1254 pud.pud);
1255
1256 return ret;
1257 }
1258
1259 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1260 {
1261 pgdval_t val = native_pgd_val(pgd);
1262
1263 if (sizeof(pgdval_t) > sizeof(long))
1264 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1265 val, (u64)val >> 32);
1266 else
1267 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1268 val);
1269 }
1270
1271 static inline void pgd_clear(pgd_t *pgdp)
1272 {
1273 set_pgd(pgdp, __pgd(0));
1274 }
1275
1276 static inline void pud_clear(pud_t *pudp)
1277 {
1278 set_pud(pudp, __pud(0));
1279 }
1280
1281 #endif /* PAGETABLE_LEVELS == 4 */
1282
1283 #endif /* PAGETABLE_LEVELS >= 3 */
1284
1285 #ifdef CONFIG_X86_PAE
1286 /* Special-case pte-setting operations for PAE, which can't update a
1287 64-bit pte atomically */
1288 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1289 {
1290 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1291 pte.pte, pte.pte >> 32);
1292 }
1293
1294 static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1295 pte_t *ptep, pte_t pte)
1296 {
1297 /* 5 arg words */
1298 pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
1299 }
1300
1301 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1302 pte_t *ptep)
1303 {
1304 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1305 }
1306
1307 static inline void pmd_clear(pmd_t *pmdp)
1308 {
1309 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1310 }
1311 #else /* !CONFIG_X86_PAE */
1312 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1313 {
1314 set_pte(ptep, pte);
1315 }
1316
1317 static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1318 pte_t *ptep, pte_t pte)
1319 {
1320 set_pte(ptep, pte);
1321 }
1322
1323 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1324 pte_t *ptep)
1325 {
1326 set_pte_at(mm, addr, ptep, __pte(0));
1327 }
1328
1329 static inline void pmd_clear(pmd_t *pmdp)
1330 {
1331 set_pmd(pmdp, __pmd(0));
1332 }
1333 #endif /* CONFIG_X86_PAE */
1334
1335 /* Lazy mode for batching updates / context switch */
1336 enum paravirt_lazy_mode {
1337 PARAVIRT_LAZY_NONE,
1338 PARAVIRT_LAZY_MMU,
1339 PARAVIRT_LAZY_CPU,
1340 };
1341
1342 enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1343 void paravirt_enter_lazy_cpu(void);
1344 void paravirt_leave_lazy_cpu(void);
1345 void paravirt_enter_lazy_mmu(void);
1346 void paravirt_leave_lazy_mmu(void);
1347 void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
1348
1349 #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
1350 static inline void arch_enter_lazy_cpu_mode(void)
1351 {
1352 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
1353 }
1354
1355 static inline void arch_leave_lazy_cpu_mode(void)
1356 {
1357 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
1358 }
1359
1360 static inline void arch_flush_lazy_cpu_mode(void)
1361 {
1362 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
1363 arch_leave_lazy_cpu_mode();
1364 arch_enter_lazy_cpu_mode();
1365 }
1366 }
1367
1368
1369 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1370 static inline void arch_enter_lazy_mmu_mode(void)
1371 {
1372 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
1373 }
1374
1375 static inline void arch_leave_lazy_mmu_mode(void)
1376 {
1377 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
1378 }
1379
1380 static inline void arch_flush_lazy_mmu_mode(void)
1381 {
1382 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
1383 arch_leave_lazy_mmu_mode();
1384 arch_enter_lazy_mmu_mode();
1385 }
1386 }
1387
1388 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1389 unsigned long phys, pgprot_t flags)
1390 {
1391 pv_mmu_ops.set_fixmap(idx, phys, flags);
1392 }
1393
1394 void _paravirt_nop(void);
1395 #define paravirt_nop ((void *)_paravirt_nop)
1396
1397 void paravirt_use_bytelocks(void);
1398
1399 #ifdef CONFIG_SMP
1400
1401 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1402 {
1403 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1404 }
1405
1406 static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1407 {
1408 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1409 }
1410
1411 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1412 {
1413 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1414 }
1415
1416 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1417 {
1418 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1419 }
1420
1421 static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1422 {
1423 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1424 }
1425
1426 #endif
1427
1428 /* These all sit in the .parainstructions section to tell us what to patch. */
1429 struct paravirt_patch_site {
1430 u8 *instr; /* original instructions */
1431 u8 instrtype; /* type of this instruction */
1432 u8 len; /* length of original instruction */
1433 u16 clobbers; /* what registers you may clobber */
1434 };
1435
1436 extern struct paravirt_patch_site __parainstructions[],
1437 __parainstructions_end[];
1438
1439 #ifdef CONFIG_X86_32
1440 #define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
1441 #define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
1442 #define PV_FLAGS_ARG "0"
1443 #define PV_EXTRA_CLOBBERS
1444 #define PV_VEXTRA_CLOBBERS
1445 #else
1446 /* We save some registers, but all of them, that's too much. We clobber all
1447 * caller saved registers but the argument parameter */
1448 #define PV_SAVE_REGS "pushq %%rdi;"
1449 #define PV_RESTORE_REGS "popq %%rdi;"
1450 #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1451 #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
1452 #define PV_FLAGS_ARG "D"
1453 #endif
1454
1455 static inline unsigned long __raw_local_save_flags(void)
1456 {
1457 unsigned long f;
1458
1459 asm volatile(paravirt_alt(PV_SAVE_REGS
1460 PARAVIRT_CALL
1461 PV_RESTORE_REGS)
1462 : "=a"(f)
1463 : paravirt_type(pv_irq_ops.save_fl),
1464 paravirt_clobber(CLBR_EAX)
1465 : "memory", "cc" PV_VEXTRA_CLOBBERS);
1466 return f;
1467 }
1468
1469 static inline void raw_local_irq_restore(unsigned long f)
1470 {
1471 asm volatile(paravirt_alt(PV_SAVE_REGS
1472 PARAVIRT_CALL
1473 PV_RESTORE_REGS)
1474 : "=a"(f)
1475 : PV_FLAGS_ARG(f),
1476 paravirt_type(pv_irq_ops.restore_fl),
1477 paravirt_clobber(CLBR_EAX)
1478 : "memory", "cc" PV_EXTRA_CLOBBERS);
1479 }
1480
1481 static inline void raw_local_irq_disable(void)
1482 {
1483 asm volatile(paravirt_alt(PV_SAVE_REGS
1484 PARAVIRT_CALL
1485 PV_RESTORE_REGS)
1486 :
1487 : paravirt_type(pv_irq_ops.irq_disable),
1488 paravirt_clobber(CLBR_EAX)
1489 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1490 }
1491
1492 static inline void raw_local_irq_enable(void)
1493 {
1494 asm volatile(paravirt_alt(PV_SAVE_REGS
1495 PARAVIRT_CALL
1496 PV_RESTORE_REGS)
1497 :
1498 : paravirt_type(pv_irq_ops.irq_enable),
1499 paravirt_clobber(CLBR_EAX)
1500 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1501 }
1502
1503 static inline unsigned long __raw_local_irq_save(void)
1504 {
1505 unsigned long f;
1506
1507 f = __raw_local_save_flags();
1508 raw_local_irq_disable();
1509 return f;
1510 }
1511
1512
1513 /* Make sure as little as possible of this mess escapes. */
1514 #undef PARAVIRT_CALL
1515 #undef __PVOP_CALL
1516 #undef __PVOP_VCALL
1517 #undef PVOP_VCALL0
1518 #undef PVOP_CALL0
1519 #undef PVOP_VCALL1
1520 #undef PVOP_CALL1
1521 #undef PVOP_VCALL2
1522 #undef PVOP_CALL2
1523 #undef PVOP_VCALL3
1524 #undef PVOP_CALL3
1525 #undef PVOP_VCALL4
1526 #undef PVOP_CALL4
1527
1528 #else /* __ASSEMBLY__ */
1529
1530 #define _PVSITE(ptype, clobbers, ops, word, algn) \
1531 771:; \
1532 ops; \
1533 772:; \
1534 .pushsection .parainstructions,"a"; \
1535 .align algn; \
1536 word 771b; \
1537 .byte ptype; \
1538 .byte 772b-771b; \
1539 .short clobbers; \
1540 .popsection
1541
1542
1543 #ifdef CONFIG_X86_64
1544 #define PV_SAVE_REGS \
1545 push %rax; \
1546 push %rcx; \
1547 push %rdx; \
1548 push %rsi; \
1549 push %rdi; \
1550 push %r8; \
1551 push %r9; \
1552 push %r10; \
1553 push %r11
1554 #define PV_RESTORE_REGS \
1555 pop %r11; \
1556 pop %r10; \
1557 pop %r9; \
1558 pop %r8; \
1559 pop %rdi; \
1560 pop %rsi; \
1561 pop %rdx; \
1562 pop %rcx; \
1563 pop %rax
1564 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1565 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1566 #define PARA_INDIRECT(addr) *addr(%rip)
1567 #else
1568 #define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
1569 #define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
1570 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1571 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1572 #define PARA_INDIRECT(addr) *%cs:addr
1573 #endif
1574
1575 #define INTERRUPT_RETURN \
1576 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
1577 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1578
1579 #define DISABLE_INTERRUPTS(clobbers) \
1580 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1581 PV_SAVE_REGS; \
1582 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1583 PV_RESTORE_REGS;) \
1584
1585 #define ENABLE_INTERRUPTS(clobbers) \
1586 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1587 PV_SAVE_REGS; \
1588 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1589 PV_RESTORE_REGS;)
1590
1591 #define USERGS_SYSRET32 \
1592 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
1593 CLBR_NONE, \
1594 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1595
1596 #ifdef CONFIG_X86_32
1597 #define GET_CR0_INTO_EAX \
1598 push %ecx; push %edx; \
1599 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1600 pop %edx; pop %ecx
1601
1602 #define ENABLE_INTERRUPTS_SYSEXIT \
1603 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1604 CLBR_NONE, \
1605 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1606
1607
1608 #else /* !CONFIG_X86_32 */
1609
1610 /*
1611 * If swapgs is used while the userspace stack is still current,
1612 * there's no way to call a pvop. The PV replacement *must* be
1613 * inlined, or the swapgs instruction must be trapped and emulated.
1614 */
1615 #define SWAPGS_UNSAFE_STACK \
1616 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1617 swapgs)
1618
1619 #define SWAPGS \
1620 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1621 PV_SAVE_REGS; \
1622 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1623 PV_RESTORE_REGS \
1624 )
1625
1626 #define GET_CR2_INTO_RCX \
1627 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1628 movq %rax, %rcx; \
1629 xorq %rax, %rax;
1630
1631 #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
1632 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1633 CLBR_NONE, \
1634 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1635
1636 #define USERGS_SYSRET64 \
1637 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
1638 CLBR_NONE, \
1639 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1640
1641 #define ENABLE_INTERRUPTS_SYSEXIT32 \
1642 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1643 CLBR_NONE, \
1644 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1645 #endif /* CONFIG_X86_32 */
1646
1647 #endif /* __ASSEMBLY__ */
1648 #endif /* CONFIG_PARAVIRT */
1649 #endif /* __ASM_PARAVIRT_H */