#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI  (1 << 3)
#define CLBR_RDI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)
#define CLBR_ANY  ((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all regs platform has.  For i386, that's just it */
#define CLBR_ANY  ((1 << 3) - 1)
#endif /* X86_64 */
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
	/* Basic arch-specific setup */
	void (*arch_setup)(void);
	char *(*memory_setup)(void);
	void (*post_allocator_init)(void);

	/* Print a banner to identify the environment */

	/* Set deferred update mode, used for batching operations. */
	void (*time_init)(void);

	/* Get and set the time of day. */
	unsigned long (*get_wallclock)(void);
	int (*set_wallclock)(unsigned long);

	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);
	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);
	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr_amd)(unsigned int msr, int *err);
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);
	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	struct pv_lazy_ops lazy_mode;
	void (*init_IRQ)(void);

	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 */
	unsigned long (*save_fl)(void);
	void (*restore_fl)(unsigned long);
	void (*irq_disable)(void);
	void (*irq_enable)(void);
	void (*safe_halt)(void);

	void (*adjust_exception_frame)(void);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Direct APIC operations, principally for VMI.  Ideally
	 * these shouldn't be in this interface.
	 */
	void (*apic_write)(unsigned long reg, u32 v);
	u32 (*apic_read)(unsigned long reg);
	void (*setup_boot_clock)(void);
	void (*setup_secondary_clock)(void);

	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
	/*
	 * Called before/after init_mm pagetable setup. setup_start
	 * may reset %cr3, and may pre-install parts of the pagetable;
	 * pagetable setup is expected to preserve any existing
	 * mapping.
	 */
	void (*pagetable_setup_start)(pgd_t *pgd_base);
	void (*pagetable_setup_done)(pgd_t *pgd_base);

	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);
	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);

	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va);
	/* Hooks for allocating and freeing a pagetable top-level */
	int (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable.
	 */
	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
	void (*release_pte)(u32 pfn);
	void (*release_pmd)(u32 pfn);
	void (*release_pud)(u32 pfn);
	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	pteval_t (*pte_val)(pte_t);
	pteval_t (*pte_flags)(pte_t);
	pte_t (*make_pte)(pteval_t pte);

	pgdval_t (*pgd_val)(pgd_t);
	pgd_t (*make_pgd)(pgdval_t pgd);
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);
#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	pmdval_t (*pmd_val)(pmd_t);
	pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
	pudval_t (*pud_val)(pud_t);
	pud_t (*make_pud)(pudval_t pud);

	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_HIGHPTE
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

	struct pv_lazy_ops lazy_mode;

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   unsigned long phys, pgprot_t flags);
	int (*spin_is_locked)(struct raw_spinlock *lock);
	int (*spin_is_contended)(struct raw_spinlock *lock);
	void (*spin_lock)(struct raw_spinlock *lock);
	int (*spin_trylock)(struct raw_spinlock *lock);
	void (*spin_unlock)(struct raw_spinlock *lock);
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)
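
/*
 * As a concrete illustration (derived from the macros above, not an extra
 * interface): PARAVIRT_PATCH(pv_cpu_ops.read_cr0) evaluates to the
 * pointer-sized-word offset of the read_cr0 slot inside struct
 * paravirt_patch_template, and paravirt_type(pv_cpu_ops.read_cr0) feeds
 * both that constant and the slot itself into an asm statement, so the
 * patcher can later locate the call site and know which operation it
 * belongs to.
 */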
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	"  .short " clobber "\n"			\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);
/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%[paravirt_opptr];"
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller saved registers, which,
 * unfortunately, are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes such a pair in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS			unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS		unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
#define __PVOP_VCALL(op, pre, post, ...)				\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : PVOP_VCALL_CLOBBERS			\
			     : paravirt_type(op),			\
			       paravirt_clobber(CLBR_ANY),		\
			       ##__VA_ARGS__				\
			     : "memory", "cc" VEXTRA_CLOBBERS);		\
	})
#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		    "3" ((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		     "3" ((unsigned long)(arg4)))
#endif
static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP			pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
	return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
	return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
	return pv_time_ops.time_init;
}
/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}
static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}

static inline void raw_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}
#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
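
/*
 * Typical usage (sketch): rdmsrl(MSR_EFER, efer) reads MSR_EFER into a u64
 * variable and wrmsrl(MSR_EFER, efer) writes it back; under CONFIG_PARAVIRT
 * both end up in the pv_cpu_ops.read_msr/write_msr hooks above rather than
 * issuing raw rdmsr/wrmsr instructions.
 */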
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_amd(msr, &err);
	return err;
}

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())
static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	int __aux;					\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned long __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)
static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}

static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
	PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
	return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif
static inline void paravirt_post_allocator_init(void)
{
	if (pv_init_ops.post_allocator_init)
		(*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_done)(base);
}

static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
					    unsigned start, unsigned count)
{
	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
	unsigned long ret;
	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
	return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t,
				 pv_mmu_ops.make_pte,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pteval_t,
				 pv_mmu_ops.make_pte,
				 val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte);

	return ret;
}
static inline pteval_t pte_flags(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte);

#ifdef CONFIG_PARAVIRT_DEBUG
	BUG_ON(ret & PTE_PFN_MASK);
#endif
	return ret;
}
static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
				 val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd);

	return ret;
}
#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
				 val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd);

	return ret;
}
static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}

#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
				 val);

	return (pud_t) { ret };
}
static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}
static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	/* 5 arg words */
	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}
}
#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}
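
/*
 * Sketch of intended use: a caller issuing many pagetable updates can
 * bracket them with arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode(),
 * letting a hypervisor backend queue the individual set_pte() operations
 * and flush them as one batch when the lazy section ends.
 */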
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				unsigned long phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
#define paravirt_nop	((void *)_paravirt_nop)

void paravirt_use_bytelocks(void);
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
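
/*
 * At boot, apply_paravirt() walks the table between __parainstructions and
 * __parainstructions_end; for each site it hands the recorded call
 * instruction to pv_init_ops.patch (native_patch() by default) and nop-pads
 * whatever bytes the patcher does not rewrite.
 */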
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save only some registers; saving all of them would be too much.  We
 * clobber all caller-saved registers but the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif
static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : paravirt_type(pv_irq_ops.save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
	return f;
}
static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : PV_FLAGS_ARG(f),
		       paravirt_type(pv_irq_ops.restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long f;

	f = __raw_local_save_flags();
	raw_local_irq_disable();

	return f;
}
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
	.pushsection .parainstructions,"a";		\

#ifdef CONFIG_X86_64
#define PV_SAVE_REGS	\
#define PV_RESTORE_REGS	\
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS;)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);		\
		  PV_RESTORE_REGS					\
		 )

#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */