/* arch/x86/include/asm/alternative.h */
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <linux/jump_label.h>
#include <asm/asm.h>

/*
 * Alternative inline assembly for SMP.
 *
 * The LOCK_PREFIX macro defined here replaces the LOCK and
 * LOCK_PREFIX macros used everywhere in the source tree.
 *
 * SMP alternatives use the same data structures as the other
 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
 * UP system running an SMP kernel.  The existing apply_alternatives()
 * works fine for patching an SMP kernel for UP.
 *
 * The SMP alternative tables can be kept after boot and contain both
 * UP and SMP versions of the instructions to allow switching back to
 * SMP at runtime, when hotplugging in a new CPU, which is especially
 * useful in virtualized environments.
 *
 * The very common lock prefix is handled as a special case in a
 * separate table which is a pure address list without replacement ptr
 * and size information.  That keeps the table sizes small.
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE \
		".section .smp_locks,\"a\"\n"	\
		".balign 4\n"			\
		".long 671f - .\n" /* offset */	\
		".previous\n"			\
		"671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "

#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif
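/*
 * For illustration only (not part of this header): LOCK_PREFIX is meant
 * to prefix locked instructions in inline assembly, roughly the way the
 * atomic helpers in <asm/atomic.h> use it:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		asm volatile(LOCK_PREFIX "addl %1,%0"
 *			     : "+m" (v->counter)
 *			     : "ir" (i));
 *	}
 *
 * On SMP kernels the address of the lock prefix is recorded in
 * .smp_locks so it can be patched out when running on a UP system.
 */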

struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;
	u16 cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
#ifdef CONFIG_X86_64
	u32 pad2;
#endif
};

extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);

struct module;

#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
extern int alternatives_text_reserved(void *start, void *end);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
static inline int alternatives_text_reserved(void *start, void *end)
{
	return 0;
}
#endif	/* CONFIG_SMP */

/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature)			\
									\
      "661:\n\t" oldinstr "\n662:\n"					\
      ".section .altinstructions,\"a\"\n"				\
      _ASM_ALIGN "\n"							\
      _ASM_PTR "661b\n"				/* label           */	\
      _ASM_PTR "663f\n"				/* new instruction */	\
      " .word " __stringify(feature) "\n"	/* feature bit     */	\
      " .byte 662b-661b\n"			/* sourcelen       */	\
      " .byte 664f-663f\n"			/* replacementlen  */	\
      ".previous\n"							\
      ".section .discard,\"aw\",@progbits\n"				\
      " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */	\
      ".previous\n"							\
      ".section .altinstr_replacement, \"ax\"\n"			\
      "663:\n\t" newinstr "\n664:\n"		/* replacement     */	\
      ".previous"

/*
 * This must be included *after* the definition of ALTERNATIVE due to
 * <asm/arch_hweight.h>
 */
#include <asm/cpufeature.h>
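/*
 * For illustration only (not from this header): <asm/arch_hweight.h> is
 * one user of the raw ALTERNATIVE() primitive.  Roughly, it replaces a
 * call to a software popcount routine with the popcnt instruction when
 * X86_FEATURE_POPCNT is set (POPCNT32, REG_IN and REG_OUT are helpers
 * defined in that header):
 *
 *	unsigned int res = 0;
 *
 *	asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
 *		     : "="REG_OUT (res)
 *		     : REG_IN (w));
 */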

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines, please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
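/*
 * For illustration only (not from this header): roughly how
 * rdtsc_barrier() uses alternative() to turn a 3-byte nop into a fence
 * on CPUs that need it (ASM_NOP3 comes from <asm/nops.h>):
 *
 *	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
 *	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
 */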

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * It is best to use constraints that are fixed size (like (%1) ... "r").
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 * An unused argument 0 is left in to keep API compatibility.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: : "i" (0), ## input)
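/*
 * For illustration only (not from this header): roughly how prefetch()
 * in <asm/processor.h> uses alternative_input().  Note that the first
 * real input is %1 because of the unused argument 0:
 *
 *	static inline void prefetch(const void *x)
 *	{
 *		alternative_input(BASE_PREFETCH,
 *				  "prefetchnta (%1)",
 *				  X86_FEATURE_XMM,
 *				  "r" (x));
 *	}
 */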

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: output : "i" (0), ## input)
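/*
 * For illustration only (not from this header): roughly how
 * native_apic_mem_write() in <asm/apic.h> uses alternative_io(), with
 * ASM_OUTPUT2 (defined further down) packing several operands into one
 * macro argument:
 *
 *	alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
 *		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
 *		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 */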

/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, feature, output, input...)	\
	asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)

/*
 * Use this macro if you need more than one output parameter in
 * alternative_io().
 */
#define ASM_OUTPUT2(a...) a
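/*
 * For illustration only (not from this header): roughly how
 * copy_user_generic() in <asm/uaccess_64.h> combines alternative_call()
 * and ASM_OUTPUT2() to pick between two copy routines at patch time:
 *
 *	alternative_call(copy_user_generic_unrolled,
 *			 copy_user_generic_string,
 *			 X86_FEATURE_REP_GOOD,
 *			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
 *				     "=d" (len)),
 *			 "1" (to), "2" (from), "3" (len)
 *			 : "memory", "rcx", "r8", "r9", "r10", "r11");
 */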

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

extern void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * More care must be taken when modifying code in the SMP case because of
 * Intel's errata. text_poke_smp() takes care of that errata, but it still
 * doesn't support modifying code in NMI/MCE handlers.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
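/*
 * For illustration only (a sketch, not from this header): callers such
 * as the jump label and ftrace code patch a 5-byte site roughly like
 * this, using the ideal_nop5 sequence declared below (site_addr stands
 * for whatever address is being patched):
 *
 *	text_poke_smp(site_addr, ideal_nop5, IDEAL_NOP_SIZE_5);
 */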

#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
#define IDEAL_NOP_SIZE_5 5
extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
extern void arch_init_ideal_nop5(void);
#else
static inline void arch_init_ideal_nop5(void) {}
#endif

#endif /* _ASM_X86_ALTERNATIVE_H */