/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

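/*
 * MCOUNT_OFFSET_INSNS is the distance, in instructions, between the address
 * recorded in the __mcount_loc section and the _mcount call site's return
 * address: the module calling sequence for KBUILD_MCOUNT_RA_ADDRESS on
 * 32-bit carries one extra "move $12, ra_address" instruction (see the
 * calling-site layout documented below).
 */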
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

/*
 * Check if the address is in kernel space
 *
 * A clone of core_kernel_text() from kernel/extable.c, but without the
 * init_kernel_text() check: ftrace does not trace functions in init
 * sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
        if (ip >= (unsigned long)_stext &&
            ip <= (unsigned long)_etext)
                return 1;
        return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff    /* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000     /* nop */
#define INSN_JAL(addr)  \
        ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
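
/*
 * jal/j are J-type instructions: they encode bits 27..2 of the target in
 * the 26-bit address field, so the destination must lie in the same
 * 256MB-aligned segment as the delay slot; JUMP_RANGE_MASK keeps those low
 * 28 bits. E.g. INSN_JAL(0x80123458) yields
 * 0x0c000000 | ((0x80123458 >> 2) & 0x03ffffff) = 0x0c048d16.
 */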

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
        u32 *buf;
        unsigned int v1;

        /* lui v1, hi16_mcount */
        v1 = 3;         /* $v1 is general-purpose register 3 */
        buf = (u32 *)&insn_lui_v1_hi16_mcount;
        UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

        /* jal (ftrace_caller + 8), jump over the first two instructions */
        buf = (u32 *)&insn_jal_ftrace_caller;
        uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* j ftrace_graph_caller */
        buf = (u32 *)&insn_j_ftrace_graph_caller;
        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
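
/*
 * The three instructions encoded above serve as the templates patched in
 * later: insn_jal_ftrace_caller re-activates a kernel call site in
 * ftrace_make_call(), the lui restores the first instruction of a module's
 * long-call sequence, and the j is written over the ftrace_graph_call site
 * by ftrace_enable_ftrace_graph_caller().
 */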

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
        int faulted;

        /* *(unsigned int *)ip = new_code; */
        safe_store_code(new_code, ip, faulted);

        if (unlikely(faulted))
                return -EFAULT;

        flush_icache_range(ip, ip + 8);

        return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
                                unsigned int new_code2)
{
        int faulted;

        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;
        ip += 4;
        safe_store_code(new_code2, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;
        flush_icache_range(ip, ip + 8); /* ip here is original ip + 4 */
        return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount --> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 * sub sp, sp, 8
 * 1: offset = 5 instructions
 *
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 * nop | move $12, ra_address | sub sp, sp, 8
 * 1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
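
/*
 * 0x10000000 is "beq zero, zero, <offset>", i.e. an unconditional branch;
 * the low 16 bits hold the branch offset in instructions, counted from the
 * delay slot. With MCOUNT_OFFSET_INSNS == 5 this gives the 0x10000005
 * ("b 1f") shown in the calling-site layout above.
 */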

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        /*
         * If ip is in kernel space there is no long call; otherwise the
         * long-call sequence must be branched over.
         */
        new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
#else
        /*
         * On 32 bit MIPS platforms, gcc adds a stack adjust
         * instruction in the delay slot after the branch to
         * mcount and expects mcount to restore the sp on return.
         * This is based on a legacy API and does nothing but
         * waste instructions so it's being removed at runtime.
         */
        return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
                insn_lui_v1_hi16_mcount;

        return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
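
/*
 * ftrace_call is the patchable call site inside ftrace_caller (defined in
 * this arch's mcount.S); rewriting the jal there switches which tracer
 * function gets called.
 */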

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned int new;

        new = INSN_JAL((unsigned long)func);

        return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* Encode the instructions when booting */
        ftrace_dyn_arch_init_insns();

        /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
        ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
                        insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
#define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff      /* stack offset range: 0 ~ PT_SIZE */
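
/*
 * 0xafbf0000 decodes as "sw ra, 0(sp)": opcode sw (0x2b) in bits 31..26,
 * base register sp (29) in bits 25..21, rt = ra (31) in bits 20..16.
 * Masking against S_R_SP (rt field reduced to its high bit) accepts a
 * store of any of registers $16..$31 relative to sp.
 */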

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
                old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
        unsigned long sp, ip, tmp;
        unsigned int code;
        int faulted;

        /*
         * For a module, move ip back over the long-call sequence to just
         * above "lui v1, hi_16bit_of_mcount" (offset 24); for the kernel,
         * to just above "move at, ra" (offset 16).
         */
        ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

        /*
         * Search backwards through the text until we find either a
         * non-store instruction or the "s{d,w} ra, offset(sp)" instruction.
         */
        do {
                /* get the code at "ip": code = *(unsigned int *)ip; */
                safe_load_code(code, ip, faulted);

                if (unlikely(faulted))
                        return 0;
                /*
                 * If we hit a non-store instruction before finding where
                 * ra is stored, then this is a leaf function and it does
                 * not store ra on the stack.
                 */
                if ((code & S_R_SP) != S_R_SP)
                        return parent_ra_addr;

                /* Move to the previous instruction */
                ip -= 4;
        } while ((code & S_RA_SP) != S_RA_SP);

        sp = fp + (code & OFFSET_MASK);

        /* tmp = *(unsigned long *)sp; */
        safe_load_stack(tmp, sp, faulted);
        if (unlikely(faulted))
                return 0;

        if (tmp == old_parent_ra)
                return sp;
        return 0;
}
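
/*
 * Illustrative example (not from a real build): scanning backwards from
 * the call site of a non-leaf prologue such as
 *
 *      addiu   sp, sp, -32
 *      sw      ra, 28(sp)
 *      sw      s0, 24(sp)
 *      move    at, ra
 *      jal     _mcount
 *
 * the loop skips past the s0 store, matches "sw ra, 28(sp)" and returns
 * fp + 28 as the stack slot holding the parent's return address.
 */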

#endif  /* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
{
        unsigned long old_parent_ra;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
            &return_to_handler;
        int faulted, insns;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * "parent_ra_addr" is the stack address where the return address
         * of the caller of _mcount is saved.
         *
         * With gcc < 4.5, a leaf function does not save the return address
         * on its stack, so we "emulate" one in _mcount's stack space and
         * hijack it directly.  A non-leaf function does save the return
         * address to its own stack space, so we cannot hijack it directly;
         * we must find the real stack address, which is what
         * ftrace_get_parent_ra_addr() does.
         *
         * With gcc >= 4.5 and the new -mmcount-ra-address option, for a
         * non-leaf function the location of the return address is passed
         * to us in $12, and for a leaf function $12 is simply zero; this
         * is done in ftrace_graph_caller of mcount.S.
         */

        /* old_parent_ra = *parent_ra_addr; */
        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
        /*
         * If getting the stack address of the non-leaf function's ra
         * fails, stop the function graph tracer and return.
         */
        if (parent_ra_addr == 0)
                goto out;
#endif
        /* *parent_ra_addr = return_hooker; */
        safe_store_stack(return_hooker, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;

        if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
            == -EBUSY) {
                *parent_ra_addr = old_parent_ra;
                return;
        }

        /*
         * Get the recorded ip of the current mcount calling site in the
         * __mcount_loc section, which will be used to filter the function
         * entries configured through the tracing/set_graph_function interface.
         */

        insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
        trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent_ra_addr = old_parent_ra;
        }
        return;
out:
        ftrace_graph_stop();
        WARN_ON(1);
}
#endif  /* CONFIG_FUNCTION_GRAPH_TRACER */