arch/mips/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/*
 * Check if the address is in kernel space
 *
 * Cloned from core_kernel_text() in kernel/extable.c, but without the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

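/*
 * Worked example (editorial sketch, not part of the original file): how
 * INSN_JAL() encodes a call to a hypothetical, word-aligned target at
 * 0x80123454, assuming the call site lies in the same 256 MB segment:
 *
 *   INSN_JAL(0x80123454)
 *     = 0x0c000000 | ((0x80123454 >> 2) & 0x03ffffff)
 *     = 0x0c000000 | 0x00048d15
 *     = 0x0c048d15			-> "jal 0x80123454"
 *
 * A jal only carries the low 28 bits of the target (as a word index); the
 * upper bits come from the address of the delay slot, which is why
 * JUMP_RANGE_MASK is applied to the targets handed to uasm below.
 */
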
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
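
/*
 * Editorial note (not in the original file): as an illustration, if the
 * %hi() half of MCOUNT_ADDR were 0x8012, the template built above would be
 * "lui v1, 0x8012", which encodes as 0x3c038012 (opcode 0x0f, rt = $3).
 * ftrace_make_call() later writes this single word back over a module call
 * site, where ftrace_make_nop() had planted a "b 1f", re-enabling the long
 * call to _mcount.
 */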

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	flush_icache_range(ip, ip + 8); /* original ip + 12 */
	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount			--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	--> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				    1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	--> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				    1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

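/*
 * Worked encoding (editorial note, not in the original file): 0x10000000 is
 * "beq zero, zero, 0", i.e. an unconditional branch, and its low 16 bits
 * hold the branch offset in instructions.  ORing in MCOUNT_OFFSET_INSNS
 * gives 0x10000005 or 0x10000004, exactly the "b 1f" forms shown in the
 * comment above: once tracing is disabled, the branch skips the remainder
 * of the module's mcount call sequence.
 */
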
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise, a
	 * long call is.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}
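
/*
 * Illustrative before/after for an in-kernel call site on a 32-bit build
 * (editorial sketch; the addresses and delay-slot instruction are
 * hypothetical):
 *
 *   before:	0x80101230:	jal	_mcount
 *		0x80101234:	addiu	sp, sp, -8	<- gcc's stack adjust
 *
 *   after:	0x80101230:	nop
 *		0x80101234:	nop
 *
 * ftrace_modify_code_2() patches both words, so the legacy stack adjust is
 * removed together with the call.
 */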

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

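/*
 * Editorial example (not in the original file): "sw ra, 28(sp)" encodes as
 * 0xafbf001c -- opcode 0x2b (sw), base $29 (sp), rt $31 (ra), offset 28.
 * Its upper halfword, 0xafbf, is exactly S_RA_SP, and OFFSET_MASK extracts
 * the 28.  S_R_SP is the looser pattern "s{d,w} of a register in the
 * $16..$31 range to offset(sp)"; the scan below bails out (treating the
 * function as a leaf) as soon as it reads a word that does not match it.
 */
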
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip from the return address back past the
	 * "lui v1, hi_16bit_of_mcount" (an offset of 24); for the kernel,
	 * back past the "move at, ra" (an offset of 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search the text until we find a non-store instruction or an
	 * "s{d,w} ra, offset(sp)" instruction
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}
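
/*
 * Editorial walk-through (not part of the original file), assuming a
 * hypothetical in-kernel, non-leaf function whose prologue looks like:
 *
 *   0x80200000:	addiu	sp, sp, -32
 *   0x80200004:	sw	ra, 28(sp)	(0xafbf001c)
 *   0x80200008:	sw	s0, 24(sp)
 *   0x8020000c:	move	at, ra
 *   0x80200010:	jal	_mcount
 *   0x80200014:	 (delay slot)
 *
 * self_ra is 0x80200018, so the scan starts at 0x80200008, walks back over
 * the stores to sp until it reads 0xafbf001c, and returns fp + 28, the
 * stack slot in which the prologue saved the caller's ra.
 */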

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * _mcount's caller is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address on
	 * the stack, so we "emulate" one in _mcount's stack space and hijack
	 * it directly; a non-leaf function saves the return address in its
	 * own stack frame, so we cannot hijack it directly but must find the
	 * real stack address, which ftrace_get_parent_ra_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address is put in $12 for us for a non-leaf
	 * function, and a zero is put in $12 for a leaf function; this is
	 * done in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra fails,
	 * stop the function graph tracer and return
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
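	/*
	 * Editorial note (not in the original file): MCOUNT_INSN_SIZE is one
	 * 4-byte instruction here, so for an in-kernel site this works out to
	 * self_ra - 8, the address of the (patched) "jal _mcount" word that
	 * was recorded in __mcount_loc; for a module site it backs up over
	 * the whole lui/addiu/.../jalr sequence to the recorded lui instead.
	 */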

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */