Commit | Line | Data |
---|---|---|
4e491d14 SR |
1 | /* |
2 | * Code for replacing ftrace calls with jumps. | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box. | |
7 | * | |
6794c782 SR |
8 | * Added function graph tracer code, taken from x86 that was written |
9 | * by Frederic Weisbecker, and ported to PPC by Steven Rostedt. | |
10 | * | |
4e491d14 SR |
11 | */ |
12 | ||
13 | #include <linux/spinlock.h> | |
14 | #include <linux/hardirq.h> | |
e4486fe3 | 15 | #include <linux/uaccess.h> |
f48cb8b4 | 16 | #include <linux/module.h> |
4e491d14 SR |
17 | #include <linux/ftrace.h> |
18 | #include <linux/percpu.h> | |
19 | #include <linux/init.h> | |
20 | #include <linux/list.h> | |
21 | ||
22 | #include <asm/cacheflush.h> | |
f48cb8b4 | 23 | #include <asm/code-patching.h> |
395a59d0 | 24 | #include <asm/ftrace.h> |
02424d89 | 25 | #include <asm/syscall.h> |
4e491d14 | 26 | |
4e491d14 | 27 | |
6794c782 | 28 | #ifdef CONFIG_DYNAMIC_FTRACE |
/*
 * Build the branch instruction that replaces (or replaced) an mcount
 * call site at @ip.  @link selects 'bl' (branch-and-link) over 'b'.
 */
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	/* resolve the real entry point (function descriptor on PPC64) */
	unsigned long dest = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	return create_branch((unsigned int *)ip, dest, link ? 1 : 0);
}
41 | ||
8fd6e5a8 | 42 | static int |
b54dcfe1 | 43 | ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) |
4e491d14 | 44 | { |
b54dcfe1 | 45 | unsigned int replaced; |
4e491d14 | 46 | |
4e491d14 SR |
47 | /* |
48 | * Note: Due to modules and __init, code can | |
49 | * disappear and change, we need to protect against faulting | |
e4486fe3 SR |
50 | * as well as code changing. We do this by using the |
51 | * probe_kernel_* functions. | |
4e491d14 SR |
52 | * |
53 | * No real locking needed, this code is run through | |
e4486fe3 | 54 | * kstop_machine, or before SMP starts. |
4e491d14 | 55 | */ |
e4486fe3 SR |
56 | |
57 | /* read the text we want to modify */ | |
b54dcfe1 | 58 | if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE)) |
e4486fe3 SR |
59 | return -EFAULT; |
60 | ||
61 | /* Make sure it is what we expect it to be */ | |
b54dcfe1 | 62 | if (replaced != old) |
e4486fe3 SR |
63 | return -EINVAL; |
64 | ||
65 | /* replace the text with the new text */ | |
65b8c722 | 66 | if (patch_instruction((unsigned int *)ip, new)) |
e4486fe3 SR |
67 | return -EPERM; |
68 | ||
e4486fe3 | 69 | return 0; |
4e491d14 SR |
70 | } |
71 | ||
f48cb8b4 SR |
72 | /* |
73 | * Helper functions that are the same for both PPC64 and PPC32. | |
74 | */ | |
8fd6e5a8 SR |
/*
 * Can a relative branch at @ip reach @addr?  Non-zero when the target
 * is within the 24-bit displacement range of a 'b'/'bl'.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	/* create_branch() returns 0 when the offset does not fit */
	unsigned int op = create_branch((unsigned int *)ip, addr, 0);

	return op;
}
81 | ||
17be5b3d SR |
82 | #ifdef CONFIG_MODULES |
83 | ||
f48cb8b4 SR |
/*
 * Does @op encode a 'bl'?  An I-form branch has primary opcode 18;
 * we require LK=1 (link) and AA=0 (relative), i.e. low bits == 01.
 */
static int is_bl_op(unsigned int op)
{
	return (op >> 26) == 18 && (op & 0x3) == 0x1;
}
88 | ||
f48cb8b4 SR |
/*
 * Decode the signed 24-bit displacement of a relative 'b'/'bl' at @ip
 * and return the absolute branch target.
 *
 * Fix: 'offset' was declared 'static' for no reason, turning a pure
 * helper into one with hidden shared state (not reentrant/SMP-safe);
 * it is now an ordinary automatic variable.
 */
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	/* LI field: bits 6-29, already word-aligned (<< 2) */
	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
100 | ||
f48cb8b4 SR |
101 | #ifdef CONFIG_PPC64 |
102 | static int | |
103 | __ftrace_make_nop(struct module *mod, | |
104 | struct dyn_ftrace *rec, unsigned long addr) | |
105 | { | |
d9af12b7 SR |
106 | unsigned int op; |
107 | unsigned int jmp[5]; | |
108 | unsigned long ptr; | |
f48cb8b4 SR |
109 | unsigned long ip = rec->ip; |
110 | unsigned long tramp; | |
111 | int offset; | |
112 | ||
113 | /* read where this goes */ | |
d9af12b7 | 114 | if (probe_kernel_read(&op, (void *)ip, sizeof(int))) |
f48cb8b4 SR |
115 | return -EFAULT; |
116 | ||
117 | /* Make sure that that this is still a 24bit jump */ | |
d9af12b7 SR |
118 | if (!is_bl_op(op)) { |
119 | printk(KERN_ERR "Not expected bl: opcode is %x\n", op); | |
f48cb8b4 SR |
120 | return -EINVAL; |
121 | } | |
122 | ||
123 | /* lets find where the pointer goes */ | |
d9af12b7 | 124 | tramp = find_bl_target(ip, op); |
f48cb8b4 SR |
125 | |
126 | /* | |
127 | * On PPC64 the trampoline looks like: | |
128 | * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high> | |
129 | * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low> | |
130 | * Where the bytes 2,3,6 and 7 make up the 32bit offset | |
131 | * to the TOC that holds the pointer. | |
132 | * to jump to. | |
133 | * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1) | |
134 | * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12) | |
135 | * The actually address is 32 bytes from the offset | |
136 | * into the TOC. | |
137 | * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) | |
138 | */ | |
139 | ||
021376a3 | 140 | pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); |
f48cb8b4 SR |
141 | |
142 | /* Find where the trampoline jumps to */ | |
d9af12b7 | 143 | if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { |
f48cb8b4 SR |
144 | printk(KERN_ERR "Failed to read %lx\n", tramp); |
145 | return -EFAULT; | |
146 | } | |
147 | ||
021376a3 | 148 | pr_devel(" %08x %08x", jmp[0], jmp[1]); |
d9af12b7 SR |
149 | |
150 | /* verify that this is what we expect it to be */ | |
151 | if (((jmp[0] & 0xffff0000) != 0x3d820000) || | |
152 | ((jmp[1] & 0xffff0000) != 0x398c0000) || | |
153 | (jmp[2] != 0xf8410028) || | |
154 | (jmp[3] != 0xe96c0020) || | |
155 | (jmp[4] != 0xe84c0028)) { | |
156 | printk(KERN_ERR "Not a trampoline\n"); | |
157 | return -EINVAL; | |
158 | } | |
f48cb8b4 | 159 | |
f25f9074 SR |
160 | /* The bottom half is signed extended */ |
161 | offset = ((unsigned)((unsigned short)jmp[0]) << 16) + | |
162 | (int)((short)jmp[1]); | |
f48cb8b4 | 163 | |
021376a3 | 164 | pr_devel(" %x ", offset); |
f48cb8b4 SR |
165 | |
166 | /* get the address this jumps too */ | |
167 | tramp = mod->arch.toc + offset + 32; | |
021376a3 | 168 | pr_devel("toc: %lx", tramp); |
f48cb8b4 SR |
169 | |
170 | if (probe_kernel_read(jmp, (void *)tramp, 8)) { | |
171 | printk(KERN_ERR "Failed to read %lx\n", tramp); | |
172 | return -EFAULT; | |
173 | } | |
174 | ||
021376a3 | 175 | pr_devel(" %08x %08x\n", jmp[0], jmp[1]); |
d9af12b7 SR |
176 | |
177 | ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; | |
f48cb8b4 SR |
178 | |
179 | /* This should match what was called */ | |
4a9e3f8e | 180 | if (ptr != ppc_function_entry((void *)addr)) { |
d9af12b7 | 181 | printk(KERN_ERR "addr does not match %lx\n", ptr); |
f48cb8b4 SR |
182 | return -EINVAL; |
183 | } | |
184 | ||
185 | /* | |
186 | * We want to nop the line, but the next line is | |
187 | * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) | |
188 | * This needs to be turned to a nop too. | |
189 | */ | |
d9af12b7 | 190 | if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) |
f48cb8b4 SR |
191 | return -EFAULT; |
192 | ||
d9af12b7 SR |
193 | if (op != 0xe8410028) { |
194 | printk(KERN_ERR "Next line is not ld! (%08x)\n", op); | |
f48cb8b4 SR |
195 | return -EINVAL; |
196 | } | |
197 | ||
198 | /* | |
199 | * Milton Miller pointed out that we can not blindly do nops. | |
200 | * If a task was preempted when calling a trace function, | |
201 | * the nops will remove the way to restore the TOC in r2 | |
202 | * and the r2 TOC will get corrupted. | |
203 | */ | |
204 | ||
205 | /* | |
206 | * Replace: | |
207 | * bl <tramp> <==== will be replaced with "b 1f" | |
208 | * ld r2,40(r1) | |
209 | * 1: | |
210 | */ | |
d9af12b7 | 211 | op = 0x48000008; /* b +8 */ |
f48cb8b4 | 212 | |
65b8c722 | 213 | if (patch_instruction((unsigned int *)ip, op)) |
f48cb8b4 SR |
214 | return -EPERM; |
215 | ||
216 | return 0; | |
217 | } | |
218 | ||
219 | #else /* !PPC64 */ | |
220 | static int | |
221 | __ftrace_make_nop(struct module *mod, | |
222 | struct dyn_ftrace *rec, unsigned long addr) | |
223 | { | |
d9af12b7 SR |
224 | unsigned int op; |
225 | unsigned int jmp[4]; | |
7cc45e64 SR |
226 | unsigned long ip = rec->ip; |
227 | unsigned long tramp; | |
7cc45e64 | 228 | |
d9af12b7 | 229 | if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) |
7cc45e64 SR |
230 | return -EFAULT; |
231 | ||
232 | /* Make sure that that this is still a 24bit jump */ | |
d9af12b7 SR |
233 | if (!is_bl_op(op)) { |
234 | printk(KERN_ERR "Not expected bl: opcode is %x\n", op); | |
7cc45e64 SR |
235 | return -EINVAL; |
236 | } | |
237 | ||
238 | /* lets find where the pointer goes */ | |
d9af12b7 | 239 | tramp = find_bl_target(ip, op); |
7cc45e64 SR |
240 | |
241 | /* | |
242 | * On PPC32 the trampoline looks like: | |
fd5a4298 | 243 | * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha |
244 | * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l | |
245 | * 0x7d, 0x89, 0x03, 0xa6 mtctr r12 | |
d9af12b7 | 246 | * 0x4e, 0x80, 0x04, 0x20 bctr |
7cc45e64 SR |
247 | */ |
248 | ||
021376a3 | 249 | pr_devel("ip:%lx jumps to %lx", ip, tramp); |
7cc45e64 SR |
250 | |
251 | /* Find where the trampoline jumps to */ | |
d9af12b7 | 252 | if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { |
7cc45e64 SR |
253 | printk(KERN_ERR "Failed to read %lx\n", tramp); |
254 | return -EFAULT; | |
255 | } | |
256 | ||
021376a3 | 257 | pr_devel(" %08x %08x ", jmp[0], jmp[1]); |
d9af12b7 SR |
258 | |
259 | /* verify that this is what we expect it to be */ | |
fd5a4298 | 260 | if (((jmp[0] & 0xffff0000) != 0x3d800000) || |
261 | ((jmp[1] & 0xffff0000) != 0x398c0000) || | |
262 | (jmp[2] != 0x7d8903a6) || | |
d9af12b7 SR |
263 | (jmp[3] != 0x4e800420)) { |
264 | printk(KERN_ERR "Not a trampoline\n"); | |
265 | return -EINVAL; | |
266 | } | |
7cc45e64 | 267 | |
d9af12b7 SR |
268 | tramp = (jmp[1] & 0xffff) | |
269 | ((jmp[0] & 0xffff) << 16); | |
7cc45e64 SR |
270 | if (tramp & 0x8000) |
271 | tramp -= 0x10000; | |
272 | ||
021376a3 | 273 | pr_devel(" %lx ", tramp); |
7cc45e64 SR |
274 | |
275 | if (tramp != addr) { | |
276 | printk(KERN_ERR | |
277 | "Trampoline location %08lx does not match addr\n", | |
278 | tramp); | |
279 | return -EINVAL; | |
280 | } | |
281 | ||
16c57b36 | 282 | op = PPC_INST_NOP; |
7cc45e64 | 283 | |
65b8c722 | 284 | if (patch_instruction((unsigned int *)ip, op)) |
7cc45e64 SR |
285 | return -EPERM; |
286 | ||
f48cb8b4 SR |
287 | return 0; |
288 | } | |
289 | #endif /* PPC64 */ | |
17be5b3d | 290 | #endif /* CONFIG_MODULES */ |
f48cb8b4 | 291 | |
8fd6e5a8 SR |
292 | int ftrace_make_nop(struct module *mod, |
293 | struct dyn_ftrace *rec, unsigned long addr) | |
294 | { | |
f48cb8b4 | 295 | unsigned long ip = rec->ip; |
b54dcfe1 | 296 | unsigned int old, new; |
8fd6e5a8 SR |
297 | |
298 | /* | |
299 | * If the calling address is more that 24 bits away, | |
300 | * then we had to use a trampoline to make the call. | |
301 | * Otherwise just update the call site. | |
302 | */ | |
f48cb8b4 | 303 | if (test_24bit_addr(ip, addr)) { |
8fd6e5a8 | 304 | /* within range */ |
46542888 | 305 | old = ftrace_call_replace(ip, addr, 1); |
92e02a51 | 306 | new = PPC_INST_NOP; |
f48cb8b4 SR |
307 | return ftrace_modify_code(ip, old, new); |
308 | } | |
309 | ||
17be5b3d | 310 | #ifdef CONFIG_MODULES |
f48cb8b4 SR |
311 | /* |
312 | * Out of range jumps are called from modules. | |
313 | * We should either already have a pointer to the module | |
314 | * or it has been passed in. | |
315 | */ | |
316 | if (!rec->arch.mod) { | |
317 | if (!mod) { | |
318 | printk(KERN_ERR "No module loaded addr=%lx\n", | |
319 | addr); | |
320 | return -EFAULT; | |
321 | } | |
322 | rec->arch.mod = mod; | |
323 | } else if (mod) { | |
324 | if (mod != rec->arch.mod) { | |
325 | printk(KERN_ERR | |
326 | "Record mod %p not equal to passed in mod %p\n", | |
327 | rec->arch.mod, mod); | |
328 | return -EINVAL; | |
329 | } | |
330 | /* nothing to do if mod == rec->arch.mod */ | |
331 | } else | |
332 | mod = rec->arch.mod; | |
f48cb8b4 SR |
333 | |
334 | return __ftrace_make_nop(mod, rec, addr); | |
17be5b3d SR |
335 | #else |
336 | /* We should not get here without modules */ | |
337 | return -EINVAL; | |
338 | #endif /* CONFIG_MODULES */ | |
f48cb8b4 SR |
339 | } |
340 | ||
17be5b3d | 341 | #ifdef CONFIG_MODULES |
f48cb8b4 SR |
342 | #ifdef CONFIG_PPC64 |
343 | static int | |
344 | __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |
345 | { | |
d9af12b7 | 346 | unsigned int op[2]; |
f48cb8b4 | 347 | unsigned long ip = rec->ip; |
f48cb8b4 SR |
348 | |
349 | /* read where this goes */ | |
d9af12b7 | 350 | if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) |
f48cb8b4 SR |
351 | return -EFAULT; |
352 | ||
353 | /* | |
354 | * It should be pointing to two nops or | |
355 | * b +8; ld r2,40(r1) | |
356 | */ | |
357 | if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && | |
16c57b36 | 358 | ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) { |
f48cb8b4 SR |
359 | printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); |
360 | return -EINVAL; | |
361 | } | |
362 | ||
363 | /* If we never set up a trampoline to ftrace_caller, then bail */ | |
364 | if (!rec->arch.mod->arch.tramp) { | |
365 | printk(KERN_ERR "No ftrace trampoline\n"); | |
366 | return -EINVAL; | |
367 | } | |
368 | ||
0029ff87 SR |
369 | /* create the branch to the trampoline */ |
370 | op[0] = create_branch((unsigned int *)ip, | |
371 | rec->arch.mod->arch.tramp, BRANCH_SET_LINK); | |
372 | if (!op[0]) { | |
373 | printk(KERN_ERR "REL24 out of range!\n"); | |
f48cb8b4 | 374 | return -EINVAL; |
8fd6e5a8 SR |
375 | } |
376 | ||
f48cb8b4 SR |
377 | /* ld r2,40(r1) */ |
378 | op[1] = 0xe8410028; | |
379 | ||
021376a3 | 380 | pr_devel("write to %lx\n", rec->ip); |
f48cb8b4 | 381 | |
d9af12b7 | 382 | if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) |
f48cb8b4 SR |
383 | return -EPERM; |
384 | ||
ec682cef SR |
385 | flush_icache_range(ip, ip + 8); |
386 | ||
8fd6e5a8 SR |
387 | return 0; |
388 | } | |
f48cb8b4 SR |
389 | #else |
390 | static int | |
391 | __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |
392 | { | |
d9af12b7 | 393 | unsigned int op; |
7cc45e64 | 394 | unsigned long ip = rec->ip; |
7cc45e64 SR |
395 | |
396 | /* read where this goes */ | |
d9af12b7 | 397 | if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) |
7cc45e64 SR |
398 | return -EFAULT; |
399 | ||
400 | /* It should be pointing to a nop */ | |
16c57b36 | 401 | if (op != PPC_INST_NOP) { |
d9af12b7 | 402 | printk(KERN_ERR "Expected NOP but have %x\n", op); |
7cc45e64 SR |
403 | return -EINVAL; |
404 | } | |
405 | ||
406 | /* If we never set up a trampoline to ftrace_caller, then bail */ | |
407 | if (!rec->arch.mod->arch.tramp) { | |
408 | printk(KERN_ERR "No ftrace trampoline\n"); | |
409 | return -EINVAL; | |
410 | } | |
411 | ||
0029ff87 SR |
412 | /* create the branch to the trampoline */ |
413 | op = create_branch((unsigned int *)ip, | |
414 | rec->arch.mod->arch.tramp, BRANCH_SET_LINK); | |
415 | if (!op) { | |
416 | printk(KERN_ERR "REL24 out of range!\n"); | |
7cc45e64 SR |
417 | return -EINVAL; |
418 | } | |
419 | ||
021376a3 | 420 | pr_devel("write to %lx\n", rec->ip); |
7cc45e64 | 421 | |
65b8c722 | 422 | if (patch_instruction((unsigned int *)ip, op)) |
7cc45e64 SR |
423 | return -EPERM; |
424 | ||
f48cb8b4 SR |
425 | return 0; |
426 | } | |
427 | #endif /* CONFIG_PPC64 */ | |
17be5b3d | 428 | #endif /* CONFIG_MODULES */ |
8fd6e5a8 SR |
429 | |
430 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |
431 | { | |
f48cb8b4 | 432 | unsigned long ip = rec->ip; |
b54dcfe1 | 433 | unsigned int old, new; |
8fd6e5a8 SR |
434 | |
435 | /* | |
436 | * If the calling address is more that 24 bits away, | |
437 | * then we had to use a trampoline to make the call. | |
438 | * Otherwise just update the call site. | |
439 | */ | |
f48cb8b4 | 440 | if (test_24bit_addr(ip, addr)) { |
8fd6e5a8 | 441 | /* within range */ |
92e02a51 | 442 | old = PPC_INST_NOP; |
46542888 | 443 | new = ftrace_call_replace(ip, addr, 1); |
f48cb8b4 | 444 | return ftrace_modify_code(ip, old, new); |
8fd6e5a8 SR |
445 | } |
446 | ||
17be5b3d | 447 | #ifdef CONFIG_MODULES |
f48cb8b4 SR |
448 | /* |
449 | * Out of range jumps are called from modules. | |
450 | * Being that we are converting from nop, it had better | |
451 | * already have a module defined. | |
452 | */ | |
453 | if (!rec->arch.mod) { | |
454 | printk(KERN_ERR "No module loaded\n"); | |
455 | return -EINVAL; | |
456 | } | |
f48cb8b4 SR |
457 | |
458 | return __ftrace_make_call(rec, addr); | |
17be5b3d SR |
459 | #else |
460 | /* We should not get here without modules */ | |
461 | return -EINVAL; | |
462 | #endif /* CONFIG_MODULES */ | |
8fd6e5a8 SR |
463 | } |
464 | ||
15adc048 | 465 | int ftrace_update_ftrace_func(ftrace_func_t func) |
4e491d14 SR |
466 | { |
467 | unsigned long ip = (unsigned long)(&ftrace_call); | |
b54dcfe1 | 468 | unsigned int old, new; |
4e491d14 SR |
469 | int ret; |
470 | ||
b54dcfe1 | 471 | old = *(unsigned int *)&ftrace_call; |
46542888 | 472 | new = ftrace_call_replace(ip, (unsigned long)func, 1); |
4e491d14 SR |
473 | ret = ftrace_modify_code(ip, old, new); |
474 | ||
475 | return ret; | |
476 | } | |
477 | ||
ee456bb3 SR |
478 | static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
479 | { | |
480 | unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR; | |
481 | int ret; | |
482 | ||
483 | ret = ftrace_update_record(rec, enable); | |
484 | ||
485 | switch (ret) { | |
486 | case FTRACE_UPDATE_IGNORE: | |
487 | return 0; | |
488 | case FTRACE_UPDATE_MAKE_CALL: | |
489 | return ftrace_make_call(rec, ftrace_addr); | |
490 | case FTRACE_UPDATE_MAKE_NOP: | |
491 | return ftrace_make_nop(NULL, rec, ftrace_addr); | |
492 | } | |
493 | ||
494 | return 0; | |
495 | } | |
496 | ||
497 | void ftrace_replace_code(int enable) | |
498 | { | |
499 | struct ftrace_rec_iter *iter; | |
500 | struct dyn_ftrace *rec; | |
501 | int ret; | |
502 | ||
503 | for (iter = ftrace_rec_iter_start(); iter; | |
504 | iter = ftrace_rec_iter_next(iter)) { | |
505 | rec = ftrace_rec_iter_record(iter); | |
506 | ret = __ftrace_replace_code(rec, enable); | |
507 | if (ret) { | |
508 | ftrace_bug(ret, rec->ip); | |
509 | return; | |
510 | } | |
511 | } | |
512 | } | |
513 | ||
514 | void arch_ftrace_update_code(int command) | |
515 | { | |
516 | if (command & FTRACE_UPDATE_CALLS) | |
517 | ftrace_replace_code(1); | |
518 | else if (command & FTRACE_DISABLE_CALLS) | |
519 | ftrace_replace_code(0); | |
520 | ||
521 | if (command & FTRACE_UPDATE_TRACE_FUNC) | |
522 | ftrace_update_ftrace_func(ftrace_trace_function); | |
523 | ||
524 | if (command & FTRACE_START_FUNC_RET) | |
525 | ftrace_enable_ftrace_graph_caller(); | |
526 | else if (command & FTRACE_STOP_FUNC_RET) | |
527 | ftrace_disable_ftrace_graph_caller(); | |
528 | } | |
529 | ||
6fa3eb70 | 530 | int __init ftrace_dyn_arch_init(void) |
4e491d14 | 531 | { |
4e491d14 SR |
532 | return 0; |
533 | } | |
6794c782 SR |
534 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
535 | ||
536 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
537 | ||
46542888 SR |
538 | #ifdef CONFIG_DYNAMIC_FTRACE |
539 | extern void ftrace_graph_call(void); | |
540 | extern void ftrace_graph_stub(void); | |
541 | ||
542 | int ftrace_enable_ftrace_graph_caller(void) | |
543 | { | |
544 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | |
545 | unsigned long addr = (unsigned long)(&ftrace_graph_caller); | |
546 | unsigned long stub = (unsigned long)(&ftrace_graph_stub); | |
b54dcfe1 | 547 | unsigned int old, new; |
46542888 | 548 | |
b54dcfe1 | 549 | old = ftrace_call_replace(ip, stub, 0); |
46542888 SR |
550 | new = ftrace_call_replace(ip, addr, 0); |
551 | ||
552 | return ftrace_modify_code(ip, old, new); | |
553 | } | |
554 | ||
555 | int ftrace_disable_ftrace_graph_caller(void) | |
556 | { | |
557 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | |
558 | unsigned long addr = (unsigned long)(&ftrace_graph_caller); | |
559 | unsigned long stub = (unsigned long)(&ftrace_graph_stub); | |
b54dcfe1 | 560 | unsigned int old, new; |
46542888 | 561 | |
b54dcfe1 | 562 | old = ftrace_call_replace(ip, addr, 0); |
46542888 SR |
563 | new = ftrace_call_replace(ip, stub, 0); |
564 | ||
565 | return ftrace_modify_code(ip, old, new); | |
566 | } | |
567 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
568 | ||
bb725340 SR |
569 | #ifdef CONFIG_PPC64 |
570 | extern void mod_return_to_handler(void); | |
571 | #endif | |
572 | ||
6794c782 SR |
573 | /* |
574 | * Hook the return address and push it in the stack of return addrs | |
575 | * in current thread info. | |
576 | */ | |
577 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |
578 | { | |
579 | unsigned long old; | |
6794c782 SR |
580 | int faulted; |
581 | struct ftrace_graph_ent trace; | |
bb725340 | 582 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
6794c782 SR |
583 | |
584 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | |
585 | return; | |
586 | ||
f4952f6c | 587 | #ifdef CONFIG_PPC64 |
bb725340 SR |
588 | /* non core kernel code needs to save and restore the TOC */ |
589 | if (REGION_ID(self_addr) != KERNEL_REGION_ID) | |
590 | return_hooker = (unsigned long)&mod_return_to_handler; | |
591 | #endif | |
592 | ||
4a9e3f8e | 593 | return_hooker = ppc_function_entry((void *)return_hooker); |
6794c782 SR |
594 | |
595 | /* | |
596 | * Protect against fault, even if it shouldn't | |
597 | * happen. This tool is too much intrusive to | |
598 | * ignore such a protection. | |
599 | */ | |
600 | asm volatile( | |
601 | "1: " PPC_LL "%[old], 0(%[parent])\n" | |
602 | "2: " PPC_STL "%[return_hooker], 0(%[parent])\n" | |
603 | " li %[faulted], 0\n" | |
fad4f47c | 604 | "3:\n" |
6794c782 SR |
605 | |
606 | ".section .fixup, \"ax\"\n" | |
607 | "4: li %[faulted], 1\n" | |
608 | " b 3b\n" | |
609 | ".previous\n" | |
610 | ||
611 | ".section __ex_table,\"a\"\n" | |
612 | PPC_LONG_ALIGN "\n" | |
613 | PPC_LONG "1b,4b\n" | |
614 | PPC_LONG "2b,4b\n" | |
615 | ".previous" | |
616 | ||
c3cf8667 | 617 | : [old] "=&r" (old), [faulted] "=r" (faulted) |
6794c782 SR |
618 | : [parent] "r" (parent), [return_hooker] "r" (return_hooker) |
619 | : "memory" | |
620 | ); | |
621 | ||
622 | if (unlikely(faulted)) { | |
623 | ftrace_graph_stop(); | |
624 | WARN_ON(1); | |
625 | return; | |
626 | } | |
627 | ||
6794c782 | 628 | trace.func = self_addr; |
bac821a6 | 629 | trace.depth = current->curr_ret_stack + 1; |
6794c782 SR |
630 | |
631 | /* Only trace if the calling function expects to */ | |
632 | if (!ftrace_graph_entry(&trace)) { | |
6794c782 | 633 | *parent = old; |
bac821a6 | 634 | return; |
6794c782 | 635 | } |
bac821a6 SR |
636 | |
637 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) | |
638 | *parent = old; | |
6794c782 SR |
639 | } |
640 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
02424d89 IM |
641 | |
642 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | |
643 | unsigned long __init arch_syscall_addr(int nr) | |
644 | { | |
645 | return sys_call_table[nr*2]; | |
646 | } | |
647 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ |