Commit | Line | Data |
---|---|---|
4e491d14 SR |
1 | /* |
2 | * Code for replacing ftrace calls with jumps. | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box. | |
7 | * | |
6794c782 SR |
8 | * Added function graph tracer code, taken from x86 that was written |
9 | * by Frederic Weisbecker, and ported to PPC by Steven Rostedt. | |
10 | * | |
4e491d14 SR |
11 | */ |
12 | ||
13 | #include <linux/spinlock.h> | |
14 | #include <linux/hardirq.h> | |
e4486fe3 | 15 | #include <linux/uaccess.h> |
f48cb8b4 | 16 | #include <linux/module.h> |
4e491d14 SR |
17 | #include <linux/ftrace.h> |
18 | #include <linux/percpu.h> | |
19 | #include <linux/init.h> | |
20 | #include <linux/list.h> | |
21 | ||
22 | #include <asm/cacheflush.h> | |
f48cb8b4 | 23 | #include <asm/code-patching.h> |
395a59d0 | 24 | #include <asm/ftrace.h> |
02424d89 | 25 | #include <asm/syscall.h> |
4e491d14 | 26 | |
4e491d14 | 27 | |
6794c782 | 28 | #ifdef CONFIG_DYNAMIC_FTRACE |
/*
 * Build the branch instruction that replaces an ftrace call site.
 *
 * @ip:   address of the call site being patched
 * @addr: target function address (adjusted to its real entry point
 *        via ppc_function_entry(), which on PPC64 resolves the
 *        function descriptor / local entry)
 * @link: non-zero to emit 'bl' (branch and link), zero for plain 'b'
 *
 * Returns the encoded branch instruction, or 0 if create_branch()
 * could not encode the offset (target out of 24-bit range).
 */
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}
41 | ||
/*
 * Atomically-ish replace the instruction at @ip with @new, but only if
 * the instruction currently there matches @old.
 *
 * Returns 0 on success, -EFAULT if @ip cannot be read, -EINVAL if the
 * current instruction is not @old, -EPERM if patching failed.
 */
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}
71 | ||
f48cb8b4 SR |
72 | /* |
73 | * Helper functions that are the same for both PPC64 and PPC32. | |
74 | */ | |
/*
 * Return non-zero if a branch from @ip to @addr (adjusted to the
 * function's actual entry point) can be encoded as a 24-bit relative
 * branch; zero if the target is out of range.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use the create_branch to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
82 | ||
17be5b3d SR |
83 | #ifdef CONFIG_MODULES |
84 | ||
/*
 * Return non-zero when @op encodes an absolute-address-clear "bl"
 * (I-form branch, opcode 18, AA=0, LK=1); zero otherwise.
 */
static int is_bl_op(unsigned int op)
{
	const unsigned int mask = 0xfc000003;	/* opcode + AA + LK bits */
	const unsigned int bl_insn = 0x48000001;	/* opcode 18, AA=0, LK=1 */

	return (op & mask) == bl_insn;
}
89 | ||
/*
 * Given the address @ip of a "bl"/"b" instruction and its opcode @op,
 * return the absolute branch target.
 *
 * The 24-bit LI field (bits masked by 0x03fffffc) is a signed,
 * word-aligned offset relative to the instruction address; sign-extend
 * it before adding to @ip.
 *
 * Fix: @offset was declared "static", giving it file-scope lifetime
 * shared across calls for no reason and making the helper non-reentrant.
 * It is now an ordinary automatic local.
 */
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
101 | ||
f48cb8b4 SR |
102 | #ifdef CONFIG_PPC64 |
/*
 * PPC64: turn a module call site (a "bl <trampoline>") back into a
 * no-op.  Verifies the site still branches to a module trampoline that
 * targets @addr before patching.
 *
 * Returns 0 on success, -EFAULT on read/translation failure, -EINVAL
 * when the site does not look as expected, -EPERM if patching failed.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ptr;
	unsigned long ip = rec->ip;
	void *tramp;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = (void *)find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %p", ip, tramp);

	if (!is_module_trampoline(tramp)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	if (module_trampoline_target(mod, tramp, &ptr)) {
		printk(KERN_ERR "Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	/* This should match what was called */
	if (ptr != ppc_function_entry((void *)addr)) {
		printk(KERN_ERR "addr %lx does not match expected %lx\n",
		       ptr, ppc_function_entry((void *)addr));
		return -EINVAL;
	}

	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	op = 0x48000008;	/* b +8 */

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
166 | ||
167 | #else /* !PPC64 */ | |
/*
 * PPC32: turn a module call site back into a nop.  Reads the trampoline
 * the "bl" targets, verifies it is the expected lis/addi/mtctr/bctr
 * sequence jumping to @addr, then patches the call site with a nop.
 *
 * Returns 0 on success, -EFAULT on read failure, -EINVAL when the site
 * or trampoline does not match expectations, -EPERM if patching failed.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 * 0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 * 0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 * 0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 * 0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/*
	 * Reassemble the target from the lis/addi immediates; the @ha
	 * form pre-compensates for a negative low half, so undo that by
	 * subtracting 0x10000 when the low 16 bits have the sign bit set.
	 */
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
237 | #endif /* PPC64 */ | |
17be5b3d | 238 | #endif /* CONFIG_MODULES */ |
f48cb8b4 | 239 | |
8fd6e5a8 SR |
/*
 * Arch entry point: convert the call site recorded in @rec into a nop.
 *
 * In-range call sites (core kernel) are patched directly; out-of-range
 * sites must belong to a module and go through __ftrace_make_nop().
 * Also caches/validates the owning module in rec->arch.mod.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
288 | ||
17be5b3d | 289 | #ifdef CONFIG_MODULES |
f48cb8b4 SR |
290 | #ifdef CONFIG_PPC64 |
291 | static int | |
292 | __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |
293 | { | |
d9af12b7 | 294 | unsigned int op[2]; |
24a1bdc3 | 295 | void *ip = (void *)rec->ip; |
f48cb8b4 SR |
296 | |
297 | /* read where this goes */ | |
24a1bdc3 | 298 | if (probe_kernel_read(op, ip, sizeof(op))) |
f48cb8b4 SR |
299 | return -EFAULT; |
300 | ||
301 | /* | |
24a1bdc3 AB |
302 | * We expect to see: |
303 | * | |
304 | * b +8 | |
305 | * ld r2,XX(r1) | |
306 | * | |
307 | * The load offset is different depending on the ABI. For simplicity | |
308 | * just mask it out when doing the compare. | |
f48cb8b4 | 309 | */ |
dfc382a1 | 310 | if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) { |
24a1bdc3 AB |
311 | printk(KERN_ERR "Unexpected call sequence: %x %x\n", |
312 | op[0], op[1]); | |
f48cb8b4 SR |
313 | return -EINVAL; |
314 | } | |
315 | ||
316 | /* If we never set up a trampoline to ftrace_caller, then bail */ | |
317 | if (!rec->arch.mod->arch.tramp) { | |
318 | printk(KERN_ERR "No ftrace trampoline\n"); | |
319 | return -EINVAL; | |
320 | } | |
321 | ||
24a1bdc3 AB |
322 | /* Ensure branch is within 24 bits */ |
323 | if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { | |
324 | printk(KERN_ERR "Branch out of range"); | |
f48cb8b4 | 325 | return -EINVAL; |
8fd6e5a8 SR |
326 | } |
327 | ||
24a1bdc3 AB |
328 | if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { |
329 | printk(KERN_ERR "REL24 out of range!\n"); | |
330 | return -EINVAL; | |
331 | } | |
ec682cef | 332 | |
8fd6e5a8 SR |
333 | return 0; |
334 | } | |
f48cb8b4 SR |
335 | #else |
/*
 * PPC32: convert a nopped-out module call site back into a "bl" to the
 * module's ftrace trampoline.
 *
 * Returns 0 on success, -EFAULT on read failure, -EINVAL when the site
 * is not a nop, no trampoline exists, or the branch is out of range,
 * -EPERM if patching failed.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		/* create_branch() returns 0 when the offset cannot be encoded */
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
373 | #endif /* CONFIG_PPC64 */ | |
17be5b3d | 374 | #endif /* CONFIG_MODULES */ |
8fd6e5a8 SR |
375 | |
/*
 * Arch entry point: convert the nop at @rec->ip into a call to @addr.
 *
 * In-range sites are patched directly; out-of-range sites must belong
 * to an already-known module (rec->arch.mod set by ftrace_make_nop())
 * and go through __ftrace_make_call().
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
410 | ||
/*
 * Redirect the global ftrace_call site to @func by swapping the branch
 * instruction in place (expected-old vs. new compare-and-patch).
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
423 | ||
ee456bb3 SR |
/*
 * Apply the update the ftrace core decided on for one record:
 * make it a call, make it a nop, or leave it alone.
 */
static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}
442 | ||
/*
 * Walk every ftrace record and enable/disable its call site.
 * On the first failure, report via ftrace_bug() and stop.
 */
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec->ip);
			return;
		}
	}
}
459 | ||
/*
 * Dispatch an ftrace core @command bitmask to the arch patching
 * primitives: enable/disable call sites, swap the trace function,
 * and start/stop the function-graph caller.
 */
void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}
475 | ||
/* Arch hook for dynamic-ftrace init; nothing to do on powerpc. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
6794c782 SR |
480 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
481 | ||
482 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
483 | ||
46542888 SR |
484 | #ifdef CONFIG_DYNAMIC_FTRACE |
485 | extern void ftrace_graph_call(void); | |
486 | extern void ftrace_graph_stub(void); | |
487 | ||
/*
 * Patch the branch at ftrace_graph_call to jump to ftrace_graph_caller
 * instead of the stub, enabling function-graph tracing.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}
500 | ||
/*
 * Patch the branch at ftrace_graph_call back to the stub, disabling
 * function-graph tracing (inverse of the enable path above).
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
513 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
514 | ||
bb725340 SR |
515 | #ifdef CONFIG_PPC64 |
516 | extern void mod_return_to_handler(void); | |
517 | #endif | |
518 | ||
6794c782 SR |
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent:    location on the stack holding the caller's return address
 * @self_addr: address of the traced function
 *
 * Replaces *parent with return_to_handler (or mod_return_to_handler for
 * non-core-kernel addresses on PPC64, which must restore the TOC) so
 * the function's return is intercepted.  The store is wrapped in an
 * exception-table fixup so a bad @parent sets 'faulted' instead of
 * oopsing; in that case graph tracing is shut down with a warning.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

#ifdef CONFIG_PPC64
	/* non core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = ppc_function_entry((void *)return_hooker);

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	/* restore the original return address if the push fails */
	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
		*parent = old;
}
586 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
02424d89 IM |
587 | |
588 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | |
/*
 * Return the address of syscall @nr's handler.
 * NOTE(review): indexes sys_call_table at nr*2 — presumably because the
 * PPC64 table interleaves native and compat entries; verify against the
 * sys_call_table layout for this kernel version.
 */
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
593 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ |