Commit | Line | Data |
---|---|---|
4e491d14 SR |
1 | /* |
2 | * Code for replacing ftrace calls with jumps. | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box. | |
7 | * | |
6794c782 SR |
8 | * Added function graph tracer code, taken from x86 that was written |
9 | * by Frederic Weisbecker, and ported to PPC by Steven Rostedt. | |
10 | * | |
4e491d14 SR |
11 | */ |
12 | ||
13 | #include <linux/spinlock.h> | |
14 | #include <linux/hardirq.h> | |
e4486fe3 | 15 | #include <linux/uaccess.h> |
f48cb8b4 | 16 | #include <linux/module.h> |
4e491d14 SR |
17 | #include <linux/ftrace.h> |
18 | #include <linux/percpu.h> | |
19 | #include <linux/init.h> | |
20 | #include <linux/list.h> | |
21 | ||
22 | #include <asm/cacheflush.h> | |
f48cb8b4 | 23 | #include <asm/code-patching.h> |
395a59d0 | 24 | #include <asm/ftrace.h> |
4e491d14 | 25 | |
4e491d14 SR |
#ifdef CONFIG_PPC32
/* On PPC32 a function symbol is the entry point itself; use it directly. */
# define GET_ADDR(addr) addr
#else
/* PowerPC64's functions are data that points to the functions */
/* (the symbol names a function descriptor; dereference to get the entry). */
# define GET_ADDR(addr) (*(unsigned long *)addr)
#endif
32 | ||
6794c782 SR |
33 | #ifdef CONFIG_DYNAMIC_FTRACE |
34 | static unsigned int ftrace_nop = PPC_NOP_INSTR; | |
395a59d0 | 35 | |
/*
 * Displacement from the call site @ip to the target @addr, truncated
 * to 32 bits (only the low bits are used in the branch encoding).
 */
static unsigned int ftrace_calc_offset(long ip, long addr)
{
	long delta = addr - ip;

	return (int)delta;
}
40 | ||
8fd6e5a8 | 41 | static unsigned char *ftrace_nop_replace(void) |
4e491d14 SR |
42 | { |
43 | return (char *)&ftrace_nop; | |
44 | } | |
45 | ||
/*
 * Build the "bl <addr>" instruction that belongs at call site @ip.
 *
 * Returns a pointer to a static buffer holding the encoded instruction.
 * No locking is needed for the static buffer: this is only called via
 * kstop_machine, which in essence is like running on a uniprocessor
 * machine.
 */
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static unsigned int op;
	unsigned long dest;

	/*
	 * It would be nice to just use create_function_call, but that
	 * patches the text in place.  Here we only want the encoding of
	 * the instruction that is going to be written, so build it by
	 * hand: the "bl" opcode pattern plus the masked 24-bit offset.
	 */
	dest = GET_ADDR(addr);
	op = 0x48000001 | (ftrace_calc_offset(ip, dest) & 0x03fffffc);

	return (unsigned char *)&op;
}
67 | ||
#ifdef CONFIG_PPC64
/* 8-byte alignment / pointer directives for inline-asm tables (ppc64). */
# define _ASM_ALIGN " .align 3 "
# define _ASM_PTR " .llong "
#else
/* 4-byte alignment / pointer directives for inline-asm tables (ppc32). */
# define _ASM_ALIGN " .align 2 "
# define _ASM_PTR " .long "
#endif
/* NOTE(review): neither macro is referenced in this file — possibly left
 * over from an earlier revision; confirm before removing. */
75 | ||
/*
 * Replace one mcount-sized instruction at @ip: read the current text,
 * verify it matches @old_code, then write @new_code over it.
 *
 * Returns 0 on success, -EFAULT if the text cannot be read,
 * -EINVAL if it does not match @old_code, -EPERM if the write fails.
 */
static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	/*
	 * NOTE(review): flushes 8 bytes, more than MCOUNT_INSN_SIZE —
	 * presumably to also cover the second instruction that other
	 * patch sites in this file modify; confirm this is intentional.
	 */
	flush_icache_range(ip, ip + 8);

	return 0;
}
108 | ||
f48cb8b4 SR |
109 | /* |
110 | * Helper functions that are the same for both PPC64 and PPC32. | |
111 | */ | |
8fd6e5a8 SR |
/*
 * Non-zero when a plain 24-bit relative branch placed at @ip can reach
 * @addr (callers in this file treat a zero return from create_branch
 * as "REL24 out of range").
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	unsigned int instr;

	/* let create_branch do the range check by attempting the encoding */
	instr = create_branch((unsigned int *)ip, addr, 0);

	return instr;
}
118 | ||
17be5b3d SR |
119 | #ifdef CONFIG_MODULES |
120 | ||
f48cb8b4 SR |
/*
 * True iff @op encodes a "bl" instruction — the same 0x48000001
 * pattern that ftrace_call_replace() emits for "bl addr".
 */
static int is_bl_op(unsigned int op)
{
	unsigned int masked = op & 0xfc000003;

	return masked == 0x48000001;
}
125 | ||
f48cb8b4 SR |
/*
 * Decode the target address of the branch instruction @op located at @ip.
 *
 * Bits 2..25 of the instruction (mask 0x03fffffc) hold a sign-extended,
 * word-aligned displacement relative to @ip.
 *
 * Fix: @offset was declared "static int", which made this pure helper
 * needlessly non-reentrant for no benefit; it is scratch state only, so
 * it is now an ordinary automatic variable.
 */
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed (sign-extend the 26-bit displacement) */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
137 | ||
f48cb8b4 SR |
138 | #ifdef CONFIG_PPC64 |
/*
 * Convert the tracing call at rec->ip back to a no-op (ppc64, module case).
 *
 * The call site is a "bl" to a module trampoline that loads the real
 * function address out of the module's TOC.  Walk the trampoline, verify
 * it really resolves to @addr, then replace the "bl" with "b +8" so the
 * following "ld r2,40(r1)" TOC restore is jumped over rather than nopped
 * (see Milton Miller's note below for why blind nops are unsafe).
 *
 * Returns 0 on success, -EFAULT on a failed text read/probe, -EINVAL if
 * the site or trampoline does not look as expected, -EPERM if the final
 * write fails.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[5];
	unsigned long ptr;
	unsigned long ip = rec->ip;
	unsigned long tramp;
	int offset;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC64 the trampoline looks like:
	 * 0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
	 * 0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
	 * Where the bytes 2,3,6 and 7 make up the 32bit offset
	 * to the TOC that holds the pointer.
	 * to jump to.
	 * 0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
	 * 0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
	 * The actual address is 32 bytes from the offset
	 * into the TOC.
	 * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
	 */

	pr_debug("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_debug(" %08x %08x", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0xf8410028) ||
	    (jmp[3] != 0xe96c0020) ||
	    (jmp[4] != 0xe84c0028)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/* The bottom half is sign extended: combine high<<16 with the
	 * signed low half to recover the 32-bit TOC offset. */
	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
		(int)((short)jmp[1]);

	pr_debug(" %x ", offset);

	/* get the address this jumps to */
	tramp = mod->arch.toc + offset + 32;
	pr_debug("toc: %lx", tramp);

	/* read the 64-bit function address out of the TOC entry */
	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_debug(" %08x %08x\n", jmp[0], jmp[1]);

	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];

	/* This should match what was called */
	if (ptr != GET_ADDR(addr)) {
		printk(KERN_ERR "addr does not match %lx\n", ptr);
		return -EINVAL;
	}

	/*
	 * We want to nop the line, but the next line is
	 *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
	 * This needs to be turned to a nop too.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (op != 0xe8410028) {
		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
		return -EINVAL;
	}

	/*
	 * Milton Miller pointed out that we can not blindly do nops.
	 * If a task was preempted when calling a trace function,
	 * the nops will remove the way to restore the TOC in r2
	 * and the r2 TOC will get corrupted.
	 */

	/*
	 * Replace:
	 *   bl <tramp>  <==== will be replaced with "b 1f"
	 *   ld r2,40(r1)
	 *   1:
	 */
	op = 0x48000008;	/* b +8 */

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;


	flush_icache_range(ip, ip + 8);

	return 0;
}
258 | ||
259 | #else /* !PPC64 */ | |
260 | static int | |
261 | __ftrace_make_nop(struct module *mod, | |
262 | struct dyn_ftrace *rec, unsigned long addr) | |
263 | { | |
d9af12b7 SR |
264 | unsigned int op; |
265 | unsigned int jmp[4]; | |
7cc45e64 SR |
266 | unsigned long ip = rec->ip; |
267 | unsigned long tramp; | |
7cc45e64 | 268 | |
d9af12b7 | 269 | if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) |
7cc45e64 SR |
270 | return -EFAULT; |
271 | ||
272 | /* Make sure that that this is still a 24bit jump */ | |
d9af12b7 SR |
273 | if (!is_bl_op(op)) { |
274 | printk(KERN_ERR "Not expected bl: opcode is %x\n", op); | |
7cc45e64 SR |
275 | return -EINVAL; |
276 | } | |
277 | ||
278 | /* lets find where the pointer goes */ | |
d9af12b7 | 279 | tramp = find_bl_target(ip, op); |
7cc45e64 SR |
280 | |
281 | /* | |
282 | * On PPC32 the trampoline looks like: | |
d9af12b7 SR |
283 | * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha |
284 | * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l | |
285 | * 0x7d, 0x69, 0x03, 0xa6 mtctr r11 | |
286 | * 0x4e, 0x80, 0x04, 0x20 bctr | |
7cc45e64 SR |
287 | */ |
288 | ||
44e1d064 | 289 | pr_debug("ip:%lx jumps to %lx", ip, tramp); |
7cc45e64 SR |
290 | |
291 | /* Find where the trampoline jumps to */ | |
d9af12b7 | 292 | if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { |
7cc45e64 SR |
293 | printk(KERN_ERR "Failed to read %lx\n", tramp); |
294 | return -EFAULT; | |
295 | } | |
296 | ||
44e1d064 | 297 | pr_debug(" %08x %08x ", jmp[0], jmp[1]); |
d9af12b7 SR |
298 | |
299 | /* verify that this is what we expect it to be */ | |
300 | if (((jmp[0] & 0xffff0000) != 0x3d600000) || | |
301 | ((jmp[1] & 0xffff0000) != 0x396b0000) || | |
302 | (jmp[2] != 0x7d6903a6) || | |
303 | (jmp[3] != 0x4e800420)) { | |
304 | printk(KERN_ERR "Not a trampoline\n"); | |
305 | return -EINVAL; | |
306 | } | |
7cc45e64 | 307 | |
d9af12b7 SR |
308 | tramp = (jmp[1] & 0xffff) | |
309 | ((jmp[0] & 0xffff) << 16); | |
7cc45e64 SR |
310 | if (tramp & 0x8000) |
311 | tramp -= 0x10000; | |
312 | ||
44e1d064 | 313 | pr_debug(" %x ", tramp); |
7cc45e64 SR |
314 | |
315 | if (tramp != addr) { | |
316 | printk(KERN_ERR | |
317 | "Trampoline location %08lx does not match addr\n", | |
318 | tramp); | |
319 | return -EINVAL; | |
320 | } | |
321 | ||
d9af12b7 | 322 | op = PPC_NOP_INSTR; |
7cc45e64 | 323 | |
d9af12b7 | 324 | if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE)) |
7cc45e64 SR |
325 | return -EPERM; |
326 | ||
ec682cef SR |
327 | flush_icache_range(ip, ip + 8); |
328 | ||
f48cb8b4 SR |
329 | return 0; |
330 | } | |
331 | #endif /* PPC64 */ | |
17be5b3d | 332 | #endif /* CONFIG_MODULES */ |
f48cb8b4 | 333 | |
8fd6e5a8 SR |
/*
 * Arch entry point for turning the mcount call at rec->ip into a nop.
 *
 * In-range (24-bit reachable) call sites are patched directly; out of
 * range sites must be module calls through a trampoline and are handed
 * to __ftrace_make_nop(), after recording/validating the owning module
 * in rec->arch.mod.
 *
 * Returns 0 on success or a negative errno.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *old, *new;
	unsigned long ip = rec->ip;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr);
		new = ftrace_nop_replace();
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
382 | ||
17be5b3d | 383 | #ifdef CONFIG_MODULES |
f48cb8b4 SR |
384 | #ifdef CONFIG_PPC64 |
/*
 * Enable tracing at rec->ip (ppc64, module case): replace the two-insn
 * nop sequence with a "bl" to the module's ftrace trampoline followed
 * by the "ld r2,40(r1)" TOC restore.
 *
 * @addr is unused here; the branch target is rec->arch.mod->arch.tramp.
 *
 * Returns 0 on success, -EFAULT on a failed text read, -EINVAL if the
 * site is not in the expected state or out of branch range, -EPERM if
 * the write fails.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
		return -EFAULT;

	/*
	 * It should be pointing to two nops or
	 *    b +8; ld r2,40(r1)
	 * (the latter is what __ftrace_make_nop leaves behind)
	 */
	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
	    ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op[0] = create_branch((unsigned int *)ip,
			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op[0]) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	/* ld r2,40(r1) */
	op[1] = 0xe8410028;

	pr_debug("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
f48cb8b4 SR |
431 | #else |
/*
 * Enable tracing at rec->ip (ppc32, module case): replace the nop with
 * a "bl" to the module's ftrace trampoline.
 *
 * @addr is unused here; the branch target is rec->arch.mod->arch.tramp.
 *
 * Returns 0 on success, -EFAULT on a failed text read, -EINVAL if the
 * site is not a nop or out of branch range, -EPERM if the write fails.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_NOP_INSTR) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_debug("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
471 | #endif /* CONFIG_PPC64 */ | |
17be5b3d | 472 | #endif /* CONFIG_MODULES */ |
8fd6e5a8 SR |
473 | |
/*
 * Arch entry point for turning the nop at rec->ip into a tracing call
 * to @addr.  In-range sites are patched directly; out-of-range sites
 * must belong to a module whose record was filled in by
 * ftrace_make_nop(), and go through the module trampoline.
 *
 * Returns 0 on success or a negative errno.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *old, *new;
	unsigned long ip = rec->ip;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_nop_replace();
		new = ftrace_call_replace(ip, addr);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
508 | ||
/*
 * Patch the call instruction at the ftrace_call site so the tracer
 * calls @func instead: the current bytes there serve as the expected
 * "old" code for ftrace_modify_code()'s verify-then-write.
 *
 * Returns whatever ftrace_modify_code() returns (0 or negative errno).
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
521 | ||
4e491d14 SR |
522 | int __init ftrace_dyn_arch_init(void *data) |
523 | { | |
8fd6e5a8 SR |
524 | /* caller expects data to be zero */ |
525 | unsigned long *p = data; | |
4e491d14 | 526 | |
8fd6e5a8 | 527 | *p = 0; |
4e491d14 SR |
528 | |
529 | return 0; | |
530 | } | |
6794c782 SR |
531 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
532 | ||
533 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
534 | ||
535 | /* | |
536 | * Hook the return address and push it in the stack of return addrs | |
537 | * in current thread info. | |
538 | */ | |
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent points at the saved return address of the instrumented
 * function; it is swapped for return_to_handler so the function-graph
 * tracer regains control when the function returns.  The original
 * address is saved via ftrace_push_return_trace() and restored if the
 * push fails or the entry filter rejects the function.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* on ppc64 this resolves the function descriptor to the entry */
	return_hooker = GET_ADDR(return_hooker);

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 *
	 * The fixup/__ex_table entries make a faulting load or store at
	 * labels 1/2 jump to label 4, which sets faulted = 1.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (ftrace_push_return_trace(old, calltime,
				self_addr, &trace.depth) == -EBUSY) {
		/* return stack full: undo the hook */
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
602 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |