/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>


#ifdef CONFIG_FUNCTION_RET_TRACER

/*
 * These functions are picked from those used in
 * this file for dynamic ftrace. They have been
 * simplified to ignore all traces in NMI context.
 */
static atomic_t in_nmi;

void ftrace_nmi_enter(void)
{
        atomic_inc(&in_nmi);
}

void ftrace_nmi_exit(void)
{
        atomic_dec(&in_nmi);
}

/* Add a function return address to the trace stack on thread info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
                                unsigned long func)
{
        int index;
        struct thread_info *ti = current_thread_info();

        /* The return trace stack is full */
        if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
                return -EBUSY;

        index = ++ti->curr_ret_stack;
        barrier();
        ti->ret_stack[index].ret = ret;
        ti->ret_stack[index].func = func;
        ti->ret_stack[index].calltime = time;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
                                unsigned long *func)
{
        int index;
        struct thread_info *ti = current_thread_info();

        index = ti->curr_ret_stack;
        *ret = ti->ret_stack[index].ret;
        *func = ti->ret_stack[index].func;
        *time = ti->ret_stack[index].calltime;
        ti->curr_ret_stack--;
}
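
/*
 * Note (informal): push_return_trace()/pop_return_trace() maintain a
 * per-thread LIFO on thread_info that mirrors the live call stack.
 * Entries are popped in exact reverse order of the pushes, so nested
 * hooked calls unwind correctly.
 */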

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
        struct ftrace_retfunc trace;

        pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
        trace.rettime = cpu_clock(raw_smp_processor_id());
        ftrace_function_return(&trace);

        return trace.ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
        unsigned long old;
        unsigned long long calltime;
        int faulted;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;

        /* NMIs are currently unsupported */
        if (atomic_read(&in_nmi))
                return;

        /*
         * Protect against a fault, even if it shouldn't
         * happen. This tool is too intrusive to
         * ignore such a protection.
         */
        asm volatile(
                "1: movl (%[parent_old]), %[old]\n"
                "2: movl %[return_hooker], (%[parent_replaced])\n"
                "   movl $0, %[faulted]\n"

                ".section .fixup, \"ax\"\n"
                "3: movl $1, %[faulted]\n"
                ".previous\n"

                ".section __ex_table, \"a\"\n"
                "   .long 1b, 3b\n"
                "   .long 2b, 3b\n"
                ".previous\n"

                : [parent_replaced] "=r" (parent), [old] "=r" (old),
                  [faulted] "=r" (faulted)
                : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (WARN_ON(faulted)) {
                unregister_ftrace_return();
                return;
        }

        if (WARN_ON(!__kernel_text_address(old))) {
                unregister_ftrace_return();
                *parent = old;
                return;
        }

        calltime = cpu_clock(raw_smp_processor_id());

        if (push_return_trace(old, calltime, self_addr) == -EBUSY)
                *parent = old;
}
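
/*
 * Big picture (informal summary): the mcount entry hook calls
 * prepare_ftrace_return(), which swaps the traced function's saved
 * return address for return_to_handler (an assembly trampoline
 * defined in the entry code). When the function returns, the
 * trampoline calls ftrace_return_to_handler() above to pop the
 * original address, timestamp the exit and emit the trace.
 */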

#endif /* CONFIG_FUNCTION_RET_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE

union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8 = 0xe8;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}
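
/*
 * Worked example (illustrative addresses, not from the source): for a
 * call site at ip = 0xc0100000 targeting addr = 0xc0200000,
 * ftrace_calc_offset() yields 0xc0200000 - (0xc0100000 + 5) =
 * 0x000ffffb, so the union above assembles the 5-byte instruction
 * e8 fb ff 0f 00, i.e. a near call with a little-endian rel32 offset.
 */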

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: an IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
static atomic_t in_nmi = ATOMIC_INIT(0);
static int mod_code_status;             /* holds return value of text write */
static int mod_code_write;              /* set when NMI should do the write */
static void *mod_code_ip;               /* holds the IP to write to */
static void *mod_code_newcode;          /* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);

int ftrace_arch_read_dyn_info(char *buf, int size)
{
        int r;

        r = snprintf(buf, size, "%u %u",
                     nmi_wait_count,
                     atomic_read(&nmi_update_count));
        return r;
}
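
/*
 * ftrace_arch_read_dyn_info() above exports the two NMI bookkeeping
 * counters -- how many times an updater had to wait for in-flight
 * NMIs, and how many code updates were performed from NMI context --
 * presumably consumed by the core tracer's dynamic ftrace statistics
 * output.
 */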

static void ftrace_mod_code(void)
{
        /*
         * Yes, more than one CPU can be writing to mod_code_status
         * (and the code itself).
         * But if one were to fail, then they all should, and if one were
         * to succeed, then they all should.
         */
        mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
                                             MCOUNT_INSN_SIZE);
}

void ftrace_nmi_enter(void)
{
        atomic_inc(&in_nmi);
        /* Must have in_nmi seen before reading write flag */
        smp_mb();
        if (mod_code_write) {
                ftrace_mod_code();
                atomic_inc(&nmi_update_count);
        }
}

void ftrace_nmi_exit(void)
{
        /* Finish all executions before clearing in_nmi */
        smp_wmb();
        atomic_dec(&in_nmi);
}

static void wait_for_nmi(void)
{
        int waited = 0;

        while (atomic_read(&in_nmi)) {
                waited = 1;
                cpu_relax();
        }

        if (waited)
                nmi_wait_count++;
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
        mod_code_ip = (void *)ip;
        mod_code_newcode = new_code;

        /* The buffers need to be visible before we let NMIs write them */
        smp_wmb();

        mod_code_write = 1;

        /* Make sure write bit is visible before we wait on NMIs */
        smp_mb();

        wait_for_nmi();

        /* Make sure all running NMIs have finished before we write the code */
        smp_mb();

        ftrace_mod_code();

        /* Make sure the write happens before clearing the bit */
        smp_wmb();

        mod_code_write = 0;

        /* Make sure NMIs see the cleared bit */
        smp_mb();

        wait_for_nmi();

        return mod_code_status;
}
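
/*
 * Informal sketch of why this is safe: the smp_mb() after setting
 * mod_code_write pairs with the smp_mb() in ftrace_nmi_enter(). Any
 * NMI either bumps in_nmi before we sample it in wait_for_nmi() -- in
 * which case we wait for it to finish -- or it observes
 * mod_code_write == 1 and performs the identical write itself before
 * executing any further code. Duplicate writes of the same bytes are
 * harmless, as the comment block above explains.
 */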

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

static unsigned char *ftrace_nop_replace(void)
{
        return ftrace_nop;
}

static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules and __init, code can disappear and
         * change; we need to protect against faulting as well as code
         * changing. We do this by using the probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* replace the text with the new text */
        if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;

        sync_core();

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        return ftrace_modify_code(rec->ip, old, new);
}

375 | ||
15adc048 | 376 | int ftrace_update_ftrace_func(ftrace_func_t func) |
d61f82d0 SR |
377 | { |
378 | unsigned long ip = (unsigned long)(&ftrace_call); | |
395a59d0 | 379 | unsigned char old[MCOUNT_INSN_SIZE], *new; |
d61f82d0 SR |
380 | int ret; |
381 | ||
395a59d0 | 382 | memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); |
d61f82d0 SR |
383 | new = ftrace_call_replace(ip, (unsigned long)func); |
384 | ret = ftrace_modify_code(ip, old, new); | |
385 | ||
386 | return ret; | |
387 | } | |

int __init ftrace_dyn_arch_init(void *data)
{
        extern const unsigned char ftrace_test_p6nop[];
        extern const unsigned char ftrace_test_nop5[];
        extern const unsigned char ftrace_test_jmp[];
        int faulted = 0;

        /*
         * There is no good nop for all x86 archs.
         * We will default to using the P6_NOP5, but first we
         * will test to make sure that the nop will actually
         * work on this CPU. If it faults, we will then fall
         * back to a less efficient 5 byte nop. If that fails
         * we then just use a jmp as our nop. This isn't the most
         * efficient nop, but we cannot use a multi-part nop
         * since we would then risk being preempted in the middle
         * of that nop, and if we enabled tracing then, it might
         * cause a system crash.
         *
         * TODO: check the cpuid to determine the best nop.
         */
        asm volatile (
                "ftrace_test_jmp:"
                "jmp ftrace_test_p6nop\n"
                "nop\n"
                "nop\n"
                "nop\n"  /* 2 byte jmp + 3 bytes */
                "ftrace_test_p6nop:"
                P6_NOP5
                "jmp 1f\n"
                "ftrace_test_nop5:"
                ".byte 0x66,0x66,0x66,0x66,0x90\n"
                "1:"
                ".section .fixup, \"ax\"\n"
                "2:     movl $1, %0\n"
                "       jmp ftrace_test_nop5\n"
                "3:     movl $2, %0\n"
                "       jmp 1b\n"
                ".previous\n"
                _ASM_EXTABLE(ftrace_test_p6nop, 2b)
                _ASM_EXTABLE(ftrace_test_nop5, 3b)
                : "=r"(faulted) : "0" (faulted));

        switch (faulted) {
        case 0:
                pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
                memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
                break;
        case 1:
                pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
                memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
                break;
        case 2:
                pr_info("ftrace: converting mcount calls to jmp . + 5\n");
                memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
                break;
        }

        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */