/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>
/* Long is fine, even if it is only 4 bytes ;-) */
static unsigned long *ftrace_nop;

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
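/*
 * The union gives two views of the same MCOUNT_INSN_SIZE bytes (5 on
 * x86): code[] as the raw instruction bytes, and the packed struct as
 * an x86 near call, the 0xe8 opcode followed by a 32-bit displacement.
 * Filling in e8 and offset and then reading code[] back yields a
 * complete call instruction with no manual byte shuffling.
 */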
static int notrace ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}
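/*
 * Worked example (illustrative addresses, not from the original
 * source): an x86 call's displacement is relative to the *next*
 * instruction, which is why callers pass ip + MCOUNT_INSN_SIZE. To
 * call addr = 0xc1000100 from a call placed at ip = 0xc1000000:
 *
 *	offset = 0xc1000100 - (0xc1000000 + 5) = 0xfb
 *
 * so the five bytes written are e8 fb 00 00 00.
 */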
notrace unsigned char *ftrace_nop_replace(void)
{
	return (unsigned char *)ftrace_nop;
}
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
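/*
 * A minimal usage sketch (assumed caller, not part of this file): the
 * generic dynamic-ftrace code pairs the two helpers above to build the
 * "before" and "after" images of a call site, then asks
 * ftrace_modify_code() below to swap them:
 *
 *	unsigned char *old = ftrace_nop_replace();
 *	unsigned char *new = ftrace_call_replace(ip, (unsigned long)func);
 *	ftrace_modify_code(ip, old, new);
 *
 * Note that calc is static, so the returned buffer is only valid until
 * the next call; that is safe under kstop_machine, where nothing else
 * runs concurrently.
 */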
notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	int ret;

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, so we need to protect against faulting
	 * as well as code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* Read the existing instruction; the page may no longer be mapped */
	if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
		return 1;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return 2;

	/* Replace the old instruction with the new one */
	ret = __copy_to_user_inatomic((char __user *)ip, new_code,
				      MCOUNT_INSN_SIZE);
	WARN_ON(ret);

	sync_core();

	return 0;
}
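/*
 * The read/verify/write sequence above is deliberate: the current
 * bytes are compared against the expected old code first, so a call
 * site that changed underneath us (or was never what we thought) is
 * left untouched. Return 1 means the address faulted, return 2 means
 * the bytes did not match what the caller expected.
 */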
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
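/*
 * ftrace_call is a label on the patchable call instruction inside the
 * mcount stub (defined in the arch entry assembly, entry_32.S or
 * entry_64.S in kernels of this era), so switching the tracer callback
 * is a single call-site rewrite.
 */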
notrace int ftrace_mcount_set(unsigned long *data)
{
	/* mcount is initialized as a nop */
	*data = 0;
	return 0;
}
int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * go to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
	asm volatile (
		"jmp ftrace_test_jmp\n"
		/* This code needs to stay around */
		".section .text, \"ax\"\n"
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"	/* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"jmp 1f\n"
		".previous\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

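	/*
	 * How the probing above works: each candidate nop is executed
	 * once. If the CPU faults on it, the exception table entry
	 * added by _ASM_EXTABLE redirects execution to the matching
	 * fixup label, which records the failure in 'faulted'
	 * (1 = P6 nop faulted, 2 = the 5-byte nop faulted too) and
	 * falls through to the next candidate. Whatever survives is
	 * chosen as the boot-time nop below.
	 */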
	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		ftrace_nop = (unsigned long *)ftrace_test_nop5;
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		ftrace_nop = (unsigned long *)ftrace_test_jmp;
		break;
	}
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}