MIPS: Tracing: Reduce the overhead of dynamic Function Tracer
arch/mips/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */

#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

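/*
 * Illustrative example of the encoding (the address is assumed, not taken
 * from this file): INSN_JAL() builds a MIPS J-type "jump and link" word by
 * keeping the low 26 bits of the word-aligned target, so the call must
 * stay within the current 256MB segment.  For a target of 0x80123458:
 *
 *	(0x80123458 >> 2) & 0x03ffffff	== 0x00048d16
 *	0x0c000000 | 0x00048d16		== 0x0c048d16	("jal 0x80123458")
 */
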
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}
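
/*
 * A sketch of what the pre-assembled words above end up holding, with
 * assumed example addresses (MCOUNT_ADDR = 0x80105020, FTRACE_ADDR =
 * 0x80105100, 32-bit kernel):
 *
 *	insn_lui_v1_hi16_mcount    == lui v1, 0x8010	(0x3c038010)
 *	insn_jal_ftrace_caller     == jal 0x80105108	(0x0c041442)
 *	insn_j_ftrace_graph_caller == j   ftrace_graph_caller
 *
 * Encoding them once at boot lets ftrace_make_call() and the graph-caller
 * helpers later patch a call site with a single safe_store_code().
 */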

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * Modules are compiled with -mlong-calls, but the kernel is not,
	 * so the two kinds of call sites must be handled differently.
	 */
	if (ip & 0x40000000) {
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
		/*
		 * lui v1, hi_16bit_of_mcount	--> b 1f (0x10000005)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * move $12, ra_address
		 * jalr v1
		 *  sub sp, sp, 8
		 *				1: offset = 5 instructions
		 */
		new = INSN_B_1F_5;
#else
		/*
		 * lui v1, hi_16bit_of_mcount	--> b 1f (0x10000004)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * jalr v1
		 *  nop | move $12, ra_address | sub sp, sp, 8
		 *				1: offset = 4 instructions
		 */
		new = INSN_B_1F_4;
#endif
	} else {
		/*
		 * move at, ra
		 * jal _mcount		--> nop
		 */
		new = INSN_NOP;
	}
	return ftrace_modify_code(ip, new);
}
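
/*
 * A worked illustration of the module case above, with the assumed
 * !KBUILD_MCOUNT_RA_ADDRESS layout: only the first word of the
 * -mlong-calls sequence is rewritten, the rest stays in memory but is
 * branched over:
 *
 *	b	1f			<-- was: lui v1, hi_16bit_of_mcount
 *	 addiu	v1, v1, low_16bit_of_mcount	(delay slot, still executes,
 *						 but only clobbers v1)
 *	move	at, ra				(skipped)
 *	jalr	v1				(skipped)
 *	 nop					(skipped)
 *	1:
 *
 * The offset encoded in the branch (4 here, 5 in the -mmcount-ra-address
 * 32-bit case) is the distance in instructions from the delay slot to the
 * "1:" label, which is why one ftrace_modify_code() store is enough to
 * disable the whole call site.
 */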

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/* ip, module: 0xc0000000, kernel: 0x80000000 */
	new = (ip & 0x40000000) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;

	return ftrace_modify_code(ip, new);
}
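
/*
 * Note on the asymmetry above: ftrace_make_nop() only replaced the first
 * word of a module's -mlong-calls sequence with a branch, so re-enabling a
 * module call site just restores that one "lui v1, hi_16bit_of_mcount"
 * word; a kernel call site instead gets the pre-assembled
 * "jal ftrace_caller + 8".
 */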

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}
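
/*
 * ftrace_call names the patchable call slot inside the assembly
 * ftrace_caller (mcount.S on MIPS); rewriting that one word with
 * INSN_JAL(func) is how the core ftrace code switches which tracer
 * callback gets called at run time.
 */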

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
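
/*
 * Both helpers above patch the same single word: the ftrace_graph_call
 * slot in the assembly ftrace caller.  Enabling graph tracing writes the
 * pre-assembled "j ftrace_graph_caller" there; disabling it writes a nop
 * back.
 */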

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS
#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

unsigned long ftrace_get_parent_addr(unsigned long self_addr,
				     unsigned long parent,
				     unsigned long parent_addr,
				     unsigned long fp)
{
	unsigned long sp, ip, ra;
	unsigned int code;
	int faulted;

	/* in module or kernel? */
	if (self_addr & 0x40000000) {
		/* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
		ip = self_addr - 20;
	} else {
		/* kernel: move to the instruction "move ra, at" */
		ip = self_addr - 12;
	}

	/*
	 * Search the text backwards until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		ip -= 4;

		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;

		/*
		 * If we hit a non-store instruction before finding where
		 * the ra is stored, then this is a leaf function and it
		 * does not store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_addr;

	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* ra = *(unsigned long *)sp; */
	safe_load_stack(ra, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (ra == parent)
		return sp;
	return 0;
}
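
/*
 * An illustrative (assumed) non-leaf prologue, as seen by the backwards
 * scan above, which starts just above the _mcount call sequence and walks
 * up one instruction at a time:
 *
 *	addiu	sp, sp, -32	<-- a non-store would mean "leaf"; not reached
 *	sw	ra, 28(sp)	<-- matches S_RA_SP: loop stops, offset = 28
 *	sw	s0, 24(sp)	<-- matches S_R_SP only: keep walking up
 *	lui	v1, ...		<-- _mcount call sequence, scan starts above it
 *
 * The scan stops at "sw ra, 28(sp)", so sp = fp + 28 is the slot holding
 * the caller's ra; prepare_ftrace_return() then redirects that slot to
 * return_to_handler.  In a leaf function no such store exists, the scan
 * hits a non-store instruction first, and parent_addr is returned instead.
 */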

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long fp)
{
	unsigned long old;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent" is the stack address where the return address of the
	 * caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly.  A non-leaf function saves the return address
	 * in its own stack space, so we cannot hijack it directly; we first
	 * have to find the real stack address, which is what
	 * ftrace_get_parent_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is passed
	 * to us in $12, and for a leaf function $12 is simply set to zero.
	 * This is done in ftrace_graph_caller of mcount.S.
	 */

	/* old = *parent; */
	safe_load_stack(old, parent, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
							 (unsigned long)parent,
							 fp);
	/*
	 * If getting the stack address of the non-leaf function's ra fails,
	 * stop the function graph tracer and return.
	 */
	if (parent == 0)
		goto out;
#endif
	/* *parent = return_hooker; */
	safe_store_stack(return_hooker, parent, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
	    -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
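
/*
 * Summary of the flow above: on entry to a traced function, _mcount calls
 * prepare_ftrace_return(); the slot holding the caller's return address
 * (found on the stack, or the emulated one for old-gcc leaf functions) is
 * overwritten with return_to_handler, while the real address is pushed
 * onto the per-task return stack by ftrace_push_return_trace(), so the
 * function's exit can be traced when return_to_handler runs.
 */
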
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */