/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */
7 | ||
8 | #include <linux/linkage.h> | |
9 | #include <asm/ftrace.h> | |
10 | ||
11 | .text | |
12 | ||
/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  With data registers, R3 is the
 * only one we can blow away.  With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function.  And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there.  mmmm pie.
 */
ENTRY(__mcount)
	/* Save the third argument register early -- r2 gets clobbered
	 * below while we test the tracer function pointers.
	 */
	[--sp] = r2;

	/* Load the currently installed tracer callback. */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* Micro optimization: skip the indirect call entirely when the
	 * tracer is still the do-nothing stub.
	 */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* A live ftrace_graph_return hook (i.e. one not pointing at
	 * ftrace_stub) means the graph tracer wants this function, so
	 * divert to prepare_ftrace_return() via _ftrace_graph_caller.
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;

	/* Likewise when ftrace_graph_entry has been pointed away from
	 * its own stub, ftrace_graph_entry_stub.
	 */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
#endif

	/* Nothing to trace: restore r2 and return to the caller. */
	r2 = [sp++];
	rts;

.Ldo_trace:

	/* Preserve the remaining argument registers and RETS across the
	 * tracer call (r2 was already pushed on entry).
	 */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* Tracer address goes into p0 for the indirect call. */
	p0 = r3;

	/* tracer(unsigned long ip, unsigned long parent_ip):
	 *   ip        = inside the profiled function (RETS), wound back
	 *               past the mcount call site
	 *   parent_ip = the profiled function's own caller, which GCC
	 *               pushed on the stack before branching here
	 */
	r0 = rets;
	r1 = [sp + 16];	/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* Invoke the tracer. */
	call (p0);

	/* Common epilogue -- also the target of _ftrace_graph_caller. */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)
1ee76d7e MF |
96 | |
97 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc.  This is so
 * the prepare_ftrace_return() can hijack it temporarily for probing
 * purposes.
 */
ENTRY(_ftrace_graph_caller)
	/* Preserve argument registers and RETS, extending the stack
	 * layout started by __mcount (which already pushed r2).
	 */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr):
	 *   parent    = stack slot holding the profiled function's return
	 *               address, so it can be hijacked temporarily
	 *   self_addr = the profiled function, wound back past the mcount
	 *               call site
	 */
	r0 = sp;
	r1 = rets;
	r0 += 16;	/* skip the 4 local regs on stack */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

	/* Reuse __mcount's epilogue to restore everything we saved. */
	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)
118 | ||
/* Undo the rewrite caused by ftrace_graph_caller().  The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
ENTRY(_return_to_handler)
	/* Keep the profiled function's live return registers (p0/r0/r1)
	 * safe across the C helper call below.
	 */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;

	/* ftrace_return_to_handler() hands back the original return
	 * address that _ftrace_graph_caller hijacked.
	 */
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between rets setting and the return
	 */
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
141 | #endif |