/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/slab.h>
#include <trace/syscall.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4

#ifdef CONFIG_64BIT

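/*
 * The fragments below are never called as functions; they are the
 * reference byte patterns that ftrace_modify_code() verifies and installs
 * at the mcount call sites. After the initial conversion only the first
 * FTRACE_INSN_SIZE bytes are rewritten: "j 0f" (branch across the
 * sequence) versus the first four bytes of the six byte stg instruction.
 * The ".word 0x0024" keeps the stg's trailing two bytes in place so the
 * call site remains a valid instruction stream in both states.
 */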
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	.word	0x0024\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	lg	%r14,8(%r15)\n"
	"	lgr	%r0,%r0\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	stg	%r14,8(%r15)\n");

#else /* CONFIG_64BIT */

asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	0f\n"
	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	"	basr	%r14,%r1\n"
	"ftrace_disable_return:\n"
	"	l	%r14,4(%r15)\n"
	"	j	0f\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"	bcr	0,%r7\n"
	"0:\n");

asm(
	"	.align	4\n"
	"ftrace_nop_code:\n"
	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	"	.align	4\n"
	"ftrace_call_code:\n"
	"	st	%r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

static int ftrace_modify_code(unsigned long ip,
			      void *old_code, int old_size,
			      void *new_code, int new_size)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules, code can disappear and change.
	 * We need to protect against faulting as well as code
	 * changing, which we do by using the probe_kernel_*
	 * functions. This is, however, just a simple sanity check.
	 */
	if (probe_kernel_read(replaced, (void *)ip, old_size))
		return -EFAULT;
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;
	if (probe_kernel_write((void *)ip, new_code, new_size))
		return -EPERM;
	return 0;
}

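/*
 * At first conversion (addr == MCOUNT_ADDR) the call site still contains
 * the compiler generated mcount call sequence. Verify that it starts with
 * the expected instruction (ftrace_call_code) and replace the whole
 * sequence with ftrace_disable_code, which begins with a jump across the
 * now unused instructions.
 */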
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
				   unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (addr == MCOUNT_ADDR)
		return ftrace_make_initial_nop(mod, rec, addr);
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_nop_code, FTRACE_INSN_SIZE);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_nop_code, FTRACE_INSN_SIZE,
				  ftrace_call_code, FTRACE_INSN_SIZE);
}

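/*
 * ftrace_dyn_func holds the tracer function that the ftrace_caller code
 * in mcount.S branches through, so storing the new function here
 * redirects all enabled call sites at once.
 */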
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_dyn_func = (unsigned long)func;
	return 0;
}

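/*
 * Nothing special to set up; just report success through the pointer
 * handed in by the ftrace core, as it expects.
 */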
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * the instruction there is a branch relative on condition. The condition
 * mask is either all ones (always branch, i.e. ftrace_graph_caller
 * disabled) or all zeroes (nop, i.e. ftrace_graph_caller enabled).
 * The instruction format for brc is a7m4xxxx, where m is the condition
 * mask.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa704;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa7f4;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

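/*
 * The ip value seen by prepare_ftrace_return() is the return address of
 * the basr within the patched call sequence. Subtracting the offset of
 * ftrace_disable_return within ftrace_disable_code maps it back to the
 * start of the mcount block at the traced function's entry.
 */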
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread_info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
	struct ftrace_graph_ent trace;

	/* NMIs are currently unsupported. */
	if (unlikely(in_nmi()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];

static struct syscall_metadata **syscalls_metadata;

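/*
 * The helpers below are called by the generic syscall tracing code to
 * map between syscall numbers, names and metadata.
 */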
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;
	for (i = 0; i < NR_syscalls; i++)
		if (syscalls_metadata[i] &&
		    !strcmp(syscalls_metadata[i]->name, name))
			return i;
	return -1;
}

void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

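/*
 * Compare the kallsyms name with the metadata names, skipping the first
 * three characters so that "sys_" symbols also match the "SyS_" aliases
 * kallsyms may return for sign extension wrappers.
 */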
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

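/*
 * Build the syscall number -> metadata mapping at boot time by resolving
 * each sys_call_table entry to its metadata entry via kallsyms.
 */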
static int __init arch_init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
				    GFP_KERNEL);
	if (!syscalls_metadata)
		return -ENOMEM;
	for (i = 0; i < NR_syscalls; i++) {
		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
		syscalls_metadata[i] = meta;
	}
	return 0;
}
arch_initcall(arch_init_ftrace_syscalls);
#endif /* CONFIG_FTRACE_SYSCALLS */