Commit | Line | Data |
---|---|---|
9849ed4d MF |
1 | /* |
2 | * Ftrace header. For implementation details beyond the random comments | |
3 | * scattered below, see: Documentation/trace/ftrace-design.txt | |
4 | */ | |
5 | ||
16444a8a ACM |
6 | #ifndef _LINUX_FTRACE_H |
7 | #define _LINUX_FTRACE_H | |
8 | ||
0012693a | 9 | #include <linux/trace_clock.h> |
5601020f | 10 | #include <linux/kallsyms.h> |
0012693a | 11 | #include <linux/linkage.h> |
ea4e2bc4 | 12 | #include <linux/bitops.h> |
a1e2e31d | 13 | #include <linux/ptrace.h> |
0012693a | 14 | #include <linux/ktime.h> |
21a8c466 | 15 | #include <linux/sched.h> |
0012693a FW |
16 | #include <linux/types.h> |
17 | #include <linux/init.h> | |
18 | #include <linux/fs.h> | |
16444a8a | 19 | |
c79a61f5 UKK |
20 | #include <asm/ftrace.h> |
21 | ||
2f5f6ad9 SR |
22 | /* |
23 | * If the arch supports passing the variable contents of | |
24 | * function_trace_op as the third parameter back from the | |
25 | * mcount call, then the arch should define this as 1. | |
26 | */ | |
27 | #ifndef ARCH_SUPPORTS_FTRACE_OPS | |
28 | #define ARCH_SUPPORTS_FTRACE_OPS 0 | |
29 | #endif | |
30 | ||
ccf3672d SR |
31 | /* |
32 | * If the arch's mcount caller does not support all of ftrace's | |
33 | * features, then it must call an indirect function that | |
34 | * does. Or at least does enough to prevent any unwelcome side effects. | |
35 | */ | |
7544256a | 36 | #if !ARCH_SUPPORTS_FTRACE_OPS |
ccf3672d SR |
37 | # define FTRACE_FORCE_LIST_FUNC 1 |
38 | #else | |
39 | # define FTRACE_FORCE_LIST_FUNC 0 | |
40 | #endif | |
41 | ||
5f893b26 SRRH |
42 | /* Main tracing buffer and events setup */ | |
43 | #ifdef CONFIG_TRACING | |
44 | void trace_init(void); | |
45 | #else | |
46 | static inline void trace_init(void) { } | |
47 | #endif | |
ccf3672d | 48 | |
de477254 | 49 | struct module; |
04da85b8 SR |
50 | struct ftrace_hash; |
51 | ||
606576ce | 52 | #ifdef CONFIG_FUNCTION_TRACER |
3e1932ad | 53 | |
b0fc494f SR |
54 | extern int ftrace_enabled; |
55 | extern int | |
56 | ftrace_enable_sysctl(struct ctl_table *table, int write, | |
8d65af78 | 57 | void __user *buffer, size_t *lenp, |
b0fc494f SR |
58 | loff_t *ppos); |
59 | ||
2f5f6ad9 SR |
60 | struct ftrace_ops; |
61 | ||
62 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, | |
a1e2e31d | 63 | struct ftrace_ops *op, struct pt_regs *regs); |
16444a8a | 64 | |
87354059 SRRH |
65 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); |
66 | ||
e248491a JO |
67 | /* |
68 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are | |
69 | * set in the flags member. | |
f8b8be8a MH |
70 | * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and |
71 | * IPMODIFY are attribute flags that can only be set before | |
72 | * registering the ftrace_ops, and cannot be modified while it is | |
73 | * registered. Changing these attribute flags after registering the | |
74 | * ftrace_ops will cause unexpected results. | |
e248491a JO |
75 | * |
76 | * ENABLED - set/unset when ftrace_ops is registered/unregistered | |
e248491a JO |
77 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically |
78 | * allocated ftrace_ops which need special care | |
79 | * CONTROL - set manually by the ftrace_ops user to denote that the | |
80 | * ftrace_ops can be controlled by the following calls: | |
81 | * ftrace_function_local_enable | |
82 | * ftrace_function_local_disable | |
08f6fba5 SR |
83 | * SAVE_REGS - The ftrace_ops wants regs saved at each function call, | |
84 | * and passed to the callback. If this flag is set, but the | |
85 | * architecture does not support passing regs | |
06aeaaea | 86 | * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the |
08f6fba5 SR |
87 | * ftrace_ops will fail to register, unless the next flag |
88 | * is set. | |
89 | * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the | |
90 | * handler can handle an arch that does not save regs | |
91 | * (the handler tests if regs == NULL), then it can set | |
92 | * this flag instead. It will not fail registering the ftrace_ops | |
93 | * but the regs field will be NULL if the arch does not support | |
94 | * passing regs to the handler. | |
95 | * Note, if this flag is set, the SAVE_REGS flag will automatically | |
96 | * get set upon registering the ftrace_ops, if the arch supports it. | |
4740974a SR |
97 | * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure |
98 | * that the callback has its own recursion protection. If it does | |
99 | * not set this, then the ftrace infrastructure will add recursion | |
100 | * protection for the caller. | |
395b97a3 | 101 | * STUB - The ftrace_ops is just a placeholder. | |
f04f24fb MH |
102 | * INITIALIZED - The ftrace_ops has already been initialized (the first | |
103 | * time register_ftrace_function() is called, it will initialize the ops) | |
591dffda | 104 | * DELETED - The ops are being deleted, do not let them be registered again. |
e1effa01 SRRH |
105 | * ADDING - The ops is in the process of being added. |
106 | * REMOVING - The ops is in the process of being removed. | |
107 | * MODIFYING - The ops is in the process of changing its filter functions. | |
f3bea491 SRRH |
108 | * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. |
109 | * The arch specific code sets this flag when it allocated a | |
110 | * trampoline. This lets the arch know that it can update the | |
111 | * trampoline in case the callback function changes. | |
112 | * The ftrace_ops trampoline can be set by the ftrace users, and | |
113 | * in such cases the arch must not modify it. Only the arch ftrace | |
114 | * core code should set this flag. | |
f8b8be8a MH |
115 | * IPMODIFY - The ops can modify the IP register. This can only be set with |
116 | * SAVE_REGS. If another ops with this flag set is already registered | |
117 | * for any of the functions that this ops will be registered for, then | |
118 | * this ops will fail to register or set_filter_ip. | |
e3eea140 | 119 | * PID - Is affected by set_ftrace_pid (allows filtering on those pids) |
e248491a | 120 | */ |
b848914c | 121 | enum { |
08f6fba5 | 122 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
4104d326 SRRH |
123 | FTRACE_OPS_FL_DYNAMIC = 1 << 1, |
124 | FTRACE_OPS_FL_CONTROL = 1 << 2, | |
125 | FTRACE_OPS_FL_SAVE_REGS = 1 << 3, | |
126 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, | |
127 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, | |
128 | FTRACE_OPS_FL_STUB = 1 << 6, | |
129 | FTRACE_OPS_FL_INITIALIZED = 1 << 7, | |
130 | FTRACE_OPS_FL_DELETED = 1 << 8, | |
e1effa01 SRRH |
131 | FTRACE_OPS_FL_ADDING = 1 << 9, |
132 | FTRACE_OPS_FL_REMOVING = 1 << 10, | |
133 | FTRACE_OPS_FL_MODIFYING = 1 << 11, | |
f3bea491 | 134 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, |
f8b8be8a | 135 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, |
e3eea140 | 136 | FTRACE_OPS_FL_PID = 1 << 14, |
b848914c SR |
137 | }; |
138 | ||
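As the SAVE_REGS_IF_SUPPORTED description above implies, a callback registered with that flag must tolerate `regs == NULL` on arches without CONFIG_DYNAMIC_FTRACE_WITH_REGS. A minimal sketch under that assumption (the ops and callback names are hypothetical, not part of this header):

```c
#include <linux/ftrace.h>
#include <linux/printk.h>
#include <linux/ptrace.h>

/* Hypothetical callback: must cope with regs == NULL on arches
 * that cannot pass pt_regs to the handler. */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	if (regs)
		pr_debug("hit %ps, pc=%lx\n", (void *)ip,
			 instruction_pointer(regs));
	else
		pr_debug("hit %ps (arch passed no regs)\n", (void *)ip);
}

/* RECURSION_SAFE is deliberately left unset: ftrace then wraps the
 * callback in its own recursion protection. */
static struct ftrace_ops my_ops __read_mostly = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};
```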
33b7f99c SRRH |
139 | #ifdef CONFIG_DYNAMIC_FTRACE |
140 | /* The hashes used to know which functions the callback traces */ | |
141 | struct ftrace_ops_hash { | |
142 | struct ftrace_hash *notrace_hash; | |
143 | struct ftrace_hash *filter_hash; | |
144 | struct mutex regex_lock; | |
145 | }; | |
146 | #endif | |
147 | ||
b7e00a6c SRRH |
148 | /* |
149 | * Note, ftrace_ops can be referenced outside of RCU protection. | |
150 | * (Although, for perf, the control ops prevent that). If ftrace_ops is | |
151 | * allocated and not part of kernel core data, then unregistering it will | |
152 | * schedule work on all CPUs to make sure that there are no more users. | |
153 | * Depending on the load of the system, that may take a bit of time. | |
154 | * | |
155 | * Any private data added must likewise not be freed while the ops may | |
156 | * still be in use; if private data is added to an ftrace_ops that is in | |
157 | * core code, the user must perform a schedule_on_each_cpu() before freeing it. | |
158 | */ | |
16444a8a | 159 | struct ftrace_ops { |
f45948e8 SR |
160 | ftrace_func_t func; |
161 | struct ftrace_ops *next; | |
b848914c | 162 | unsigned long flags; |
b7e00a6c | 163 | void *private; |
e3eea140 | 164 | ftrace_func_t saved_func; |
79922b80 | 165 | int __percpu *disabled; |
f45948e8 | 166 | #ifdef CONFIG_DYNAMIC_FTRACE |
0162d621 | 167 | int nr_trampolines; |
33b7f99c SRRH |
168 | struct ftrace_ops_hash local_hash; |
169 | struct ftrace_ops_hash *func_hash; | |
fef5aeee | 170 | struct ftrace_ops_hash old_hash; |
79922b80 | 171 | unsigned long trampoline; |
aec0be2d | 172 | unsigned long trampoline_size; |
f45948e8 | 173 | #endif |
16444a8a ACM |
174 | }; |
175 | ||
e7d3737e FW |
176 | /* |
177 | * Type of the current tracing. | |
178 | */ | |
179 | enum ftrace_tracing_type_t { | |
180 | FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ | |
181 | FTRACE_TYPE_RETURN, /* Hook the return of the function */ | |
182 | }; | |
183 | ||
184 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ | |
185 | extern enum ftrace_tracing_type_t ftrace_tracing_type; | |
186 | ||
16444a8a ACM |
187 | /* |
188 | * The ftrace_ops must be static and should also | |
189 | * be read_mostly. These functions do modify read_mostly variables | |
190 | * so use them sparingly. Never free an ftrace_ops or modify the | |
191 | * next pointer after it has been registered. Even after unregistering | |
192 | * it, the next pointer may still be used internally. | |
193 | */ | |
194 | int register_ftrace_function(struct ftrace_ops *ops); | |
195 | int unregister_ftrace_function(struct ftrace_ops *ops); | |
196 | void clear_ftrace_function(void); | |
197 | ||
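Taken together with the warning above, a minimal user looks like the following sketch (names are invented for illustration). With no filter set, the callback fires for every traced function, so it must be fast and re-entrant:

```c
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs on every traced function call; keep it minimal */
}

/* per the comment above: static, read_mostly, never freed */
static struct ftrace_ops my_trace_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
```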
e248491a JO |
198 | /** |
199 | * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu | |
200 | * | |
201 | * This function enables tracing on the current cpu by decreasing | |
202 | * the per cpu control variable. | |
203 | * It must be called with preemption disabled and only on ftrace_ops | |
204 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption | |
205 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. | |
206 | */ | |
207 | static inline void ftrace_function_local_enable(struct ftrace_ops *ops) | |
208 | { | |
209 | if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) | |
210 | return; | |
211 | ||
212 | (*this_cpu_ptr(ops->disabled))--; | |
213 | } | |
214 | ||
215 | /** | |
216 | * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu | |
217 | * | |
218 | * This function disables tracing on the current cpu by increasing | |
219 | * the per cpu control variable. | |
220 | * It must be called with preemption disabled and only on ftrace_ops | |
221 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption | |
222 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. | |
223 | */ | |
224 | static inline void ftrace_function_local_disable(struct ftrace_ops *ops) | |
225 | { | |
226 | if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) | |
227 | return; | |
228 | ||
229 | (*this_cpu_ptr(ops->disabled))++; | |
230 | } | |
231 | ||
232 | /** | |
233 | * ftrace_function_local_disabled - returns ftrace_ops disabled value | |
234 | * on current cpu | |
235 | * | |
236 | * This function returns the value of ftrace_ops::disabled on the current cpu. | |
237 | * It must be called with preemption disabled and only on ftrace_ops | |
238 | * registered with FTRACE_OPS_FL_CONTROL. If called without preemption | |
239 | * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. | |
240 | */ | |
241 | static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) | |
242 | { | |
243 | WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); | |
244 | return *this_cpu_ptr(ops->disabled); | |
245 | } | |
246 | ||
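Putting the three helpers above together: a user of a CONTROL ops can suppress its callback on the local CPU around a critical section. Per the comments, preemption must be held off; `my_ctl_ops` is a hypothetical ops assumed to be registered with FTRACE_OPS_FL_CONTROL:

```c
#include <linux/ftrace.h>
#include <linux/preempt.h>

extern struct ftrace_ops my_ctl_ops;	/* hypothetical CONTROL ops */

static void my_untraced_section(void)
{
	preempt_disable();
	ftrace_function_local_disable(&my_ctl_ops);	/* disabled++ */

	/* ... work the control ops must not trace on this CPU ... */

	ftrace_function_local_enable(&my_ctl_ops);	/* disabled-- */
	preempt_enable();
}
```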
a1e2e31d SR |
247 | extern void ftrace_stub(unsigned long a0, unsigned long a1, |
248 | struct ftrace_ops *op, struct pt_regs *regs); | |
16444a8a | 249 | |
606576ce | 250 | #else /* !CONFIG_FUNCTION_TRACER */ |
4dbf6bc2 SR |
251 | /* |
252 | * (un)register_ftrace_function must be a macro since the ops parameter | |
253 | * must not be evaluated. | |
254 | */ | |
255 | #define register_ftrace_function(ops) ({ 0; }) | |
256 | #define unregister_ftrace_function(ops) ({ 0; }) | |
ea701f11 SR |
257 | static inline int ftrace_nr_registered_ops(void) |
258 | { | |
259 | return 0; | |
260 | } | |
4dbf6bc2 | 261 | static inline void clear_ftrace_function(void) { } |
81adbdc0 | 262 | static inline void ftrace_kill(void) { } |
606576ce | 263 | #endif /* CONFIG_FUNCTION_TRACER */ |
352ad25a | 264 | |
f38f1d2a | 265 | #ifdef CONFIG_STACK_TRACER |
bb99d8cc AT |
266 | |
267 | #define STACK_TRACE_ENTRIES 500 | |
268 | ||
269 | struct stack_trace; | |
270 | ||
271 | extern unsigned stack_trace_index[]; | |
272 | extern struct stack_trace stack_trace_max; | |
273 | extern unsigned long stack_trace_max_size; | |
274 | extern arch_spinlock_t max_stack_lock; | |
275 | ||
f38f1d2a | 276 | extern int stack_tracer_enabled; |
bb99d8cc | 277 | void stack_trace_print(void); |
f38f1d2a SR |
278 | int |
279 | stack_trace_sysctl(struct ctl_table *table, int write, | |
8d65af78 | 280 | void __user *buffer, size_t *lenp, |
f38f1d2a SR |
281 | loff_t *ppos); |
282 | #endif | |
283 | ||
f6180773 SR |
284 | struct ftrace_func_command { |
285 | struct list_head list; | |
286 | char *name; | |
43dd61c9 SR |
287 | int (*func)(struct ftrace_hash *hash, |
288 | char *func, char *cmd, | |
f6180773 SR |
289 | char *params, int enable); |
290 | }; | |
291 | ||
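A struct like the one above backs the `function:command:params` syntax accepted through set_ftrace_filter. A hedged sketch of wiring one up (`mycmd` and its handler are invented for illustration; register_ftrace_command() is declared further down in this header):

```c
#include <linux/ftrace.h>
#include <linux/printk.h>

/* invoked for e.g. "echo 'schedule:mycmd:arg' > set_ftrace_filter" */
static int my_cmd_func(struct ftrace_hash *hash, char *func,
		       char *cmd, char *params, int enable)
{
	pr_info("%s on %s (params=%s, enable=%d)\n",
		cmd, func, params ? params : "", enable);
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name = "mycmd",
	.func = my_cmd_func,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
```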
3d083395 | 292 | #ifdef CONFIG_DYNAMIC_FTRACE |
31e88909 | 293 | |
000ab691 SR |
294 | int ftrace_arch_code_modify_prepare(void); |
295 | int ftrace_arch_code_modify_post_process(void); | |
296 | ||
4fd3279b SRRH |
297 | struct dyn_ftrace; |
298 | ||
299 | void ftrace_bug(int err, struct dyn_ftrace *rec); | |
c88fd863 | 300 | |
809dcf29 SR |
301 | struct seq_file; |
302 | ||
b6887d79 | 303 | struct ftrace_probe_ops { |
59df055f SR |
304 | void (*func)(unsigned long ip, |
305 | unsigned long parent_ip, | |
306 | void **data); | |
e67efb93 SRRH |
307 | int (*init)(struct ftrace_probe_ops *ops, |
308 | unsigned long ip, void **data); | |
309 | void (*free)(struct ftrace_probe_ops *ops, | |
310 | unsigned long ip, void **data); | |
809dcf29 SR |
311 | int (*print)(struct seq_file *m, |
312 | unsigned long ip, | |
b6887d79 | 313 | struct ftrace_probe_ops *ops, |
809dcf29 | 314 | void *data); |
59df055f SR |
315 | }; |
316 | ||
317 | extern int | |
b6887d79 | 318 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
59df055f SR |
319 | void *data); |
320 | extern void | |
b6887d79 | 321 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
59df055f SR |
322 | void *data); |
323 | extern void | |
b6887d79 SR |
324 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); |
325 | extern void unregister_ftrace_function_probe_all(char *glob); | |
59df055f | 326 | |
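A probe attaches a handler to every function matching a glob, with the same matching rules as set_ftrace_filter. A minimal sketch under those assumptions (the probe names and the glob are illustrative only):

```c
#include <linux/ftrace.h>
#include <linux/printk.h>

static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	pr_info("probe hit at %ps\n", (void *)ip);
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,		/* init/free/print are optional */
};

static int __init my_probe_init(void)
{
	/* glob syntax as in set_ftrace_filter; returns the number of
	 * functions matched, or a negative errno */
	int ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);

	return ret < 0 ? ret : 0;
}
```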
d88471cb | 327 | extern int ftrace_text_reserved(const void *start, const void *end); |
2cfa1978 | 328 | |
ea701f11 SR |
329 | extern int ftrace_nr_registered_ops(void); |
330 | ||
aec0be2d SRRH |
331 | bool is_ftrace_trampoline(unsigned long addr); |
332 | ||
08f6fba5 SR |
333 | /* |
334 | * The dyn_ftrace record's flags field is split into two parts. | |
335 | * The first part, '0-FTRACE_REF_MAX', is a counter of | |
336 | * the number of callbacks that have registered the function that | |
337 | * the dyn_ftrace descriptor represents. | |
338 | * | |
339 | * The second part is a mask: | |
340 | * ENABLED - the function is being traced | |
341 | * REGS - the record wants the function to save regs | |
342 | * REGS_EN - the function is set up to save regs. | |
f8b8be8a | 343 | * IPMODIFY - the record allows for the IP address to be changed. |
08f6fba5 SR |
344 | * |
345 | * When a new ftrace_ops is registered and wants a function to save | |
346 | * pt_regs, the rec->flag REGS is set. When the function has been | |
347 | * set up to save regs, the REGS_EN flag is set. Once a function | |
348 | * starts saving regs it will do so until all ftrace_ops are removed | |
349 | * from tracing that function. | |
350 | */ | |
3c1720f0 | 351 | enum { |
79922b80 | 352 | FTRACE_FL_ENABLED = (1UL << 31), |
08f6fba5 | 353 | FTRACE_FL_REGS = (1UL << 30), |
79922b80 SRRH |
354 | FTRACE_FL_REGS_EN = (1UL << 29), |
355 | FTRACE_FL_TRAMP = (1UL << 28), | |
356 | FTRACE_FL_TRAMP_EN = (1UL << 27), | |
f8b8be8a | 357 | FTRACE_FL_IPMODIFY = (1UL << 26), |
3c1720f0 SR |
358 | }; |
359 | ||
f8b8be8a MH |
360 | #define FTRACE_REF_MAX_SHIFT 26 |
361 | #define FTRACE_FL_BITS 6 | |
cf2cb0b2 SRRH |
362 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) |
363 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) | |
364 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) | |
ed926f9b | 365 | |
0376bde1 SRRH |
366 | #define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) |
367 | ||
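To illustrate the split described above, a record can be decoded like this (a sketch; struct dyn_ftrace is defined just below):

```c
#include <linux/ftrace.h>
#include <linux/printk.h>

static void my_dump_rec(struct dyn_ftrace *rec)
{
	/* low FTRACE_REF_MAX_SHIFT bits: how many ftrace_ops trace this site */
	unsigned long refs = ftrace_rec_count(rec);

	pr_info("%ps: refs=%lu%s%s\n", (void *)rec->ip, refs,
		rec->flags & FTRACE_FL_ENABLED ? " ENABLED" : "",
		rec->flags & FTRACE_FL_REGS ? " REGS" : "");
}
```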
3d083395 | 368 | struct dyn_ftrace { |
a762782d | 369 | unsigned long ip; /* address of mcount call-site */ |
85ae32ae | 370 | unsigned long flags; |
a762782d | 371 | struct dyn_arch_ftrace arch; |
3d083395 SR |
372 | }; |
373 | ||
e1c08bdd | 374 | int ftrace_force_update(void); |
647664ea MH |
375 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, |
376 | int remove, int reset); | |
ac483c44 | 377 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
936e074b | 378 | int len, int reset); |
ac483c44 | 379 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
936e074b SR |
380 | int len, int reset); |
381 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); | |
382 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); | |
5500fa51 | 383 | void ftrace_free_filter(struct ftrace_ops *ops); |
e1c08bdd | 384 | |
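Filters are normally installed before the ops is registered. A sketch of typical setup using the declarations above (the ops and the glob patterns are illustrative only):

```c
#include <linux/ftrace.h>
#include <linux/string.h>

extern struct ftrace_ops my_trace_ops;	/* hypothetical, not yet registered */

static int __init my_filter_init(void)
{
	static char filter[] = "kmalloc*";
	static char skip[] = "*_slowpath";
	int ret;

	/* reset the filter hash, then trace only functions matching the glob */
	ret = ftrace_set_filter(&my_trace_ops, filter, strlen(filter), 1);
	if (ret)
		return ret;

	/* additionally exclude anything matching the notrace glob */
	ret = ftrace_set_notrace(&my_trace_ops, skip, strlen(skip), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_trace_ops);
}
```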
f6180773 SR |
385 | int register_ftrace_command(struct ftrace_func_command *cmd); |
386 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | |
387 | ||
c88fd863 SR |
388 | enum { |
389 | FTRACE_UPDATE_CALLS = (1 << 0), | |
390 | FTRACE_DISABLE_CALLS = (1 << 1), | |
391 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | |
392 | FTRACE_START_FUNC_RET = (1 << 3), | |
393 | FTRACE_STOP_FUNC_RET = (1 << 4), | |
394 | }; | |
395 | ||
08f6fba5 SR |
396 | /* |
397 | * The FTRACE_UPDATE_* enum is used to pass information back | |
398 | * from the ftrace_update_record() and ftrace_test_record() | |
399 | * functions. These are called by the code update routines | |
400 | * to find out what is to be done for a given function. | |
401 | * | |
402 | * IGNORE - The function is already what we want it to be | |
403 | * MAKE_CALL - Start tracing the function | |
404 | * MODIFY_CALL - Change what the call site calls (e.g. when regs saving is toggled) | |
08f6fba5 SR |
405 | * MAKE_NOP - Stop tracing the function |
406 | */ | |
c88fd863 SR |
407 | enum { |
408 | FTRACE_UPDATE_IGNORE, | |
409 | FTRACE_UPDATE_MAKE_CALL, | |
08f6fba5 | 410 | FTRACE_UPDATE_MODIFY_CALL, |
c88fd863 SR |
411 | FTRACE_UPDATE_MAKE_NOP, |
412 | }; | |
413 | ||
fc13cb0c SR |
414 | enum { |
415 | FTRACE_ITER_FILTER = (1 << 0), | |
416 | FTRACE_ITER_NOTRACE = (1 << 1), | |
417 | FTRACE_ITER_PRINTALL = (1 << 2), | |
69a3083c SR |
418 | FTRACE_ITER_DO_HASH = (1 << 3), |
419 | FTRACE_ITER_HASH = (1 << 4), | |
420 | FTRACE_ITER_ENABLED = (1 << 5), | |
fc13cb0c SR |
421 | }; |
422 | ||
c88fd863 SR |
423 | void arch_ftrace_update_code(int command); |
424 | ||
425 | struct ftrace_rec_iter; | |
426 | ||
427 | struct ftrace_rec_iter *ftrace_rec_iter_start(void); | |
428 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); | |
429 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); | |
430 | ||
08d636b6 SR |
431 | #define for_ftrace_rec_iter(iter) \ |
432 | for (iter = ftrace_rec_iter_start(); \ | |
433 | iter; \ | |
434 | iter = ftrace_rec_iter_next(iter)) | |
435 | ||
436 | ||
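The iterator lets arch code walk every mcount site, for example while converting call sites at boot. A sketch of the canonical loop (my_arch_make_nop() is a hypothetical stand-in for the real arch-specific patching):

```c
#include <linux/errno.h>
#include <linux/ftrace.h>

int my_arch_make_nop(unsigned long ip);	/* hypothetical arch helper */

static void __init my_arch_patch_all(void)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		if (my_arch_make_nop(rec->ip))
			ftrace_bug(-EPERM, rec);
	}
}
```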
c88fd863 SR |
437 | int ftrace_update_record(struct dyn_ftrace *rec, int enable); |
438 | int ftrace_test_record(struct dyn_ftrace *rec, int enable); | |
439 | void ftrace_run_stop_machine(int command); | |
f0cf973a | 440 | unsigned long ftrace_location(unsigned long ip); |
7413af1f SRRH |
441 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); |
442 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); | |
c88fd863 SR |
443 | |
444 | extern ftrace_func_t ftrace_trace_function; | |
445 | ||
fc13cb0c SR |
446 | int ftrace_regex_open(struct ftrace_ops *ops, int flag, |
447 | struct inode *inode, struct file *file); | |
448 | ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, | |
449 | size_t cnt, loff_t *ppos); | |
450 | ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, | |
451 | size_t cnt, loff_t *ppos); | |
fc13cb0c SR |
452 | int ftrace_regex_release(struct inode *inode, struct file *file); |
453 | ||
2a85a37f SR |
454 | void __init |
455 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); | |
456 | ||
3d083395 | 457 | /* defined in arch */ |
3c1720f0 | 458 | extern int ftrace_ip_converted(unsigned long ip); |
3a36cb11 | 459 | extern int ftrace_dyn_arch_init(void); |
e4f5d544 | 460 | extern void ftrace_replace_code(int enable); |
d61f82d0 SR |
461 | extern int ftrace_update_ftrace_func(ftrace_func_t func); |
462 | extern void ftrace_caller(void); | |
08f6fba5 | 463 | extern void ftrace_regs_caller(void); |
d61f82d0 | 464 | extern void ftrace_call(void); |
08f6fba5 | 465 | extern void ftrace_regs_call(void); |
d61f82d0 | 466 | extern void mcount_call(void); |
f0001207 | 467 | |
8ed3e2cf SR |
468 | void ftrace_modify_all_code(int command); |
469 | ||
f0001207 SL |
470 | #ifndef FTRACE_ADDR |
471 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) | |
472 | #endif | |
08f6fba5 | 473 | |
79922b80 SRRH |
474 | #ifndef FTRACE_GRAPH_ADDR |
475 | #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller) | |
476 | #endif | |
477 | ||
08f6fba5 | 478 | #ifndef FTRACE_REGS_ADDR |
06aeaaea | 479 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
08f6fba5 SR |
480 | # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) |
481 | #else | |
482 | # define FTRACE_REGS_ADDR FTRACE_ADDR | |
483 | #endif | |
484 | #endif | |
485 | ||
646d7043 SRRH |
486 | /* |
487 | * If an arch would like functions that are only traced | |
488 | * by the function graph tracer to jump directly to its own | |
489 | * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR | |
490 | * to be that address to jump to. | |
491 | */ | |
492 | #ifndef FTRACE_GRAPH_TRAMP_ADDR | |
493 | #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0) | |
494 | #endif | |
495 | ||
fb52607a FW |
496 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
497 | extern void ftrace_graph_caller(void); | |
5a45cfe1 SR |
498 | extern int ftrace_enable_ftrace_graph_caller(void); |
499 | extern int ftrace_disable_ftrace_graph_caller(void); | |
500 | #else | |
501 | static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } | |
502 | static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } | |
e7d3737e | 503 | #endif |
ad90c0e3 | 504 | |
31e88909 | 505 | /** |
57794a9d | 506 | * ftrace_make_nop - convert code into nop |
31e88909 SR |
507 | * @mod: module structure if called by module load initialization |
508 | * @rec: the mcount call site record | |
509 | * @addr: the address that the call site is currently expected to call | |
510 | * | |
511 | * This is a very sensitive operation and great care needs | |
512 | * to be taken by the arch. The operation should carefully | |
513 | * read the location, check to see if what is read is indeed | |
514 | * what we expect it to be, and then on success of the compare, | |
515 | * it should write to the location. | |
516 | * | |
517 | * The code segment at @rec->ip should be a caller to @addr | |
518 | * | |
519 | * Return must be: | |
520 | * 0 on success | |
521 | * -EFAULT on error reading the location | |
522 | * -EINVAL on a failed compare of the contents | |
523 | * -EPERM on error writing to the location | |
524 | * Any other value will be considered a failure. | |
525 | */ | |
526 | extern int ftrace_make_nop(struct module *mod, | |
527 | struct dyn_ftrace *rec, unsigned long addr); | |
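The read/compare/write discipline and the return codes spelled out above map onto a pattern like the following. This is only a sketch: MCOUNT_INSN_SIZE comes from the arch's asm/ftrace.h, and my_calc_call()/my_arch_nop()/my_patch_text() are hypothetical stand-ins for the real instruction handling:

```c
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/ftrace.h>		/* MCOUNT_INSN_SIZE */

/* hypothetical arch helpers */
void my_calc_call(unsigned char *buf, unsigned long ip, unsigned long addr);
const unsigned char *my_arch_nop(void);
int my_patch_text(void *ip, const void *opcode, int len);

int my_ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		       unsigned long addr)
{
	unsigned char expect[MCOUNT_INSN_SIZE];
	unsigned char found[MCOUNT_INSN_SIZE];

	/* what a call to @addr should look like at rec->ip */
	my_calc_call(expect, rec->ip, addr);

	if (probe_kernel_read(found, (void *)rec->ip, MCOUNT_INSN_SIZE))
		return -EFAULT;		/* error reading the location */
	if (memcmp(found, expect, MCOUNT_INSN_SIZE))
		return -EINVAL;		/* failed compare of the contents */
	if (my_patch_text((void *)rec->ip, my_arch_nop(), MCOUNT_INSN_SIZE))
		return -EPERM;		/* error writing to the location */
	return 0;
}
```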
a26a2a27 | 528 | |
593eb8a2 | 529 | /** |
31e88909 SR |
530 | * ftrace_make_call - convert a nop call site into a call to addr |
531 | * @rec: the mcount call site record | |
532 | * @addr: the address that the call site should call | |
593eb8a2 SR |
533 | * |
534 | * This is a very sensitive operation and great care needs | |
535 | * to be taken by the arch. The operation should carefully | |
536 | * read the location, check to see if what is read is indeed | |
537 | * what we expect it to be, and then on success of the compare, | |
538 | * it should write to the location. | |
539 | * | |
31e88909 SR |
540 | * The code segment at @rec->ip should be a nop |
541 | * | |
593eb8a2 SR |
542 | * Return must be: |
543 | * 0 on success | |
544 | * -EFAULT on error reading the location | |
545 | * -EINVAL on a failed compare of the contents | |
546 | * -EPERM on error writing to the location | |
547 | * Any other value will be considered a failure. | |
548 | */ | |
31e88909 SR |
549 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); |
550 | ||
06aeaaea | 551 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
08f6fba5 SR |
552 | /** |
553 | * ftrace_modify_call - convert from one addr to another (no nop) | |
554 | * @rec: the mcount call site record | |
555 | * @old_addr: the address expected to be currently called to | |
556 | * @addr: the address to change to | |
557 | * | |
558 | * This is a very sensitive operation and great care needs | |
559 | * to be taken by the arch. The operation should carefully | |
560 | * read the location, check to see if what is read is indeed | |
561 | * what we expect it to be, and then on success of the compare, | |
562 | * it should write to the location. | |
563 | * | |
564 | * The code segment at @rec->ip should be a caller to @old_addr | |
565 | * | |
566 | * Return must be: | |
567 | * 0 on success | |
568 | * -EFAULT on error reading the location | |
569 | * -EINVAL on a failed compare of the contents | |
570 | * -EPERM on error writing to the location | |
571 | * Any other value will be considered a failure. | |
572 | */ | |
573 | extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | |
574 | unsigned long addr); | |
575 | #else | |
576 | /* Should never be called */ | |
577 | static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | |
578 | unsigned long addr) | |
579 | { | |
580 | return -EINVAL; | |
581 | } | |
582 | #endif | |
583 | ||
31e88909 SR |
584 | /* May be defined in arch */ |
585 | extern int ftrace_arch_read_dyn_info(char *buf, int size); | |
593eb8a2 | 586 | |
ecea656d | 587 | extern int skip_trace(unsigned long ip); |
a949ae56 | 588 | extern void ftrace_module_init(struct module *mod); |
ecea656d | 589 | |
c0719e5a SR |
590 | extern void ftrace_disable_daemon(void); |
591 | extern void ftrace_enable_daemon(void); | |
4dc93676 | 592 | #else /* CONFIG_DYNAMIC_FTRACE */ |
4dbf6bc2 SR |
593 | static inline int skip_trace(unsigned long ip) { return 0; } |
594 | static inline int ftrace_force_update(void) { return 0; } | |
4dbf6bc2 SR |
595 | static inline void ftrace_disable_daemon(void) { } |
596 | static inline void ftrace_enable_daemon(void) { } | |
e7247a15 | 597 | static inline void ftrace_release_mod(struct module *mod) {} |
a949ae56 | 598 | static inline void ftrace_module_init(struct module *mod) {} |
38de93ab | 599 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) |
f6180773 | 600 | { |
97d0bb8d | 601 | return -EINVAL; |
f6180773 | 602 | } |
38de93ab | 603 | static inline __init int unregister_ftrace_command(char *cmd_name) |
f6180773 | 604 | { |
97d0bb8d | 605 | return -EINVAL; |
f6180773 | 606 | } |
d88471cb | 607 | static inline int ftrace_text_reserved(const void *start, const void *end) |
2cfa1978 MH |
608 | { |
609 | return 0; | |
610 | } | |
4dc93676 SR |
611 | static inline unsigned long ftrace_location(unsigned long ip) |
612 | { | |
613 | return 0; | |
614 | } | |
fc13cb0c SR |
615 | |
616 | /* | |
617 | * Again, users of functions that take an ftrace_ops may not | |
618 | * have it defined when ftrace is not enabled, but these | |
619 | * functions may still be called; use a macro instead of an inline. | |
620 | */ | |
621 | #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) | |
96de37b6 | 622 | #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) |
647664ea | 623 | #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) |
5500fa51 JO |
624 | #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) |
625 | #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) | |
626 | #define ftrace_free_filter(ops) do { } while (0) | |
fc13cb0c SR |
627 | |
628 | static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, | |
629 | size_t cnt, loff_t *ppos) { return -ENODEV; } | |
630 | static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, | |
631 | size_t cnt, loff_t *ppos) { return -ENODEV; } | |
fc13cb0c SR |
632 | static inline int |
633 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } | |
aec0be2d SRRH |
634 | |
635 | static inline bool is_ftrace_trampoline(unsigned long addr) | |
636 | { | |
637 | return false; | |
638 | } | |
ecea656d | 639 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
352ad25a | 640 | |
aeaee8a2 IM |
641 | /* totally disable ftrace - cannot be re-enabled after this */ | |
642 | void ftrace_kill(void); | |
643 | ||
f43fdad8 IM |
644 | static inline void tracer_disable(void) |
645 | { | |
606576ce | 646 | #ifdef CONFIG_FUNCTION_TRACER |
f43fdad8 IM |
647 | ftrace_enabled = 0; |
648 | #endif | |
649 | } | |
650 | ||
37002735 HY |
651 | /* |
652 | * Ftrace disable/restore without lock. Some synchronization mechanism | |
9bdeb7b5 | 653 | * must be used to prevent ftrace_enabled from being changed between | |
37002735 HY |
654 | * disable/restore. |
655 | */ | |
9bdeb7b5 HY |
656 | static inline int __ftrace_enabled_save(void) |
657 | { | |
606576ce | 658 | #ifdef CONFIG_FUNCTION_TRACER |
9bdeb7b5 HY |
659 | int saved_ftrace_enabled = ftrace_enabled; |
660 | ftrace_enabled = 0; | |
661 | return saved_ftrace_enabled; | |
662 | #else | |
663 | return 0; | |
664 | #endif | |
665 | } | |
666 | ||
667 | static inline void __ftrace_enabled_restore(int enabled) | |
668 | { | |
606576ce | 669 | #ifdef CONFIG_FUNCTION_TRACER |
9bdeb7b5 HY |
670 | ftrace_enabled = enabled; |
671 | #endif | |
672 | } | |
673 | ||
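Usage of the pair above is symmetric; as the comment notes, the caller must supply its own serialization around the section (a sketch):

```c
static void my_fragile_section(void)
{
	/* caller provides its own locking, per the comment above */
	int saved = __ftrace_enabled_save();

	/* ... code during which ftrace must stay disabled ... */

	__ftrace_enabled_restore(saved);
}
```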
eed542d6 AT |
674 | /* All archs should have this, but we define it for consistency */ |
675 | #ifndef ftrace_return_address0 | |
676 | # define ftrace_return_address0 __builtin_return_address(0) | |
677 | #endif | |
678 | ||
679 | /* Archs may use other ways for ADDR1 and beyond */ | |
680 | #ifndef ftrace_return_address | |
c79a61f5 | 681 | # ifdef CONFIG_FRAME_POINTER |
eed542d6 | 682 | # define ftrace_return_address(n) __builtin_return_address(n) |
c79a61f5 | 683 | # else |
eed542d6 | 684 | # define ftrace_return_address(n) 0UL |
c79a61f5 | 685 | # endif |
eed542d6 AT |
686 | #endif |
687 | ||
688 | #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0) | |
689 | #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1)) | |
690 | #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2)) | |
691 | #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3)) | |
692 | #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4)) | |
693 | #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) | |
694 | #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) | |
352ad25a | 695 | |
81d68a96 | 696 | #ifdef CONFIG_IRQSOFF_TRACER |
489f1396 IM |
697 | extern void time_hardirqs_on(unsigned long a0, unsigned long a1); |
698 | extern void time_hardirqs_off(unsigned long a0, unsigned long a1); | |
81d68a96 | 699 | #else |
4dbf6bc2 SR |
700 | static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { } |
701 | static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { } | |
81d68a96 SR |
702 | #endif |
703 | ||
6cd8a4bb | 704 | #ifdef CONFIG_PREEMPT_TRACER |
489f1396 IM |
705 | extern void trace_preempt_on(unsigned long a0, unsigned long a1); |
706 | extern void trace_preempt_off(unsigned long a0, unsigned long a1); | |
6cd8a4bb | 707 | #else |
b02ee9a3 MB |
708 | /* |
709 | * Use defines instead of static inlines because some arches will emit code | |
710 | * for the CALLER_ADDR macros, when we really want these to be real nops. | |
711 | */ | |
712 | # define trace_preempt_on(a0, a1) do { } while (0) | |
713 | # define trace_preempt_off(a0, a1) do { } while (0) | |
6cd8a4bb SR |
714 | #endif |
715 | ||
68bf21aa SR |
716 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
717 | extern void ftrace_init(void); | |
718 | #else | |
719 | static inline void ftrace_init(void) { } | |
720 | #endif | |
721 | ||
287b6e68 FW |
722 | /* |
723 | * Structure that defines an entry function trace. | |
724 | */ | |
725 | struct ftrace_graph_ent { | |
726 | unsigned long func; /* Current function */ | |
727 | int depth; | |
728 | }; | |
dd0e545f | 729 | |
caf4b323 FW |
730 | /* |
731 | * Structure that defines a return function trace. | |
732 | */ | |
fb52607a | 733 | struct ftrace_graph_ret { |
caf4b323 FW |
734 | unsigned long func; /* Current function */ |
735 | unsigned long long calltime; | |
736 | unsigned long long rettime; | |
0231022c FW |
737 | /* Number of functions that overran the depth limit for current task */ |
738 | unsigned long overrun; | |
287b6e68 | 739 | int depth; |
caf4b323 FW |
740 | }; |
741 | ||
62b915f1 JO |
742 | /* Types of the callback handlers for the function graph tracer */ | |
743 | typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ | |
744 | typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ | |
745 | ||
fb52607a | 746 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
8b96f011 | 747 | |
5ac9f622 | 748 | /* for init task */ |
f876d346 | 749 | #define INIT_FTRACE_GRAPH .ret_stack = NULL, |
5ac9f622 | 750 | |
712406a6 SR |
751 | /* |
752 | * Stack of return addresses for functions | |
753 | * of a thread. | |
754 | * Used in struct thread_info | |
755 | */ | |
756 | struct ftrace_ret_stack { | |
757 | unsigned long ret; | |
758 | unsigned long func; | |
759 | unsigned long long calltime; | |
a2a16d6a | 760 | unsigned long long subtime; |
71e308a2 | 761 | unsigned long fp; |
712406a6 SR |
762 | }; |
763 | ||
764 | /* | |
765 | * Primary handler of a function return. | |
766 | * It relies on ftrace_return_to_handler. | |
767 | * Defined in entry_32/64.S | |
768 | */ | |
769 | extern void return_to_handler(void); | |
770 | ||
771 | extern int | |
71e308a2 SR |
772 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, |
773 | unsigned long frame_pointer); | |
712406a6 | 774 | |
8b96f011 FW |
775 | /* |
776 | * Sometimes we don't want to trace a function with the function | |
777 | * graph tracer but we still want it traced by the usual function | |
778 | * tracer if the function graph tracer is not configured. | |
779 | */ | |
780 | #define __notrace_funcgraph notrace | |
781 | ||
bcbc4f20 FW |
782 | /* |
783 | * We want to know which function is an entry point of a hardirq. | |
784 | * That will help us flag it in the trace output. | |
785 | */ | |
786 | #define __irq_entry __attribute__((__section__(".irqentry.text"))) | |
787 | ||
788 | /* Limits of hardirq entrypoints */ | |
789 | extern char __irqentry_text_start[]; | |
790 | extern char __irqentry_text_end[]; | |
791 | ||
29ad23b0 | 792 | #define FTRACE_NOTRACE_DEPTH 65536 |
f201ae23 FW |
793 | #define FTRACE_RETFUNC_DEPTH 50 |
794 | #define FTRACE_RETSTACK_ALLOC_SIZE 32 | |
287b6e68 FW |
795 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
796 | trace_func_graph_ent_t entryfunc); | |
797 | ||
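A hedged sketch of hooking both ends with the declarations above; the entry handler's nonzero return selects which functions get traced (handler names are invented):

```c
#include <linux/ftrace.h>

/* entry handler: return nonzero to trace this function */
static int my_graph_entry(struct ftrace_graph_ent *ent)
{
	return ent->depth < 3;	/* e.g. only the first few call levels */
}

static void my_graph_return(struct ftrace_graph_ret *ret)
{
	/* calltime/rettime come from the trace clock */
	unsigned long long delta = ret->rettime - ret->calltime;

	(void)delta;	/* aggregate or report as needed */
}

static int __init my_graph_init(void)
{
	/* note the argument order: return handler first, then entry */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}
```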
1b2f121c | 798 | extern bool ftrace_graph_is_dead(void); |
14a866c5 SR |
799 | extern void ftrace_graph_stop(void); |
800 | ||
287b6e68 FW |
801 | /* The current handlers in use */ |
802 | extern trace_func_graph_ret_t ftrace_graph_return; | |
803 | extern trace_func_graph_ent_t ftrace_graph_entry; | |
caf4b323 | 804 | |
fb52607a | 805 | extern void unregister_ftrace_graph(void); |
f201ae23 | 806 | |
fb52607a FW |
807 | extern void ftrace_graph_init_task(struct task_struct *t); |
808 | extern void ftrace_graph_exit_task(struct task_struct *t); | |
868baf07 | 809 | extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); |
21a8c466 FW |
810 | |
811 | static inline int task_curr_ret_stack(struct task_struct *t) | |
812 | { | |
813 | return t->curr_ret_stack; | |
814 | } | |
380c4b14 FW |
815 | |
816 | static inline void pause_graph_tracing(void) | |
817 | { | |
818 | atomic_inc(&current->tracing_graph_pause); | |
819 | } | |
820 | ||
821 | static inline void unpause_graph_tracing(void) | |
822 | { | |
823 | atomic_dec(&current->tracing_graph_pause); | |
824 | } | |
5ac9f622 | 825 | #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ |
8b96f011 FW |
826 | |
827 | #define __notrace_funcgraph | |
bcbc4f20 | 828 | #define __irq_entry |
5ac9f622 | 829 | #define INIT_FTRACE_GRAPH |
8b96f011 | 830 | |
fb52607a FW |
831 | static inline void ftrace_graph_init_task(struct task_struct *t) { } |
832 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } | |
868baf07 | 833 | static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } |
21a8c466 | 834 | |
62b915f1 JO |
835 | static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
836 | trace_func_graph_ent_t entryfunc) | |
837 | { | |
838 | return -1; | |
839 | } | |
840 | static inline void unregister_ftrace_graph(void) { } | |
841 | ||
21a8c466 FW |
842 | static inline int task_curr_ret_stack(struct task_struct *tsk) |
843 | { | |
844 | return -1; | |
845 | } | |
380c4b14 FW |
846 | |
847 | static inline void pause_graph_tracing(void) { } | |
848 | static inline void unpause_graph_tracing(void) { } | |
5ac9f622 | 849 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
caf4b323 | 850 | |
ea4e2bc4 | 851 | #ifdef CONFIG_TRACING |
ea4e2bc4 SR |
852 | |
853 | /* flags for current->trace */ | |
854 | enum { | |
855 | TSK_TRACE_FL_TRACE_BIT = 0, | |
856 | TSK_TRACE_FL_GRAPH_BIT = 1, | |
857 | }; | |
858 | enum { | |
859 | TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, | |
860 | TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, | |
861 | }; | |
862 | ||
863 | static inline void set_tsk_trace_trace(struct task_struct *tsk) | |
864 | { | |
865 | set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); | |
866 | } | |
867 | ||
868 | static inline void clear_tsk_trace_trace(struct task_struct *tsk) | |
869 | { | |
870 | clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); | |
871 | } | |
872 | ||
873 | static inline int test_tsk_trace_trace(struct task_struct *tsk) | |
874 | { | |
875 | return tsk->trace & TSK_TRACE_FL_TRACE; | |
876 | } | |
877 | ||
878 | static inline void set_tsk_trace_graph(struct task_struct *tsk) | |
879 | { | |
880 | set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); | |
881 | } | |
882 | ||
883 | static inline void clear_tsk_trace_graph(struct task_struct *tsk) | |
884 | { | |
885 | clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); | |
886 | } | |
887 | ||
888 | static inline int test_tsk_trace_graph(struct task_struct *tsk) | |
889 | { | |
890 | return tsk->trace & TSK_TRACE_FL_GRAPH; | |
891 | } | |
892 | ||
cecbca96 FW |
893 | enum ftrace_dump_mode; |
894 | ||
895 | extern enum ftrace_dump_mode ftrace_dump_on_oops; | |
0daa2302 | 896 | extern int tracepoint_printk; |
526211bc | 897 | |
de7edd31 SRRH |
898 | extern void disable_trace_on_warning(void); |
899 | extern int __disable_trace_on_warning; | |
900 | ||
261842b7 SR |
901 | #ifdef CONFIG_PREEMPT |
902 | #define INIT_TRACE_RECURSION .trace_recursion = 0, | |
903 | #endif | |
904 | ||
de7edd31 SRRH |
905 | #else /* CONFIG_TRACING */ |
906 | static inline void disable_trace_on_warning(void) { } | |
ea4e2bc4 SR |
907 | #endif /* CONFIG_TRACING */ |
908 | ||
261842b7 SR |
909 | #ifndef INIT_TRACE_RECURSION |
910 | #define INIT_TRACE_RECURSION | |
911 | #endif | |
b1818748 | 912 | |
e7b8e675 MF |
913 | #ifdef CONFIG_FTRACE_SYSCALLS |
914 | ||
915 | unsigned long arch_syscall_addr(int nr); | |
916 | ||
917 | #endif /* CONFIG_FTRACE_SYSCALLS */ | |
918 | ||
16444a8a | 919 | #endif /* _LINUX_FTRACE_H */ |