/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif


struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by the ftrace_ops user to denote that the ftrace_ops
 *           can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called on it, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_CONTROL			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_ADDING			= 1 << 9,
	FTRACE_OPS_FL_REMOVING			= 1 << 10,
	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash	*notrace_hash;
	struct ftrace_hash	*filter_hash;
	struct mutex		regex_lock;
};
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection.
 * (Although, for perf, the control ops prevent that). If ftrace_ops is
 * allocated and not part of kernel core data, unregistering it will
 * schedule on all CPUs to make sure that there are no more users.
 * Depending on the load of the system that may take a bit of time.
 *
 * Any private data added must likewise not be freed until then, and if
 * private data is added to an ftrace_ops that is in core code, the user of
 * the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	void				*private;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	int				nr_trampolines;
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be marked read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

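/*
 * Illustrative sketch (not part of this header): a minimal function-tracing
 * callback and its registration. The names my_callback and my_ops are
 * hypothetical. The callback runs for every traced function, so it must be
 * fast, and unless it sets FTRACE_OPS_FL_RECURSION_SAFE it relies on the
 * ftrace core for recursion protection.
 *
 *	static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
 *					struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		trace_printk("func %ps called from %ps\n",
 *			     (void *)ip, (void *)parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
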
/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	return *this_cpu_ptr(ops->disabled);
}

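/*
 * Illustrative sketch (not part of this header, names hypothetical): a
 * CONTROL ftrace_ops can have its tracing switched on and off on the local
 * CPU without unregistering the ops. The helpers above touch the per-cpu
 * ops->disabled counter and must run with preemption disabled.
 *
 *	static struct ftrace_ops my_ctrl_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_CONTROL,
 *	};
 *
 *	register_ftrace_function(&my_ctrl_ops);
 *
 *	preempt_disable();
 *	ftrace_function_local_enable(&my_ctrl_ops);
 *	...
 *	ftrace_function_local_disable(&my_ctrl_ops);
 *	preempt_enable();
 */
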
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

252 | ||
3d083395 | 253 | #ifdef CONFIG_DYNAMIC_FTRACE |
31e88909 | 254 | |
000ab691 SR |
255 | int ftrace_arch_code_modify_prepare(void); |
256 | int ftrace_arch_code_modify_post_process(void); | |
257 | ||
c88fd863 SR |
258 | void ftrace_bug(int err, unsigned long ip); |
259 | ||
809dcf29 SR |
260 | struct seq_file; |
261 | ||
b6887d79 | 262 | struct ftrace_probe_ops { |
59df055f SR |
263 | void (*func)(unsigned long ip, |
264 | unsigned long parent_ip, | |
265 | void **data); | |
e67efb93 SRRH |
266 | int (*init)(struct ftrace_probe_ops *ops, |
267 | unsigned long ip, void **data); | |
268 | void (*free)(struct ftrace_probe_ops *ops, | |
269 | unsigned long ip, void **data); | |
809dcf29 SR |
270 | int (*print)(struct seq_file *m, |
271 | unsigned long ip, | |
b6887d79 | 272 | struct ftrace_probe_ops *ops, |
809dcf29 | 273 | void *data); |
59df055f SR |
274 | }; |
275 | ||
276 | extern int | |
b6887d79 | 277 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
59df055f SR |
278 | void *data); |
279 | extern void | |
b6887d79 | 280 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
59df055f SR |
281 | void *data); |
282 | extern void | |
b6887d79 SR |
283 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); |
284 | extern void unregister_ftrace_function_probe_all(char *glob); | |
59df055f | 285 | |
d88471cb | 286 | extern int ftrace_text_reserved(const void *start, const void *end); |
2cfa1978 | 287 | |
ea701f11 SR |
288 | extern int ftrace_nr_registered_ops(void); |
289 | ||
/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which holds a value from 0 to FTRACE_REF_MAX, is a
 * counter of the number of callbacks that have registered the function
 * that the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
};

#define FTRACE_REF_MAX_SHIFT	27
#define FTRACE_FL_BITS		5
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);

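/*
 * Illustrative sketch (not part of this header, names hypothetical):
 * restricting an ftrace_ops to a set of functions before registering it.
 * The filter takes the same glob syntax as set_ftrace_filter in tracefs,
 * and should be set up before register_ftrace_function() so that the
 * callback never sees unrelated functions.
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "kmalloc*", strlen("kmalloc*"), 1);
 *	ftrace_set_notrace(&my_ops, "kmalloc_order*", strlen("kmalloc_order*"), 0);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 */
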
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

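/*
 * Illustrative sketch (not part of this header, names hypothetical): adding
 * a custom command that users can invoke through set_ftrace_filter, e.g.
 * "echo 'do_fork:mycmd' > set_ftrace_filter". The func callback receives
 * the matched function glob, the command name and any parameters.
 *
 *	static int my_cmd_func(struct ftrace_hash *hash, char *func,
 *			       char *cmd, char *params, int enable)
 *	{
 *		pr_info("mycmd on %s (params: %s)\n", func, params);
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */
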
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - The function is traced, but the call site needs to be
 *                updated to call a different address
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))


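/*
 * Illustrative sketch (not part of this header): how arch code can walk all
 * dyn_ftrace records, e.g. while patching mcount call sites. The loop body
 * here is a hypothetical placeholder; real users are the arch's
 * ftrace_replace_code()-style routines.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		if (ftrace_rec_count(rec))
 *			pr_debug("callbacks attached at %pS\n", (void *)rec->ip);
 *	}
 */
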
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

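/*
 * Illustrative sketch (not part of this header) of the read/compare/write
 * pattern the comments above describe, shaped like an arch implementation
 * of ftrace_make_call(). The helpers arch_nop_insn(), arch_call_insn() and
 * arch_patch_insn() are hypothetical stand-ins for real arch primitives.
 *
 *	int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned long ip = rec->ip;
 *		u32 old, new, cur;
 *
 *		old = arch_nop_insn();
 *		new = arch_call_insn(ip, addr);
 *
 *		if (probe_kernel_read(&cur, (void *)ip, sizeof(cur)))
 *			return -EFAULT;
 *		if (cur != old)
 *			return -EINVAL;
 *		if (arch_patch_insn(ip, new))
 *			return -EPERM;
 *		return 0;
 *	}
 */
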
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline void ftrace_module_init(struct module *mod) {}
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

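/*
 * Illustrative sketch (not part of this header): the intended save/restore
 * pairing. The caller provides its own serialization (my_lock is
 * hypothetical) so that ftrace_enabled cannot change between the two calls.
 *
 *	int saved;
 *
 *	mutex_lock(&my_lock);
 *	saved = __ftrace_enabled_save();
 *	...			// section that must run with ftrace disabled
 *	__ftrace_enabled_restore(saved);
 *	mutex_unlock(&my_lock);
 */
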
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

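/*
 * Illustrative use (not part of this header): CALLER_ADDRn gives the return
 * address n call frames up, e.g. when invoking the latency tracer hooks
 * declared below:
 *
 *	trace_preempt_off(CALLER_ADDR0, CALLER_ADDR1);
 */
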
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH	.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph	notrace

/*
 * We want to know which function is an entry point of a hardirq.
 * That will help us to put a marker in the output.
 */
#define __irq_entry	__attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);

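/*
 * Illustrative sketch (not part of this header, names hypothetical):
 * registering function graph entry/return handlers. The entry handler
 * returns nonzero to trace this function (and have the return handler
 * called for it) or 0 to skip it.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return trace->depth < 3;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		trace_printk("%ps took %llu ns\n", (void *)trace->func,
 *			     trace->rettime - trace->calltime);
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */
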
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION	.trace_recursion = 0,
#endif

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */