/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
#else
static inline void trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * PER_CPU, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are attribute flags that can only be set before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * PER_CPU - set manually by ftrace_ops user to denote the ftrace_ops
 *           could be controlled by following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops
 *           but, the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *           that the callback has its own recursion protection. If it does
 *           not set this, then the ftrace infrastructure will add recursion
 *           protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (set the first
 *           time register_ftrace_function() is called, which initializes the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *           The arch specific code sets this flag when it allocated a
 *           trampoline. This lets the arch know that it can update the
 *           trampoline in case the callback function changes.
 *           The ftrace_ops trampoline can be set by the ftrace users, and
 *           in such cases the arch must not modify it. Only the arch ftrace
 *           core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *           SAVE_REGS. If another ops with this flag set is already registered
 *           for any of the functions that this ops will be registered for, then
 *           this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops callback may only be called when RCU is watching.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_ADDING			= 1 << 9,
	FTRACE_OPS_FL_REMOVING			= 1 << 10,
	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
	FTRACE_OPS_FL_PID			= 1 << 14,
	FTRACE_OPS_FL_RCU			= 1 << 15,
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
	struct mutex			regex_lock;
};
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,	/* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_op or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

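/*
 * A minimal usage sketch (illustrative only; my_callback, my_ops and
 * my_hits are hypothetical).  The callback must match ftrace_func_t;
 * leaving RECURSION_SAFE out of flags lets the core wrap the callback
 * in its own recursion protection:
 *
 *	static atomic_t my_hits = ATOMIC_INIT(0);
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	... register_ftrace_function(&my_ops); ...
 *	... unregister_ftrace_function(&my_ops); ...
 */
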
/**
 * ftrace_function_local_enable - enable ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
	return *this_cpu_ptr(ops->disabled);
}

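/*
 * Illustrative pairing (sketch; my_per_cpu_ops is hypothetical).  With
 * an ops registered FTRACE_OPS_FL_PER_CPU, the core invokes the callback
 * only on CPUs whose counter is zero, so a sensitive region can be
 * masked locally:
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(&my_per_cpu_ops);
 *	... region that must not be traced on this CPU ...
 *	ftrace_function_local_enable(&my_per_cpu_ops);
 *	preempt_enable();
 */
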
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER

#define STACK_TRACE_ENTRIES 500

struct stack_trace;

extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;

extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*init)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

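/*
 * Sketch of a probe (illustrative; my_probe_func and my_probe_ops are
 * hypothetical).  A probe attaches a handler to every function matching
 * a glob, e.g. all functions starting with "vfs_":
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		... runs on entry of every matched function ...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe_func("vfs_*", &my_probe_ops);
 */
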
extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
};

#define FTRACE_REF_MAX_SHIFT	25
#define FTRACE_FL_BITS		7
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)

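/*
 * Worked example of the split: with FTRACE_REF_MAX_SHIFT at 25 and
 * FTRACE_FL_BITS at 7, the top seven bits (25-31) hold the FTRACE_FL_*
 * mask (FTRACE_FL_MASK == 0xfe000000) and the low 25 bits hold the
 * callback reference count, so FTRACE_REF_MAX == 0x1ffffff (33554431)
 * and ftrace_rec_count() simply strips the flag bits off rec->flags.
 */
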
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);

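/*
 * Typical ordering (sketch; my_ops is hypothetical): narrow the ops to
 * the functions of interest before registering it, and free the filter
 * after unregistering:
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 *
 * A single call site can be pinned by address instead:
 *
 *	ftrace_set_filter_ip(&my_ops, kallsyms_lookup_name("schedule"), 0, 0);
 */
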
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change the call site to call a different trampoline
 *                (e.g. to start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))

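/*
 * Arch code can walk every mcount record with the iterator above.
 * Sketch (the loop body is illustrative):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... inspect rec->ip and rec->flags ...
 *	}
 */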

int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

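/*
 * An arch implementation typically follows a read/compare/write pattern.
 * A sketch, with hypothetical helpers (make_nop_insn, make_call_insn and
 * write_text stand in for arch-specific code; probe_kernel_read() is the
 * generic kernel helper):
 *
 *	int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned char old[MCOUNT_INSN_SIZE];	(expected contents)
 *		unsigned char cur[MCOUNT_INSN_SIZE];	(actual contents)
 *		unsigned char new[MCOUNT_INSN_SIZE];	(replacement)
 *
 *		make_nop_insn(old);
 *		make_call_insn(new, rec->ip, addr);
 *		if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *			return -EFAULT;
 *		if (memcmp(cur, old, MCOUNT_INSN_SIZE))
 *			return -EINVAL;
 *		if (write_text((void *)rec->ip, new, MCOUNT_INSN_SIZE))
 *			return -EPERM;
 *		return 0;
 *	}
 */
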
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline void ftrace_module_init(struct module *mod) {}
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

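/*
 * Usage sketch: bracket a region where function tracing must stay off,
 * restoring whatever state was there before (the caller provides the
 * synchronization against concurrent changes to ftrace_enabled):
 *
 *	int saved = __ftrace_enabled_save();
 *	... work done with ftrace forced off ...
 *	__ftrace_enabled_restore(saved);
 */
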
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH	.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph	notrace

/*
 * We want to know which function is an entry point of a hardirq,
 * so that we can flag it in the trace output.
 */
#define __irq_entry __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);

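/*
 * Registration sketch (my_graph_entry and my_graph_return are
 * hypothetical).  The entry handler returns nonzero to trace the
 * function and 0 to skip it:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *ent)
 *	{
 *		return ent->depth < 3;	(e.g. only trace shallow calls)
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *ret)
 *	{
 *		... ret->rettime - ret->calltime is the time spent ...
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */
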
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
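
/*
 * Usage sketch: code that must not generate graph trace entries for the
 * current task can bracket itself with the pair above, e.g. a path that
 * would otherwise recurse into the tracer:
 *
 *	pause_graph_tracing();
 *	... emit diagnostics ...
 *	unpause_graph_tracing();
 */
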
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION	.trace_recursion = 0,
#endif

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */