/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>

#include <asm/ftrace.h>
/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
        !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif
#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
 *           could be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops,
 *           but the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 1,
        FTRACE_OPS_FL_CONTROL                   = 1 << 2,
        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 3,
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 4,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 5,
        FTRACE_OPS_FL_STUB                      = 1 << 6,
        FTRACE_OPS_FL_INITIALIZED               = 1 << 7,
        FTRACE_OPS_FL_DELETED                   = 1 << 8,
};
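
/*
 * Illustrative sketch (not part of this header; my_callback is a hypothetical
 * name): a callback registered with SAVE_REGS_IF_SUPPORTED must tolerate
 * regs == NULL on arches without CONFIG_DYNAMIC_FTRACE_WITH_REGS:
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              if (regs)
 *                      ;       // full register state is available here
 *      }
 */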
/*
 * Note, ftrace_ops can be referenced outside of RCU protection.
 * (Although, for perf, the control ops prevent that). If ftrace_ops is
 * allocated and not part of kernel core data, the unregistering of it will
 * perform a scheduling on all CPUs to make sure that there are no more users.
 * Depending on the load of the system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops               *next;
        unsigned long                   flags;
        int __percpu                    *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
        struct mutex                    regex_lock;
        struct ftrace_hash              *tramp_hash;
        unsigned long                   trampoline;
#endif
};
extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0,          /* Hook the call of the function */
        FTRACE_TYPE_RETURN,             /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly.  Never free an ftrace_op or modify the
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
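
/*
 * Illustrative sketch of the expected usage (my_trace_func and my_ops are
 * hypothetical names, not part of this header):
 *
 *      static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *                                struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              // called for every traced function
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func   = my_trace_func,
 *              .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */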
/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on the current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
                return;

        (*this_cpu_ptr(ops->disabled))--;
}
/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on the current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
                return;

        (*this_cpu_ptr(ops->disabled))++;
}
/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns the value of ftrace_ops::disabled on the current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
        WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
        return *this_cpu_ptr(ops->disabled);
}
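
/*
 * Illustrative usage sketch for the three helpers above (my_control_ops is a
 * hypothetical ops registered with FTRACE_OPS_FL_CONTROL); preemption must be
 * disabled by the caller:
 *
 *      preempt_disable();
 *      ftrace_function_local_disable(&my_control_ops);
 *      ...     // the callback will not run on this CPU
 *      ftrace_function_local_enable(&my_control_ops);
 *      preempt_enable();
 */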
extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
        return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos);
#endif
struct ftrace_func_command {
        struct list_head        list;
        char                    *name;
        int                     (*func)(struct ftrace_hash *hash,
                                        char *func, char *cmd,
                                        char *params, int enable);
};
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

void ftrace_bug(int err, unsigned long ip);

struct seq_file;

struct ftrace_probe_ops {
        void                    (*func)(unsigned long ip,
                                        unsigned long parent_ip,
                                        void **data);
        int                     (*init)(struct ftrace_probe_ops *ops,
                                        unsigned long ip, void **data);
        void                    (*free)(struct ftrace_probe_ops *ops,
                                        unsigned long ip, void **data);
        int                     (*print)(struct seq_file *m,
                                         unsigned long ip,
                                         struct ftrace_probe_ops *ops,
                                         void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                               void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
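
/*
 * Illustrative sketch: @glob selects which functions the probe attaches to
 * (my_probe_ops and my_data are hypothetical):
 *
 *      register_ftrace_function_probe("vfs_*", &my_probe_ops, my_data);
 *      ...
 *      unregister_ftrace_function_probe_func("vfs_*", &my_probe_ops);
 */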
extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);
/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 29),
        FTRACE_FL_TRAMP         = (1UL << 28),
        FTRACE_FL_TRAMP_EN      = (1UL << 27),
};

#define FTRACE_REF_MAX_SHIFT    27
#define FTRACE_FL_BITS          5
#define FTRACE_FL_MASKED_BITS   ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK          (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)   ((rec)->flags & ~FTRACE_FL_MASK)
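
/*
 * Illustrative sketch: code inspecting a dyn_ftrace record can combine the
 * reference counter with the mask bits, e.g.
 *
 *      if (ftrace_rec_count(rec) && (rec->flags & FTRACE_FL_ENABLED))
 *              ;       // at least one ops traces this site and it is patched
 */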
struct dyn_ftrace {
        unsigned long           ip; /* address of mcount call-site */
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};
int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Convert the call site from one address to another
 *                (e.g. to start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};
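
/*
 * Illustrative sketch of how a code-update loop might consume these values
 * (error handling omitted; not a reference implementation):
 *
 *      switch (ftrace_test_record(rec, enable)) {
 *      case FTRACE_UPDATE_IGNORE:
 *              break;
 *      case FTRACE_UPDATE_MAKE_CALL:
 *              ftrace_make_call(rec, ftrace_get_addr_new(rec));
 *              break;
 *      case FTRACE_UPDATE_MAKE_NOP:
 *              ftrace_make_nop(NULL, rec, ftrace_get_addr_curr(rec));
 *              break;
 *      }
 */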
enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_HASH     = (1 << 3),
        FTRACE_ITER_HASH        = (1 << 4),
        FTRACE_ITER_ENABLED     = (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))
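
/*
 * Illustrative sketch: iterating over all mcount records, e.g. from arch
 * code-patching loops:
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              ...     // inspect or patch rec->ip
 *      }
 */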
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;
int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);
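
/*
 * Illustrative sketch of the read/compare/write pattern described above
 * (the arch_* helper names are hypothetical, not real kernel functions):
 *
 *      old = arch_expected_call_insn(rec->ip, addr);
 *      new = arch_nop_insn();
 *      if (arch_verify_insn_at(rec->ip, old))  // -EFAULT or -EINVAL
 *              return error;
 *      return arch_patch_text(rec->ip, new) ? -EPERM : 0;
 */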
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
        return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
        return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}
/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                                           size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * the disable and the restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
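
/*
 * Illustrative usage sketch for the save/restore pair above:
 *
 *      int saved = __ftrace_enabled_save();
 *      ...     // region that must run with function tracing disabled
 *      __ftrace_enabled_restore(saved);
 */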
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif
/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        unsigned long long calltime;
        unsigned long long rettime;
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
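
/*
 * Illustrative sketch (hypothetical names): a graph tracer user supplies one
 * callback of each type and registers them together; the entry callback
 * returns non-zero when the current function should be traced:
 *
 *      static int my_graph_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_graph_return(struct ftrace_graph_ret *trace)
 *      {
 *              // e.g. duration = trace->rettime - trace->calltime;
 *      }
 *
 *      register_ftrace_graph(my_graph_return, my_graph_entry);
 */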
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#define INIT_FTRACE_GRAPH              .ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
        unsigned long long subtime;
        unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer);
/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph            notrace

/*
 * We want to know which function is an entrypoint of a hardirq.
 * That will help us to put a signal on output.
 */
#define __irq_entry            __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                 trace_func_graph_ent_t entryfunc);

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
        return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                        trace_func_graph_ent_t entryfunc)
{
        return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
        return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};
static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}
enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION           .trace_recursion = 0,
#endif

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */