1/*
2 * Ftrace header. For implementation details beyond the random comments
3 * scattered below, see: Documentation/trace/ftrace-design.txt
4 */
5
6#ifndef _LINUX_FTRACE_H
7#define _LINUX_FTRACE_H
8
9#include <linux/trace_clock.h>
10#include <linux/kallsyms.h>
11#include <linux/linkage.h>
12#include <linux/bitops.h>
13#include <linux/ktime.h>
14#include <linux/sched.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/fs.h>
18
19#include <asm/ftrace.h>
20
21struct module;
22struct ftrace_hash;
23
24#ifdef CONFIG_FUNCTION_TRACER
25
26extern int ftrace_enabled;
27extern int
28ftrace_enable_sysctl(struct ctl_table *table, int write,
29 void __user *buffer, size_t *lenp,
30 loff_t *ppos);
31
32typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
33
34/*
35 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
36 * set in the flags member.
37 *
38 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 39 * GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops
 40 *          is part of the global tracers sharing the same filter
 41 *          via set_ftrace_* debugfs files.
 42 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 43 *           allocated ftrace_ops which need special care
 44 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
 45 *           can be controlled by the following calls:
 46 *             ftrace_function_local_enable
 47 *             ftrace_function_local_disable
48 */
49enum {
50 FTRACE_OPS_FL_ENABLED = 1 << 0,
51 FTRACE_OPS_FL_GLOBAL = 1 << 1,
52 FTRACE_OPS_FL_DYNAMIC = 1 << 2,
53 FTRACE_OPS_FL_CONTROL = 1 << 3,
54};
55
56struct ftrace_ops {
57 ftrace_func_t func;
58 struct ftrace_ops *next;
59 unsigned long flags;
60 int __percpu *disabled;
61#ifdef CONFIG_DYNAMIC_FTRACE
62 struct ftrace_hash *notrace_hash;
63 struct ftrace_hash *filter_hash;
64#endif
65};
66
67extern int function_trace_stop;
68
69/*
70 * Type of the current tracing.
71 */
72enum ftrace_tracing_type_t {
73 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
74 FTRACE_TYPE_RETURN, /* Hook the return of the function */
75};
76
77/* Current tracing type, default is FTRACE_TYPE_ENTER */
78extern enum ftrace_tracing_type_t ftrace_tracing_type;
79
80/**
 81 * ftrace_stop - stop the function tracer.
 82 *
 83 * A quick way to stop the function tracer. Note this is an on/off switch,
84 * it is not something that is recursive like preempt_disable.
85 * This does not disable the calling of mcount, it only stops the
86 * calling of functions from mcount.
87 */
88static inline void ftrace_stop(void)
89{
90 function_trace_stop = 1;
91}
92
93/**
94 * ftrace_start - start the function tracer.
95 *
96 * This function is the inverse of ftrace_stop. This does not enable
 97 * function tracing if the function tracer is disabled. This only
98 * sets the function tracer flag to continue calling the functions
99 * from mcount.
100 */
101static inline void ftrace_start(void)
102{
103 function_trace_stop = 0;
104}
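
/*
 * Usage sketch for ftrace_stop()/ftrace_start() (illustrative only), e.g.
 * around a fragile code path during which mcount must not call out to the
 * tracers:
 *
 *	ftrace_stop();
 *	...	code that must not be traced
 *	ftrace_start();
 */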
105
106/*
 107 * The ftrace_ops must be static and should also
 108 * be read_mostly. These functions do modify read_mostly variables
 109 * so use them sparingly. Never free an ftrace_ops or modify the
 110 * next pointer after it has been registered. Even after unregistering
 111 * it, the next pointer may still be used internally (see the sketch below).
112 */
113int register_ftrace_function(struct ftrace_ops *ops);
114int unregister_ftrace_function(struct ftrace_ops *ops);
115void clear_ftrace_function(void);
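
/*
 * Minimal usage sketch (illustrative only; my_trace_callback and my_ops are
 * hypothetical names, not part of this header). The callback receives the
 * address of the traced function in ip and of its call site in parent_ip:
 *
 *	static void notrace my_trace_callback(unsigned long ip,
 *					      unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_trace_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */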
116
117/**
118 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
119 *
120 * This function enables tracing on current cpu by decreasing
121 * the per cpu control variable.
122 * It must be called with preemption disabled and only on ftrace_ops
123 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
124 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
125 */
126static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
127{
128 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
129 return;
130
131 (*this_cpu_ptr(ops->disabled))--;
132}
133
134/**
 135 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 136 *
 137 * This function disables tracing on current cpu by increasing
 138 * the per cpu control variable.
139 * It must be called with preemption disabled and only on ftrace_ops
140 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
141 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
142 */
143static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
144{
145 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
146 return;
147
148 (*this_cpu_ptr(ops->disabled))++;
149}
150
151/**
152 * ftrace_function_local_disabled - returns ftrace_ops disabled value
153 * on current cpu
154 *
155 * This function returns value of ftrace_ops::disabled on current cpu.
156 * It must be called with preemption disabled and only on ftrace_ops
157 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
158 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
159 */
160static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
161{
162 WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
163 return *this_cpu_ptr(ops->disabled);
164}
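
/*
 * Usage sketch for the three helpers above (my_control_ops is a hypothetical
 * ftrace_ops registered with FTRACE_OPS_FL_CONTROL set). Preemption must be
 * disabled around the per-cpu accessors, as documented above:
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(&my_control_ops);
 *	...	code that must not be traced by this ops on this cpu
 *	ftrace_function_local_enable(&my_control_ops);
 *	preempt_enable();
 */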
165
166extern void ftrace_stub(unsigned long a0, unsigned long a1);
167
168#else /* !CONFIG_FUNCTION_TRACER */
169/*
170 * (un)register_ftrace_function must be a macro since the ops parameter
171 * must not be evaluated.
172 */
173#define register_ftrace_function(ops) ({ 0; })
174#define unregister_ftrace_function(ops) ({ 0; })
175static inline void clear_ftrace_function(void) { }
176static inline void ftrace_kill(void) { }
177static inline void ftrace_stop(void) { }
178static inline void ftrace_start(void) { }
179#endif /* CONFIG_FUNCTION_TRACER */
180
181#ifdef CONFIG_STACK_TRACER
182extern int stack_tracer_enabled;
183int
184stack_trace_sysctl(struct ctl_table *table, int write,
185 void __user *buffer, size_t *lenp,
186 loff_t *ppos);
187#endif
188
189struct ftrace_func_command {
190 struct list_head list;
191 char *name;
192 int (*func)(struct ftrace_hash *hash,
193 char *func, char *cmd,
194 char *params, int enable);
195};
196
197#ifdef CONFIG_DYNAMIC_FTRACE
198
199int ftrace_arch_code_modify_prepare(void);
200int ftrace_arch_code_modify_post_process(void);
201
202void ftrace_bug(int err, unsigned long ip);
203
204struct seq_file;
205
206struct ftrace_probe_ops {
207 void (*func)(unsigned long ip,
208 unsigned long parent_ip,
209 void **data);
210 int (*callback)(unsigned long ip, void **data);
211 void (*free)(void **data);
212 int (*print)(struct seq_file *m,
213 unsigned long ip,
214 struct ftrace_probe_ops *ops,
215 void *data);
216};
217
218extern int
219register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
220 void *data);
221extern void
222unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
223 void *data);
224extern void
225unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
226extern void unregister_ftrace_function_probe_all(char *glob);
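
/*
 * Sketch of attaching a probe to all functions matching a glob (hypothetical
 * names; data is handed back to the callback through the data argument):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */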
227
228extern int ftrace_text_reserved(void *start, void *end);
229
230enum {
231 FTRACE_FL_ENABLED = (1 << 30),
232};
233
234#define FTRACE_FL_MASK (0x3UL << 30)
235#define FTRACE_REF_MAX ((1 << 30) - 1)
236
237struct dyn_ftrace {
238 union {
239 unsigned long ip; /* address of mcount call-site */
240 struct dyn_ftrace *freelist;
241 };
242 unsigned long flags;
243 struct dyn_arch_ftrace arch;
244};
245
246int ftrace_force_update(void);
247int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
248 int len, int reset);
249int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
250 int len, int reset);
251void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
252void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
253void ftrace_free_filter(struct ftrace_ops *ops);
254
255int register_ftrace_command(struct ftrace_func_command *cmd);
256int unregister_ftrace_command(struct ftrace_func_command *cmd);
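
/*
 * Sketch of registering a filter command (hypothetical names; the command
 * would then be usable from set_ftrace_filter as "<function>:mycmd"):
 *
 *	static int my_cmd_func(struct ftrace_hash *hash, char *func,
 *			       char *cmd, char *params, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */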
257
258enum {
259 FTRACE_UPDATE_CALLS = (1 << 0),
260 FTRACE_DISABLE_CALLS = (1 << 1),
261 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
262 FTRACE_START_FUNC_RET = (1 << 3),
263 FTRACE_STOP_FUNC_RET = (1 << 4),
264};
265
266enum {
267 FTRACE_UPDATE_IGNORE,
268 FTRACE_UPDATE_MAKE_CALL,
269 FTRACE_UPDATE_MAKE_NOP,
270};
271
272enum {
273 FTRACE_ITER_FILTER = (1 << 0),
274 FTRACE_ITER_NOTRACE = (1 << 1),
275 FTRACE_ITER_PRINTALL = (1 << 2),
276 FTRACE_ITER_DO_HASH = (1 << 3),
277 FTRACE_ITER_HASH = (1 << 4),
278 FTRACE_ITER_ENABLED = (1 << 5),
279};
280
281void arch_ftrace_update_code(int command);
282
283struct ftrace_rec_iter;
284
285struct ftrace_rec_iter *ftrace_rec_iter_start(void);
286struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
287struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
288
289#define for_ftrace_rec_iter(iter) \
290 for (iter = ftrace_rec_iter_start(); \
291 iter; \
292 iter = ftrace_rec_iter_next(iter))
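
/*
 * Sketch of walking all mcount records with the iterator above, e.g. from an
 * arch_ftrace_update_code() implementation:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...	inspect or patch the call site at rec->ip
 *	}
 */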
293
294
295int ftrace_update_record(struct dyn_ftrace *rec, int enable);
296int ftrace_test_record(struct dyn_ftrace *rec, int enable);
297void ftrace_run_stop_machine(int command);
298int ftrace_location(unsigned long ip);
299
300extern ftrace_func_t ftrace_trace_function;
301
302int ftrace_regex_open(struct ftrace_ops *ops, int flag,
303 struct inode *inode, struct file *file);
304ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
305 size_t cnt, loff_t *ppos);
306ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
307 size_t cnt, loff_t *ppos);
308loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
309int ftrace_regex_release(struct inode *inode, struct file *file);
310
311void __init
312ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
313
314/* defined in arch */
315extern int ftrace_ip_converted(unsigned long ip);
316extern int ftrace_dyn_arch_init(void *data);
317extern int ftrace_update_ftrace_func(ftrace_func_t func);
318extern void ftrace_caller(void);
319extern void ftrace_call(void);
320extern void mcount_call(void);
321
322#ifndef FTRACE_ADDR
323#define FTRACE_ADDR ((unsigned long)ftrace_caller)
324#endif
325#ifdef CONFIG_FUNCTION_GRAPH_TRACER
326extern void ftrace_graph_caller(void);
327extern int ftrace_enable_ftrace_graph_caller(void);
328extern int ftrace_disable_ftrace_graph_caller(void);
329#else
330static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
331static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
332#endif
333
334/**
335 * ftrace_make_nop - convert code into nop
336 * @mod: module structure if called by module load initialization
337 * @rec: the mcount call site record
338 * @addr: the address that the call site should be calling
339 *
340 * This is a very sensitive operation and great care needs
341 * to be taken by the arch. The operation should carefully
342 * read the location, check to see if what is read is indeed
343 * what we expect it to be, and then on success of the compare,
344 * it should write to the location.
345 *
346 * The code segment at @rec->ip should be a caller to @addr
347 *
348 * Return must be:
349 * 0 on success
350 * -EFAULT on error reading the location
351 * -EINVAL on a failed compare of the contents
352 * -EPERM on error writing to the location
353 * Any other value will be considered a failure.
354 */
355extern int ftrace_make_nop(struct module *mod,
356 struct dyn_ftrace *rec, unsigned long addr);
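
/*
 * Illustrative arch-side sketch of the read/compare/write contract above.
 * ftrace_expected_call() and ftrace_nop_replace() are hypothetical helpers
 * producing the expected call and nop instruction bytes, MCOUNT_INSN_SIZE
 * comes from the arch's <asm/ftrace.h>, and real ports usually need an
 * arch-specific text-poking mechanism for the final write:
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *	const unsigned char *expect = ftrace_expected_call(rec->ip, addr);
 *
 *	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (probe_kernel_write((void *)rec->ip, ftrace_nop_replace(),
 *			       MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */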
357
358/**
359 * ftrace_make_call - convert a nop call site into a call to addr
360 * @rec: the mcount call site record
361 * @addr: the address that the call site should call
362 *
363 * This is a very sensitive operation and great care needs
364 * to be taken by the arch. The operation should carefully
365 * read the location, check to see if what is read is indeed
366 * what we expect it to be, and then on success of the compare,
367 * it should write to the location.
368 *
369 * The code segment at @rec->ip should be a nop
370 *
371 * Return must be:
372 * 0 on success
373 * -EFAULT on error reading the location
374 * -EINVAL on a failed compare of the contents
375 * -EPERM on error writing to the location
376 * Any other value will be considered a failure.
377 */
378extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
379
380/* May be defined in arch */
381extern int ftrace_arch_read_dyn_info(char *buf, int size);
382
383extern int skip_trace(unsigned long ip);
384
385extern void ftrace_disable_daemon(void);
386extern void ftrace_enable_daemon(void);
387#else
388static inline int skip_trace(unsigned long ip) { return 0; }
389static inline int ftrace_force_update(void) { return 0; }
390static inline void ftrace_disable_daemon(void) { }
391static inline void ftrace_enable_daemon(void) { }
392static inline void ftrace_release_mod(struct module *mod) {}
393static inline int register_ftrace_command(struct ftrace_func_command *cmd)
394{
395 return -EINVAL;
396}
397static inline int unregister_ftrace_command(char *cmd_name)
398{
399 return -EINVAL;
400}
401static inline int ftrace_text_reserved(void *start, void *end)
402{
403 return 0;
404}
405
406/*
 407 * Again, users of functions that take an ftrace_ops may not
 408 * have one defined when ftrace is not enabled, but these
 409 * functions may still be called. Use a macro instead of an inline.
410 */
411#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
412#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
413#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
414#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
415#define ftrace_free_filter(ops) do { } while (0)
416
417static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
418 size_t cnt, loff_t *ppos) { return -ENODEV; }
419static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
420 size_t cnt, loff_t *ppos) { return -ENODEV; }
421static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
422{
423 return -ENODEV;
424}
425static inline int
426ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
427#endif /* CONFIG_DYNAMIC_FTRACE */
428
 429/* totally disable ftrace - cannot be re-enabled after this */
430void ftrace_kill(void);
431
432static inline void tracer_disable(void)
433{
434#ifdef CONFIG_FUNCTION_TRACER
435 ftrace_enabled = 0;
436#endif
437}
438
439/*
440 * Ftrace disable/restore without lock. Some synchronization mechanism
 441 * must be used to prevent ftrace_enabled from being changed between
442 * disable/restore.
443 */
444static inline int __ftrace_enabled_save(void)
445{
446#ifdef CONFIG_FUNCTION_TRACER
447 int saved_ftrace_enabled = ftrace_enabled;
448 ftrace_enabled = 0;
449 return saved_ftrace_enabled;
450#else
451 return 0;
452#endif
453}
454
455static inline void __ftrace_enabled_restore(int enabled)
456{
457#ifdef CONFIG_FUNCTION_TRACER
458 ftrace_enabled = enabled;
459#endif
460}
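
/*
 * Usage sketch (the caller provides the serialization mentioned above):
 *
 *	int saved;
 *
 *	saved = __ftrace_enabled_save();
 *	...	code that must run with ftrace_enabled forced off
 *	__ftrace_enabled_restore(saved);
 */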
461
462#ifndef HAVE_ARCH_CALLER_ADDR
463# ifdef CONFIG_FRAME_POINTER
464# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
465# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
466# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
467# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
468# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
469# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
470# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
471# else
472# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
473# define CALLER_ADDR1 0UL
474# define CALLER_ADDR2 0UL
475# define CALLER_ADDR3 0UL
476# define CALLER_ADDR4 0UL
477# define CALLER_ADDR5 0UL
478# define CALLER_ADDR6 0UL
479# endif
480#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
481
482#ifdef CONFIG_IRQSOFF_TRACER
483 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
484 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
485#else
486 static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
487 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
488#endif
489
490#ifdef CONFIG_PREEMPT_TRACER
491 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
492 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
493#else
494 static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
495 static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
496#endif
497
498#ifdef CONFIG_FTRACE_MCOUNT_RECORD
499extern void ftrace_init(void);
500#else
501static inline void ftrace_init(void) { }
502#endif
503
504/*
505 * Structure that defines an entry function trace.
506 */
507struct ftrace_graph_ent {
508 unsigned long func; /* Current function */
509 int depth;
510};
511
512/*
513 * Structure that defines a return function trace.
514 */
515struct ftrace_graph_ret {
516 unsigned long func; /* Current function */
517 unsigned long long calltime;
518 unsigned long long rettime;
519 /* Number of functions that overran the depth limit for current task */
520 unsigned long overrun;
521 int depth;
522};
523
 524/* Type of the callback handlers for tracing function graph */
525typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
526typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
527
528#ifdef CONFIG_FUNCTION_GRAPH_TRACER
529
530/* for init task */
531#define INIT_FTRACE_GRAPH .ret_stack = NULL,
532
533/*
534 * Stack of return addresses for functions
535 * of a thread.
536 * Used in struct thread_info
537 */
538struct ftrace_ret_stack {
539 unsigned long ret;
540 unsigned long func;
541 unsigned long long calltime;
542 unsigned long long subtime;
543 unsigned long fp;
544};
545
546/*
547 * Primary handler of a function return.
 548 * It relies on ftrace_return_to_handler.
549 * Defined in entry_32/64.S
550 */
551extern void return_to_handler(void);
552
553extern int
554ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
555 unsigned long frame_pointer);
556
557/*
 558 * Sometimes we don't want to trace a function with the function
 559 * graph tracer but we still want it to be traced by the usual function
 560 * tracer if the function graph tracer is not configured.
561 */
562#define __notrace_funcgraph notrace
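
/*
 * Example: a function annotated this way is not traced when the graph
 * tracer is configured, but remains visible to the plain function tracer
 * otherwise (my_helper is a hypothetical name):
 *
 *	static int __notrace_funcgraph my_helper(void)
 *	{
 *		return 0;
 *	}
 */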
563
564/*
 565 * We want to know which function is an entry point of a hardirq.
 566 * That will help us put a marker in the trace output.
567 */
568#define __irq_entry __attribute__((__section__(".irqentry.text")))
569
570/* Limits of hardirq entrypoints */
571extern char __irqentry_text_start[];
572extern char __irqentry_text_end[];
573
574#define FTRACE_RETFUNC_DEPTH 50
575#define FTRACE_RETSTACK_ALLOC_SIZE 32
576extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
577 trace_func_graph_ent_t entryfunc);
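
/*
 * Registration sketch (hypothetical handler names). The entry handler is
 * expected to return nonzero for functions whose return should also be
 * traced, and zero to skip them:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */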
578
579extern void ftrace_graph_stop(void);
580
581/* The current handlers in use */
582extern trace_func_graph_ret_t ftrace_graph_return;
583extern trace_func_graph_ent_t ftrace_graph_entry;
584
585extern void unregister_ftrace_graph(void);
586
587extern void ftrace_graph_init_task(struct task_struct *t);
588extern void ftrace_graph_exit_task(struct task_struct *t);
589extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
590
591static inline int task_curr_ret_stack(struct task_struct *t)
592{
593 return t->curr_ret_stack;
594}
595
596static inline void pause_graph_tracing(void)
597{
598 atomic_inc(&current->tracing_graph_pause);
599}
600
601static inline void unpause_graph_tracing(void)
602{
603 atomic_dec(&current->tracing_graph_pause);
604}
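
/*
 * Usage sketch: keep the graph tracer away from the current task around a
 * section that could otherwise recurse into the tracer:
 *
 *	pause_graph_tracing();
 *	...
 *	unpause_graph_tracing();
 */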
605#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
606
607#define __notrace_funcgraph
608#define __irq_entry
609#define INIT_FTRACE_GRAPH
610
611static inline void ftrace_graph_init_task(struct task_struct *t) { }
612static inline void ftrace_graph_exit_task(struct task_struct *t) { }
613static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
614
615static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
616 trace_func_graph_ent_t entryfunc)
617{
618 return -1;
619}
620static inline void unregister_ftrace_graph(void) { }
621
622static inline int task_curr_ret_stack(struct task_struct *tsk)
623{
624 return -1;
625}
626
627static inline void pause_graph_tracing(void) { }
628static inline void unpause_graph_tracing(void) { }
629#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
630
631#ifdef CONFIG_TRACING
632
633/* flags for current->trace */
634enum {
635 TSK_TRACE_FL_TRACE_BIT = 0,
636 TSK_TRACE_FL_GRAPH_BIT = 1,
637};
638enum {
639 TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
640 TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
641};
642
643static inline void set_tsk_trace_trace(struct task_struct *tsk)
644{
645 set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
646}
647
648static inline void clear_tsk_trace_trace(struct task_struct *tsk)
649{
650 clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
651}
652
653static inline int test_tsk_trace_trace(struct task_struct *tsk)
654{
655 return tsk->trace & TSK_TRACE_FL_TRACE;
656}
657
658static inline void set_tsk_trace_graph(struct task_struct *tsk)
659{
660 set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
661}
662
663static inline void clear_tsk_trace_graph(struct task_struct *tsk)
664{
665 clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
666}
667
668static inline int test_tsk_trace_graph(struct task_struct *tsk)
669{
670 return tsk->trace & TSK_TRACE_FL_GRAPH;
671}
672
673enum ftrace_dump_mode;
674
675extern enum ftrace_dump_mode ftrace_dump_on_oops;
676
677#ifdef CONFIG_PREEMPT
678#define INIT_TRACE_RECURSION .trace_recursion = 0,
679#endif
680
681#endif /* CONFIG_TRACING */
682
683#ifndef INIT_TRACE_RECURSION
684#define INIT_TRACE_RECURSION
685#endif
686
687#ifdef CONFIG_FTRACE_SYSCALLS
688
689unsigned long arch_syscall_addr(int nr);
690
691#endif /* CONFIG_FTRACE_SYSCALLS */
692
693#endif /* _LINUX_FTRACE_H */