#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <trace/kmemtrace.h>
#include <trace/power.h>
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,

	__TRACE_LAST_TYPE,
};
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};
/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ent		graph_ent;
};
/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ret		ret;
};
extern struct tracer boot_tracer;
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};
/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};
#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};
struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};
/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			depth;
	char			buf[];
};
#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};
struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};
struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};
struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};
struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};
struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};
struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};
struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
	size_t			bytes_req;
	size_t			bytes_alloc;
	gfp_t			gfp_flags;
	int			node;
};
struct kmemtrace_free_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};
#define TRACE_BUF_SIZE		1024
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};
struct trace_iterator;
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
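/*
 * Illustrative sketch (not part of the original header): tracers
 * typically reach the per-CPU descriptor through the data[] array
 * and bump "disabled" around recording, e.g.:
 *
 *	int cpu = raw_smp_processor_id();
 *	struct trace_array_cpu *data = tr->data[cpu];
 *
 *	atomic_inc(&data->disabled);
 *	... record an entry ...
 *	atomic_dec(&data->disabled);
 */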
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		__ftrace_bad_type();					\
	} while (0)
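/*
 * Illustrative sketch (not part of the original header): to teach
 * trace_assign_type() about a new entry type, a hypothetical
 * "struct my_entry" with id TRACE_MY_ID would get one more line
 * just before __ftrace_bad_type():
 *
 *	IF_ASSIGN(var, ent, struct my_entry, TRACE_MY_ID);	\
 *
 * with struct my_entry embedding a struct trace_entry as its "ent"
 * member and TRACE_MY_ID added to enum trace_type.
 */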
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
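/*
 * Illustrative sketch (not part of the original header): a minimal
 * print_line callback tying trace_assign_type() to these return
 * values. The callback name is hypothetical, and trace_seq_printf()
 * (which returns 0 when the seq buffer is full) lives outside this
 * header.
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		struct ftrace_entry *field;
 *
 *		if (iter->ent->type != TRACE_FN)
 *			return TRACE_TYPE_UNHANDLED;
 *
 *		trace_assign_type(field, iter->ent);
 *		if (!trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *				      field->ip, field->parent_ip))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */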
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};
/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
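/*
 * Illustrative sketch (not part of the original header): defining
 * tracer options with TRACER_OPT(). All names are hypothetical; the
 * opts array is scanned until an entry with a NULL name, so it ends
 * with an empty initializer.
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */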
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	/* Your tracer should raise a warning if init fails */
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
	struct tracer_stat	*stats;
};
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};
/*
 * Trace iterator - used by printout routines that present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
struct ring_buffer_event;

struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
						    unsigned char type,
						    unsigned long len,
						    unsigned long flags,
						    int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);
void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
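/*
 * Illustrative sketch (not part of the original header): defining and
 * registering a minimal tracer. All names are hypothetical; a real
 * tracer would usually also provide print_line, start/stop, etc.
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		tracing_reset_online_cpus(tr);
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(init_my_tracer);
 */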
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
void __trace_stack(struct trace_array *tr,
		   unsigned long flags,
		   int skip, int pc);

extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
extern char *trace_find_cmdline(int pid);
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);

extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t
print_graph_function(struct trace_iterator *iter);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
	TRACE_ITER_CONTEXT_INFO		= 0x20000 /* Print pid/cpu/time */
};
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
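/*
 * Illustrative sketch (not part of the original header): output code
 * tests these bits against the global trace_flags, e.g.:
 *
 *	if (trace_flags & TRACE_ITER_SYM_ADDR)
 *		... also print the raw address of the symbol ...
 */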
extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption.
 * If resched is set, then we are either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
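/*
 * Illustrative sketch (not part of the original header): the two
 * helpers above are always used as a pair around code that must not
 * trigger a reschedule:
 *
 *	int resched;
 *
 *	resched = ftrace_preempt_disable();
 *	... tracing work that may run inside the scheduler ...
 *	ftrace_preempt_enable(resched);
 */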
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
#endif /* _LINUX_KERNEL_TRACE_H */