ftrace: latency tracer infrastructure
[deliverable/linux.git] / kernel / trace / trace.h
CommitLineData
bc0c38d1
SR
1#ifndef _LINUX_KERNEL_TRACE_H
2#define _LINUX_KERNEL_TRACE_H
3
4#include <linux/fs.h>
5#include <asm/atomic.h>
6#include <linux/sched.h>
7#include <linux/clocksource.h>
8
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long ip;		/* address of the traced function */
	unsigned long parent_ip;	/* address of the caller (call site) */
};
16
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int prev_pid;		/* PID of the task switched away from */
	unsigned char prev_prio;	/* its priority at switch time */
	unsigned char prev_state;	/* its task state at switch-out */
	unsigned int next_pid;		/* PID of the task switched to */
	unsigned char next_prio;	/* priority of the incoming task */
};
27
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *  bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char type;		/* entry kind — presumably selects the union member below; verify against writers */
	char cpu;		/* CPU this entry was recorded on */
	char flags;		/* NOTE(review): looks like irq/preempt state flags — confirm encoding at record site */
	char preempt_count;	/* preempt count at record time */
	int pid;		/* PID of the task that generated the entry */
	cycle_t t;		/* timestamp (see now() below) */
	unsigned long idx;	/* sequence index of this entry */
	union {			/* payload; which member is valid depends on type */
		struct ftrace_entry fn;
		struct ctx_switch_entry ctx;
	};
};

/* Size of one fixed-width trace record. */
#define TRACE_ENTRY_SIZE sizeof(struct trace_entry)
49
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	void *trace;			/* buffer of struct trace_entry records for this CPU */
	unsigned long trace_idx;	/* current write index into the buffer */
	atomic_t disabled;		/* non-zero: recording on this CPU is suppressed */
	atomic_t underrun;		/* NOTE(review): presumably counts dropped/overwritten entries — confirm */
	unsigned long saved_latency;	/* max latency saved for this trace (see update_max_tr) */
	unsigned long critical_start;	/* IP where the critical (latency) section began */
	unsigned long critical_end;	/* IP where the critical section ended */
	unsigned long critical_sequence;/* sequence number of the critical section */
	unsigned long nice;		/* snapshot of the traced task's nice value */
	unsigned long policy;		/* its scheduling policy */
	unsigned long rt_priority;	/* its RT priority */
	cycle_t preempt_timestamp;	/* timestamp when preemption was disabled */
	pid_t pid;			/* PID of the task associated with the max-latency trace */
	uid_t uid;			/* its UID */
	char comm[TASK_COMM_LEN];	/* its command name */
};
72
struct trace_iterator;	/* forward declaration; defined below */

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long entries;		/* number of entries per CPU buffer */
	long ctrl;			/* tracer on/off state */
	int cpu;			/* CPU that owns the current max-latency trace — TODO confirm */
	cycle_t time_start;		/* timestamp when the trace was started */
	struct trace_array_cpu *data[NR_CPUS];	/* per-CPU buffers */
};
87
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;			/* tracer name shown to userspace */
	void (*init)(struct trace_array *tr);	/* activate the tracer on @tr */
	void (*reset)(struct trace_array *tr);	/* deactivate and clear state */
	void (*open)(struct trace_iterator *iter);	/* called when output file is opened */
	void (*close)(struct trace_iterator *iter);	/* called when output file is closed */
	void (*start)(struct trace_iterator *iter);	/* begin an output pass */
	void (*stop)(struct trace_iterator *iter);	/* end an output pass */
	void (*ctrl_update)(struct trace_array *tr);	/* react to tr->ctrl changes */
	struct tracer *next;			/* link in the registered-tracers list */
	int print_max;				/* non-zero: print the saved max-latency trace */
};
103
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array *tr;		/* trace array being iterated */
	struct tracer *trace;		/* tracer that produced the data */
	struct trace_entry *ent;	/* current entry */
	unsigned long iter_flags;	/* iteration state flags */
	loff_t pos;			/* current file position in the output */
	unsigned long next_idx[NR_CPUS];/* per-CPU next entry index (merge across CPUs) */
	int cpu;			/* CPU of the current entry */
	int idx;			/* index of the current entry */
};
118
/* Reset a per-CPU buffer to empty. */
void notrace tracing_reset(struct trace_array_cpu *data);
/* Generic open() for debugfs trace files. */
int tracing_open_generic(struct inode *inode, struct file *filp);
/* Create (once) and return the tracing debugfs directory. */
struct dentry *tracing_init_dentry(void);

/* Record a function-call entry (ip called from parent_ip) into @data. */
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
/* Record a context-switch entry (prev -> next) into @data. */
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
/* Remember @tsk's comm so later output can map PID -> name. */
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
/* Register/unregister a tracer plugin (see struct tracer). */
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

/* Current recorded maximum latency, and the reporting threshold. */
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

/* Snapshot the current trace as the new max-latency trace
 * (all CPUs, or a single CPU's buffer). */
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
147
148static inline notrace cycle_t now(int cpu)
149{
150 return cpu_clock(cpu);
151}
152
#ifdef CONFIG_SCHED_TRACER
/* Hook invoked on every context switch by the wakeup-latency tracer. */
extern void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
#else
/* No-op stub when the sched tracer is configured out. */
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
#endif
162
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/* Callback fired on each context switch (prev -> next). */
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

/*
 * A registered context-switch callback; multiple registrations form
 * a singly linked list via @next.
 */
struct tracer_switch_ops {
	tracer_switch_func_t func;	/* callback to invoke */
	void *private;			/* opaque data passed back to func */
	struct tracer_switch_ops *next;	/* next registered ops */
};

/* Add/remove @ops from the context-switch callback list. */
extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
179
#ifdef CONFIG_DYNAMIC_FTRACE
/* Total number of dynamically updated ftrace call sites. */
extern unsigned long ftrace_update_tot_cnt;
#endif
183
184#endif /* _LINUX_KERNEL_TRACE_H */
This page took 0.03495 seconds and 5 git commands to generate.