ftrace: use raw_smp_processor_id for mcount functions
[deliverable/linux.git] / kernel / trace / trace.h
CommitLineData
bc0c38d1
SR
1#ifndef _LINUX_KERNEL_TRACE_H
2#define _LINUX_KERNEL_TRACE_H
3
4#include <linux/fs.h>
5#include <asm/atomic.h>
6#include <linux/sched.h>
7#include <linux/clocksource.h>
8
72829bc3
TG
/*
 * Type discriminator for a trace entry; stored in trace_entry::type
 * and selects which member of the trace_entry payload union is valid.
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,	/* function entry/exit event */
	TRACE_CTX,	/* context-switch event */
	TRACE_WAKE,	/* task wakeup event */
	TRACE_STACK,	/* kernel stack trace */
	TRACE_SPECIAL,	/* free-form (user-defined) event */

	__TRACE_LAST_TYPE
};
20
bc0c38d1
SR
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long ip;		/* address of the traced function */
	unsigned long parent_ip;	/* address of its caller */
};
28
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int prev_pid;		/* task we switched away from */
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;		/* task we switched to */
	unsigned char next_prio;
	unsigned char next_state;
};
40
f0a920d5
IM
/*
 * Special (free-form) trace entry: three caller-defined words,
 * recorded via trace_special():
 */
struct special_entry {
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
};
49
86387f7e
IM
/*
 * Stack-trace entry:
 */

/* Maximum call-stack depth recorded per stack-trace entry: */
#define FTRACE_STACK_ENTRIES 5

struct stack_entry {
	unsigned long caller[FTRACE_STACK_ENTRIES];	/* return addresses */
};
59
bc0c38d1
SR
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char type;		/* enum trace_type: selects the union member */
	char cpu;		/* CPU the event was recorded on */
	char flags;
	char preempt_count;
	int pid;
	cycle_t t;		/* timestamp */
	union {
		struct ftrace_entry fn;		/* TRACE_FN */
		struct ctx_switch_entry ctx;	/* TRACE_CTX */
		struct special_entry special;	/* TRACE_SPECIAL */
		struct stack_entry stack;	/* TRACE_STACK */
	};
};

#define TRACE_ENTRY_SIZE sizeof(struct trace_entry)
82
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	struct list_head trace_pages;	/* pages backing this CPU's entries */
	atomic_t disabled;		/* nonzero: recording suppressed — TODO confirm against users */
	raw_spinlock_t lock;
	struct lock_class_key lock_key;

	/* these fields get copied into max-trace: */
	unsigned trace_head_idx;
	unsigned trace_tail_idx;
	void *trace_head; /* producer */
	void *trace_tail; /* consumer */
	unsigned long trace_idx;
	unsigned long overrun;		/* entries lost to buffer wrap */
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];	/* command name of the traced task */
};
113
struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long entries;
	long ctrl;			/* tracer on/off state */
	int cpu;
	cycle_t time_start;
	struct task_struct *waiter;	/* reader blocked waiting for data */
	struct trace_array_cpu *data[NR_CPUS];
};
129
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;	/* unique tracer name, shown in debugfs */
	void (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*start)(struct trace_iterator *iter);
	void (*stop)(struct trace_iterator *iter);
	/* optional: tracer-specific read of the trace pipe: */
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
	void (*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	/* boot-time sanity check of this tracer: */
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	/* optional: tracer-specific formatting of one output line: */
	int (*print_line)(struct trace_iterator *iter);
	struct tracer *next;	/* singly-linked list of registered tracers */
	int print_max;
};
154
214023c3
SR
/*
 * One page of formatted output, filled via trace_seq_printf():
 */
struct trace_seq {
	unsigned char buffer[PAGE_SIZE];
	unsigned int len;	/* bytes of buffer currently used */
};
159
bc0c38d1
SR
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array *tr;		/* the array being iterated */
	struct tracer *trace;		/* tracer that produced the data */
	void *private;			/* tracer-private state */
	long last_overrun[NR_CPUS];
	long overrun[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq seq;		/* staging buffer for one output line */
	struct trace_entry *ent;	/* current entry */
	int cpu;			/* CPU the current entry came from */

	struct trace_entry *prev_ent;	/* previously returned entry */
	int prev_cpu;

	unsigned long iter_flags;
	loff_t pos;
	unsigned long next_idx[NR_CPUS];
	struct list_head *next_page[NR_CPUS];
	unsigned next_page_idx[NR_CPUS];
	long idx;
};
186
/* Empty one per-CPU trace buffer: */
void tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
/* Record a function-entry event (ip, called from parent_ip): */
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
/* Record a context-switch event from prev to next: */
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);

/* Record a wakeup event (wakee woken while cur was running): */
void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags);
/* Record a free-form TRACE_SPECIAL entry with three arbitrary words: */
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags);

void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
/* Add/remove a tracer from the global tracer list: */
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

extern atomic_t trace_record_cmdline_enabled;

/* Snapshot the current trace as the new max-latency trace: */
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

/* Current trace-clock timestamp for the given CPU: */
extern cycle_t ftrace_now(int cpu);
bc0c38d1
SR
235
236#ifdef CONFIG_SCHED_TRACER
e309b41d 237extern void
bc0c38d1 238wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
57422797
IM
239extern void
240wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr);
bc0c38d1
SR
241#else
242static inline void
243wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
244{
245}
57422797
IM
246static inline void
247wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
248{
249}
bc0c38d1
SR
250#endif
251
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/* Callback invoked on every context switch: */
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

/* Linked-list node registering one context-switch callback: */
struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;			/* passed back as func's first arg */
	struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
268
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
/* Function exercised by the dynamic-ftrace startup selftest: */
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif
274
60a11774
SR
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Boot-time selftests, one per tracer; wired up via the ->selftest
 * method of struct tracer:
 */
#ifdef CONFIG_FTRACE
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
#endif
#ifdef CONFIG_IRQSOFF_TRACER
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
#endif
#ifdef CONFIG_PREEMPT_TRACER
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
#endif
#ifdef CONFIG_SCHED_TRACER
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
#endif
#endif /* CONFIG_FTRACE_STARTUP_TEST */
301
extern void *head_page(struct trace_array_cpu *data);
/* printf-style append into a trace_seq staging buffer: */
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern long ns2usecs(cycle_t nsec);

/* Global output-option bits; see enum trace_iterator_flags below: */
extern unsigned long trace_flags;
307
4fcdae83
SR
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 * trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,	/* show caller of traced function */
	TRACE_ITER_SYM_OFFSET	= 0x02,	/* symbol + offset */
	TRACE_ITER_SYM_ADDR	= 0x04,	/* raw address alongside symbol */
	TRACE_ITER_VERBOSE	= 0x08,
	TRACE_ITER_RAW		= 0x10,	/* raw (unformatted) output */
	TRACE_ITER_HEX		= 0x20,	/* hex dump output */
	TRACE_ITER_BIN		= 0x40,	/* binary output */
	TRACE_ITER_BLOCK	= 0x80,
	TRACE_ITER_STACKTRACE	= 0x100,
	TRACE_ITER_SCHED_TREE	= 0x200,
};
327
bc0c38d1 328#endif /* _LINUX_KERNEL_TRACE_H */
This page took 0.067845 seconds and 5 git commands to generate.