#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BTS,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ent	graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};

struct bts_entry {
	struct trace_entry	ent;
	unsigned long		from;
	unsigned long		to;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 *  CONT	   - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);	\
		__ftrace_bad_type();					\
	} while (0)
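
/*
 * Illustrative sketch (not part of the original header): a print_line
 * style callback typically switches on iter->ent->type and uses
 * trace_assign_type() to cast the generic entry to its specific type
 * before formatting it into iter->seq (return values are from
 * enum print_line_t, declared below). The function name and the exact
 * output format here are hypothetical.
 *
 *	static enum print_line_t example_print_line(struct trace_iterator *iter)
 *	{
 *		struct trace_entry *entry = iter->ent;
 *		struct ftrace_entry *field;
 *
 *		switch (entry->type) {
 *		case TRACE_FN:
 *			trace_assign_type(field, entry);
 *			if (!trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *					      field->ip, field->parent_ip))
 *				return TRACE_TYPE_PARTIAL_LINE;
 *			return TRACE_TYPE_HANDLED;
 *		default:
 *			return TRACE_TYPE_UNHANDLED;
 *		}
 *	}
 */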

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};


/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
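
/*
 * Illustrative sketch (not part of the original header): how a tracer
 * would typically declare its options and hand them to the core via the
 * flags pointer of its struct tracer. The option name and bit value
 * below are hypothetical; the NULL-name entry terminates the array.
 *
 *	#define MY_TRACER_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(verbose, MY_TRACER_OPT_VERBOSE) },
 *		{ }	// empty entry terminates the list
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val  = 0,		// all options start cleared
 *		.opts = my_tracer_opts,
 *	};
 *
 *	// in the struct tracer definition:	.flags = &my_tracer_flags,
 */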

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	/* Your tracer should raise a warning if init fails */
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
};
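
/*
 * Illustrative sketch (not part of the original header): registering a
 * minimal tracer. Only the hooks a tracer actually needs have to be
 * filled in; register_tracer() is declared later in this file, while
 * the tracer name and functions below are hypothetical.
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		// reset the buffer, hook into tracepoints, etc.
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(init_my_tracer);
 */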

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users and that might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_t		started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
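
/*
 * Illustrative sketch (not part of the original header): tracers fill
 * the common trace_entry fields with tracing_generic_entry_update()
 * before setting the type and the entry-specific fields. The ring
 * buffer reservation and commit around this snippet are elided, and
 * ip/parent_ip are assumed to come from the caller.
 *
 *	struct ftrace_entry *entry;	// space already reserved for it
 *	unsigned long irq_flags;
 *
 *	local_save_flags(irq_flags);
 *	tracing_generic_entry_update(&entry->ent, irq_flags, preempt_count());
 *	entry->ent.type  = TRACE_FN;
 *	entry->ip	 = ip;
 *	entry->parent_ip = parent_ip;
 */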

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
void trace_graph_entry(struct ftrace_graph_ent *trace);
void trace_bts(struct trace_array *tr,
	       unsigned long from,
	       unsigned long to);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()	do { } while (0)
# define tracing_stop_function_trace()	do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
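
/*
 * Illustrative sketch (not part of the original header): output code
 * usually extracts the symbol-related bits once and passes them to
 * seq_print_ip_sym() as sym_flags. The surrounding iterator and ip
 * values are assumed to come from the caller.
 *
 *	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 *
 *	seq_print_ip_sym(&iter->seq, ip, sym_flags);
 */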

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption.
 * If resched is set, then we were either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
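
/*
 * Illustrative sketch (not part of the original header): the pair above
 * brackets tracing code that may run from within the scheduler itself.
 * The probe function name and its body are hypothetical.
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip)
 *	{
 *		int resched;
 *
 *		resched = ftrace_preempt_disable();
 *		// ... record the event into the ring buffer ...
 *		ftrace_preempt_enable(resched);
 *	}
 */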

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */