kernel/trace/trace.h
1 #ifndef _LINUX_KERNEL_TRACE_H
2 #define _LINUX_KERNEL_TRACE_H
3
4 #include <linux/fs.h>
5 #include <asm/atomic.h>
6 #include <linux/sched.h>
7 #include <linux/clocksource.h>
8 #include <linux/ring_buffer.h>
9 #include <linux/mmiotrace.h>
10 #include <linux/tracepoint.h>
11 #include <linux/ftrace.h>
12 #include <trace/boot.h>
13 #include <linux/kmemtrace.h>
14 #include <linux/hw_breakpoint.h>
15
16 #include <linux/trace_seq.h>
17 #include <linux/ftrace_event.h>
18
19 enum trace_type {
20 __TRACE_FIRST_TYPE = 0,
21
22 TRACE_FN,
23 TRACE_CTX,
24 TRACE_WAKE,
25 TRACE_STACK,
26 TRACE_PRINT,
27 TRACE_BPRINT,
28 TRACE_SPECIAL,
29 TRACE_MMIO_RW,
30 TRACE_MMIO_MAP,
31 TRACE_BRANCH,
32 TRACE_BOOT_CALL,
33 TRACE_BOOT_RET,
34 TRACE_GRAPH_RET,
35 TRACE_GRAPH_ENT,
36 TRACE_USER_STACK,
37 TRACE_KMEM_ALLOC,
38 TRACE_KMEM_FREE,
39 TRACE_BLK,
40 TRACE_KSYM,
41
42 __TRACE_LAST_TYPE,
43 };
44
45 enum kmemtrace_type_id {
46 KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */
47 KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */
48 KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */
49 };
50
51 extern struct tracer boot_tracer;
52
53 #undef __field
54 #define __field(type, item) type item;
55
56 #undef __field_struct
57 #define __field_struct(type, item) __field(type, item)
58
59 #undef __field_desc
60 #define __field_desc(type, container, item)
61
62 #undef __array
63 #define __array(type, item, size) type item[size];
64
65 #undef __array_desc
66 #define __array_desc(type, container, item, size)
67
68 #undef __dynamic_array
69 #define __dynamic_array(type, item) type item[];
70
71 #undef F_STRUCT
72 #define F_STRUCT(args...) args
73
74 #undef FTRACE_ENTRY
75 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
76 struct struct_name { \
77 struct trace_entry ent; \
78 tstruct \
79 }
80
81 #undef TP_ARGS
82 #define TP_ARGS(args...) args
83
84 #undef FTRACE_ENTRY_DUP
85 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
86
87 #include "trace_entries.h"
88
89 /*
90 * syscalls are special and need special handling; this is why
91 * they are not included in trace_entries.h
92 */
93 struct syscall_trace_enter {
94 struct trace_entry ent;
95 int nr;
96 unsigned long args[];
97 };
98
99 struct syscall_trace_exit {
100 struct trace_entry ent;
101 int nr;
102 long ret;
103 };
104
105 struct kprobe_trace_entry_head {
106 struct trace_entry ent;
107 unsigned long ip;
108 };
109
110 struct kretprobe_trace_entry_head {
111 struct trace_entry ent;
112 unsigned long func;
113 unsigned long ret_ip;
114 };
115
116 /*
117 * trace_flag_type is an enumeration that holds different
118 * states when a trace occurs. These are:
119 * IRQS_OFF - interrupts were disabled
120 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
121 * NEED_RESCHED - reschedule is requested
122 * HARDIRQ - inside an interrupt handler
123 * SOFTIRQ - inside a softirq handler
124 */
125 enum trace_flag_type {
126 TRACE_FLAG_IRQS_OFF = 0x01,
127 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
128 TRACE_FLAG_NEED_RESCHED = 0x04,
129 TRACE_FLAG_HARDIRQ = 0x08,
130 TRACE_FLAG_SOFTIRQ = 0x10,
131 };
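/*
 * Illustrative only: a sketch of how these bits might be OR-ed together
 * when an event is recorded, in the spirit of tracing_generic_entry_update()
 * in trace.c.  The variable names below are hypothetical.
 */
#if 0
	unsigned char trace_entry_flags;

	trace_entry_flags =
		(irqs_disabled_flags(irq_flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		(in_irq() ? TRACE_FLAG_HARDIRQ : 0) |
		(in_softirq() ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
#endif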
132
133 #define TRACE_BUF_SIZE 1024
134
135 /*
136 * The CPU trace array - it consists of thousands of trace entries
137 * plus some other descriptor data (for example, which task started
138 * the trace).
139 */
140 struct trace_array_cpu {
141 atomic_t disabled;
142 void *buffer_page; /* ring buffer spare */
143
144 unsigned long saved_latency;
145 unsigned long critical_start;
146 unsigned long critical_end;
147 unsigned long critical_sequence;
148 unsigned long nice;
149 unsigned long policy;
150 unsigned long rt_priority;
151 unsigned long skipped_entries;
152 cycle_t preempt_timestamp;
153 pid_t pid;
154 uid_t uid;
155 char comm[TASK_COMM_LEN];
156 };
157
158 /*
159 * The trace array - an array of per-CPU trace arrays. This is the
160 * highest level data structure that individual tracers deal with.
161 * They have on/off state as well:
162 */
163 struct trace_array {
164 struct ring_buffer *buffer;
165 unsigned long entries;
166 int cpu;
167 cycle_t time_start;
168 struct task_struct *waiter;
169 struct trace_array_cpu *data[NR_CPUS];
170 };
171
172 #define FTRACE_CMP_TYPE(var, type) \
173 __builtin_types_compatible_p(typeof(var), type *)
174
175 #undef IF_ASSIGN
176 #define IF_ASSIGN(var, entry, etype, id) \
177 if (FTRACE_CMP_TYPE(var, etype)) { \
178 var = (typeof(var))(entry); \
179 WARN_ON(id && (entry)->type != id); \
180 break; \
181 }
182
183 /* Will cause compile errors if type is not found. */
184 extern void __ftrace_bad_type(void);
185
186 /*
187 * The trace_assign_type is a verifier that the entry type is
188 * the same as the type being assigned. To add new types simply
189 * add a line with the following format:
190 *
191 * IF_ASSIGN(var, ent, type, id);
192 *
193 * where "type" is the trace type that includes the trace_entry
194 * as the "ent" item, and "id" is the trace identifier that is
195 * used in the trace_type enum.
196 *
197 * If the type can have more than one id, then use zero.
198 */
199 #define trace_assign_type(var, ent) \
200 do { \
201 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
202 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
203 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
204 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
205 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
206 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
207 IF_ASSIGN(var, ent, struct special_entry, 0); \
208 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
209 TRACE_MMIO_RW); \
210 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
211 TRACE_MMIO_MAP); \
212 IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
213 IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
214 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
215 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
216 TRACE_GRAPH_ENT); \
217 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
218 TRACE_GRAPH_RET); \
219 IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
220 TRACE_KMEM_ALLOC); \
221 IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
222 TRACE_KMEM_FREE); \
223 IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
224 __ftrace_bad_type(); \
225 } while (0)
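/*
 * Illustrative only: a minimal sketch of how an output routine might use
 * trace_assign_type().  The function name my_fn_output is hypothetical;
 * the real output callbacks live in trace_output.c and in the tracers.
 */
#if 0
static enum print_line_t my_fn_output(struct trace_iterator *iter)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);	/* WARNs if type != TRACE_FN */
	trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
			 field->ip, field->parent_ip);
	return TRACE_TYPE_HANDLED;
}
#endif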
226
227 /*
228 * An option specific to a tracer. This is a boolean value.
229 * The bit is the mask that selects this option's value in the
230 * val field of struct tracer_flags.
231 */
232 struct tracer_opt {
233 const char *name; /* Will appear on the trace_options file */
234 u32 bit; /* Mask assigned in val field in tracer_flags */
235 };
236
237 /*
238 * The set of specific options for a tracer. Your tracer
239 * has to set the initial value of the flags val.
240 */
241 struct tracer_flags {
242 u32 val;
243 struct tracer_opt *opts;
244 };
245
246 /* Makes it easier to define a tracer opt */
247 #define TRACER_OPT(s, b) .name = #s, .bit = b
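/*
 * Illustrative sketch of how a tracer typically declares its private
 * options with TRACER_OPT.  The option name "myopt", its bit value and
 * the my_* identifiers are hypothetical.
 */
#if 0
#define TRACE_MYOPT	0x1

static struct tracer_opt my_tracer_opts[] = {
	/* "myopt" will show up in the trace_options file */
	{ TRACER_OPT(myopt, TRACE_MYOPT) },
	{ } /* terminating empty entry */
};

static struct tracer_flags my_tracer_flags = {
	.val  = 0,		/* initial value of the options */
	.opts = my_tracer_opts,
};
#endif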
248
249
250 /**
251 * struct tracer - a specific tracer and its callbacks to interact with debugfs
252 * @name: the name chosen to select it on the available_tracers file
253 * @init: called when one switches to this tracer (echo name > current_tracer)
254 * @reset: called when one switches to another tracer
255 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
256 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
257 * @open: called when the trace file is opened
258 * @pipe_open: called when the trace_pipe file is opened
259 * @wait_pipe: override how the user waits for traces on trace_pipe
260 * @close: called when the trace file is released
261 * @pipe_close: called when the trace_pipe file is released
262 * @read: override the default read callback on trace_pipe
263 * @splice_read: override the default splice_read callback on trace_pipe
264 * @selftest: selftest to run on boot (see trace_selftest.c)
265 * @print_header: override the first lines that describe your columns
266 * @print_line: callback that prints a trace
267 * @set_flag: signals one of your private flags changed (trace_options file)
268 * @flags: your private flags
269 */
270 struct tracer {
271 const char *name;
272 int (*init)(struct trace_array *tr);
273 void (*reset)(struct trace_array *tr);
274 void (*start)(struct trace_array *tr);
275 void (*stop)(struct trace_array *tr);
276 void (*open)(struct trace_iterator *iter);
277 void (*pipe_open)(struct trace_iterator *iter);
278 void (*wait_pipe)(struct trace_iterator *iter);
279 void (*close)(struct trace_iterator *iter);
280 void (*pipe_close)(struct trace_iterator *iter);
281 ssize_t (*read)(struct trace_iterator *iter,
282 struct file *filp, char __user *ubuf,
283 size_t cnt, loff_t *ppos);
284 ssize_t (*splice_read)(struct trace_iterator *iter,
285 struct file *filp,
286 loff_t *ppos,
287 struct pipe_inode_info *pipe,
288 size_t len,
289 unsigned int flags);
290 #ifdef CONFIG_FTRACE_STARTUP_TEST
291 int (*selftest)(struct tracer *trace,
292 struct trace_array *tr);
293 #endif
294 void (*print_header)(struct seq_file *m);
295 enum print_line_t (*print_line)(struct trace_iterator *iter);
296 /* If you handled the flag setting, return 0 */
297 int (*set_flag)(u32 old_flags, u32 bit, int set);
298 struct tracer *next;
299 int print_max;
300 struct tracer_flags *flags;
301 };
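/*
 * Minimal sketch of a tracer definition and its registration, assuming the
 * hypothetical my_tracer_* callbacks and the my_tracer_flags structure
 * sketched above.  register_tracer() is declared further down in this file.
 */
#if 0
static int my_tracer_init(struct trace_array *tr)
{
	/* set up per-tracer state and start recording */
	return 0;
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* stop recording and free per-tracer state */
}

static struct tracer my_tracer __read_mostly = {
	.name	= "mytracer",		/* listed in available_tracers */
	.init	= my_tracer_init,
	.reset	= my_tracer_reset,
	.flags	= &my_tracer_flags,
};

static __init int init_my_tracer(void)
{
	return register_tracer(&my_tracer);
}
device_initcall(init_my_tracer);
#endif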
302
303
304 #define TRACE_PIPE_ALL_CPU -1
305
306 int tracer_init(struct tracer *t, struct trace_array *tr);
307 int tracing_is_enabled(void);
308 void trace_wake_up(void);
309 void tracing_reset(struct trace_array *tr, int cpu);
310 void tracing_reset_online_cpus(struct trace_array *tr);
311 void tracing_reset_current(int cpu);
312 void tracing_reset_current_online_cpus(void);
313 int tracing_open_generic(struct inode *inode, struct file *filp);
314 struct dentry *trace_create_file(const char *name,
315 mode_t mode,
316 struct dentry *parent,
317 void *data,
318 const struct file_operations *fops);
319
320 struct dentry *tracing_init_dentry(void);
321 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
322
323 struct ring_buffer_event;
324
325 struct ring_buffer_event *
326 trace_buffer_lock_reserve(struct ring_buffer *buffer,
327 int type,
328 unsigned long len,
329 unsigned long flags,
330 int pc);
331 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
332 struct ring_buffer_event *event,
333 unsigned long flags, int pc);
334
335 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
336 struct trace_array_cpu *data);
337
338 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
339 int *ent_cpu, u64 *ent_ts);
340
341 void default_wait_pipe(struct trace_iterator *iter);
342 void poll_wait_pipe(struct trace_iterator *iter);
343
344 void ftrace(struct trace_array *tr,
345 struct trace_array_cpu *data,
346 unsigned long ip,
347 unsigned long parent_ip,
348 unsigned long flags, int pc);
349 void tracing_sched_switch_trace(struct trace_array *tr,
350 struct task_struct *prev,
351 struct task_struct *next,
352 unsigned long flags, int pc);
353
354 void tracing_sched_wakeup_trace(struct trace_array *tr,
355 struct task_struct *wakee,
356 struct task_struct *cur,
357 unsigned long flags, int pc);
358 void trace_special(struct trace_array *tr,
359 struct trace_array_cpu *data,
360 unsigned long arg1,
361 unsigned long arg2,
362 unsigned long arg3, int pc);
363 void trace_function(struct trace_array *tr,
364 unsigned long ip,
365 unsigned long parent_ip,
366 unsigned long flags, int pc);
367 void trace_default_header(struct seq_file *m);
368 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
369 int trace_empty(struct trace_iterator *iter);
370
371 void trace_graph_return(struct ftrace_graph_ret *trace);
372 int trace_graph_entry(struct ftrace_graph_ent *trace);
373 void set_graph_array(struct trace_array *tr);
374
375 void tracing_start_cmdline_record(void);
376 void tracing_stop_cmdline_record(void);
377 void tracing_sched_switch_assign_trace(struct trace_array *tr);
378 void tracing_stop_sched_switch_record(void);
379 void tracing_start_sched_switch_record(void);
380 int register_tracer(struct tracer *type);
381 void unregister_tracer(struct tracer *type);
382 int is_tracing_stopped(void);
383
384 extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
385
386 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
387
388 extern unsigned long tracing_thresh;
389
390 #ifdef CONFIG_TRACER_MAX_TRACE
391 extern unsigned long tracing_max_latency;
392
393 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
394 void update_max_tr_single(struct trace_array *tr,
395 struct task_struct *tsk, int cpu);
396 #endif /* CONFIG_TRACER_MAX_TRACE */
397
398 #ifdef CONFIG_STACKTRACE
399 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
400 int skip, int pc);
401
402 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
403 int pc);
404
405 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
406 int pc);
407 #else
408 static inline void ftrace_trace_stack(struct ring_buffer *buffer,
409 unsigned long flags, int skip, int pc)
410 {
411 }
412
413 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
414 unsigned long flags, int pc)
415 {
416 }
417
418 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
419 int skip, int pc)
420 {
421 }
422 #endif /* CONFIG_STACKTRACE */
423
424 extern cycle_t ftrace_now(int cpu);
425
426 extern void trace_find_cmdline(int pid, char comm[]);
427
428 #ifdef CONFIG_DYNAMIC_FTRACE
429 extern unsigned long ftrace_update_tot_cnt;
430 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
431 extern int DYN_FTRACE_TEST_NAME(void);
432 #endif
433
434 extern int ring_buffer_expanded;
435 extern bool tracing_selftest_disabled;
436 DECLARE_PER_CPU(int, ftrace_cpu_disabled);
437
438 #ifdef CONFIG_FTRACE_STARTUP_TEST
439 extern int trace_selftest_startup_function(struct tracer *trace,
440 struct trace_array *tr);
441 extern int trace_selftest_startup_function_graph(struct tracer *trace,
442 struct trace_array *tr);
443 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
444 struct trace_array *tr);
445 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
446 struct trace_array *tr);
447 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
448 struct trace_array *tr);
449 extern int trace_selftest_startup_wakeup(struct tracer *trace,
450 struct trace_array *tr);
451 extern int trace_selftest_startup_nop(struct tracer *trace,
452 struct trace_array *tr);
453 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
454 struct trace_array *tr);
455 extern int trace_selftest_startup_sysprof(struct tracer *trace,
456 struct trace_array *tr);
457 extern int trace_selftest_startup_branch(struct tracer *trace,
458 struct trace_array *tr);
459 extern int trace_selftest_startup_ksym(struct tracer *trace,
460 struct trace_array *tr);
461 #endif /* CONFIG_FTRACE_STARTUP_TEST */
462
463 extern void *head_page(struct trace_array_cpu *data);
464 extern unsigned long long ns2usecs(cycle_t nsec);
465 extern int
466 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
467 extern int
468 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
469 extern int
470 trace_array_vprintk(struct trace_array *tr,
471 unsigned long ip, const char *fmt, va_list args);
472 int trace_array_printk(struct trace_array *tr,
473 unsigned long ip, const char *fmt, ...);
474
475 extern unsigned long trace_flags;
476
477 extern int trace_clock_id;
478
479 /* Standard output formatting function used for function return traces */
480 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
481
482 /* Flag options */
483 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
484 #define TRACE_GRAPH_PRINT_CPU 0x2
485 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
486 #define TRACE_GRAPH_PRINT_PROC 0x8
487 #define TRACE_GRAPH_PRINT_DURATION 0x10
488 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
489
490 extern enum print_line_t
491 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
492 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
493 extern enum print_line_t
494 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
495 extern void graph_trace_open(struct trace_iterator *iter);
496 extern void graph_trace_close(struct trace_iterator *iter);
497 extern int __trace_graph_entry(struct trace_array *tr,
498 struct ftrace_graph_ent *trace,
499 unsigned long flags, int pc);
500 extern void __trace_graph_return(struct trace_array *tr,
501 struct ftrace_graph_ret *trace,
502 unsigned long flags, int pc);
503
504
505 #ifdef CONFIG_DYNAMIC_FTRACE
506 /* TODO: make this variable */
507 #define FTRACE_GRAPH_MAX_FUNCS 32
508 extern int ftrace_graph_filter_enabled;
509 extern int ftrace_graph_count;
510 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
511
512 static inline int ftrace_graph_addr(unsigned long addr)
513 {
514 int i;
515
516 if (!ftrace_graph_filter_enabled)
517 return 1;
518
519 for (i = 0; i < ftrace_graph_count; i++) {
520 if (addr == ftrace_graph_funcs[i])
521 return 1;
522 }
523
524 return 0;
525 }
526 #else
527 static inline int ftrace_graph_addr(unsigned long addr)
528 {
529 return 1;
530 }
531 #endif /* CONFIG_DYNAMIC_FTRACE */
532 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
533 static inline enum print_line_t
534 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
535 {
536 return TRACE_TYPE_UNHANDLED;
537 }
538 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
539
540 extern struct list_head ftrace_pids;
541
542 #ifdef CONFIG_FUNCTION_TRACER
543 static inline int ftrace_trace_task(struct task_struct *task)
544 {
545 if (list_empty(&ftrace_pids))
546 return 1;
547
548 return test_tsk_trace_trace(task);
549 }
550 #else
551 static inline int ftrace_trace_task(struct task_struct *task)
552 {
553 return 1;
554 }
555 #endif
556
557 /*
558 * struct trace_parser - serves for reading the user input separated by spaces
559 * @cont: set if the input is not complete - no final space char was found
560 * @buffer: holds the parsed user input
561 * @idx: user input length
562 * @size: buffer size
563 */
564 struct trace_parser {
565 bool cont;
566 char *buffer;
567 unsigned idx;
568 unsigned size;
569 };
570
571 static inline bool trace_parser_loaded(struct trace_parser *parser)
572 {
573 return (parser->idx != 0);
574 }
575
576 static inline bool trace_parser_cont(struct trace_parser *parser)
577 {
578 return parser->cont;
579 }
580
581 static inline void trace_parser_clear(struct trace_parser *parser)
582 {
583 parser->cont = false;
584 parser->idx = 0;
585 }
586
587 extern int trace_parser_get_init(struct trace_parser *parser, int size);
588 extern void trace_parser_put(struct trace_parser *parser);
589 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
590 size_t cnt, loff_t *ppos);
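/*
 * Illustrative only: the intended calling sequence for the parser helpers
 * above, as seen from a debugfs write handler.  The function name my_write
 * is hypothetical; see ftrace_regex_write() for a real user.
 */
#if 0
static ssize_t my_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, TRACE_BUF_SIZE))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser) &&
	    !trace_parser_cont(&parser)) {
		/* parser.buffer now holds one space-separated word */
	}

	trace_parser_put(&parser);
	return read;
}
#endif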
591
592 /*
593 * trace_iterator_flags is an enumeration that defines bit
594 * positions into trace_flags that control the output.
595 *
596 * NOTE: These bits must match the trace_options array in
597 * trace.c.
598 */
599 enum trace_iterator_flags {
600 TRACE_ITER_PRINT_PARENT = 0x01,
601 TRACE_ITER_SYM_OFFSET = 0x02,
602 TRACE_ITER_SYM_ADDR = 0x04,
603 TRACE_ITER_VERBOSE = 0x08,
604 TRACE_ITER_RAW = 0x10,
605 TRACE_ITER_HEX = 0x20,
606 TRACE_ITER_BIN = 0x40,
607 TRACE_ITER_BLOCK = 0x80,
608 TRACE_ITER_STACKTRACE = 0x100,
609 TRACE_ITER_PRINTK = 0x200,
610 TRACE_ITER_PREEMPTONLY = 0x400,
611 TRACE_ITER_BRANCH = 0x800,
612 TRACE_ITER_ANNOTATE = 0x1000,
613 TRACE_ITER_USERSTACKTRACE = 0x2000,
614 TRACE_ITER_SYM_USEROBJ = 0x4000,
615 TRACE_ITER_PRINTK_MSGONLY = 0x8000,
616 TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */
617 TRACE_ITER_LATENCY_FMT = 0x20000,
618 TRACE_ITER_SLEEP_TIME = 0x40000,
619 TRACE_ITER_GRAPH_TIME = 0x80000,
620 };
621
622 /*
623 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
624 * control the output of kernel symbols.
625 */
626 #define TRACE_ITER_SYM_MASK \
627 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
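/*
 * Illustrative only: output code typically extracts the symbol-related
 * options in one step, e.g.
 *
 *	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 */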
628
629 extern struct tracer nop_trace;
630
631 /**
632 * ftrace_preempt_disable - disable preemption scheduler safe
633 *
634 * When tracing can happen inside the scheduler, there exist
635 * cases where the tracing might happen before the need_resched
636 * flag is checked. If this happens and the tracer calls
637 * preempt_enable (after a disable), a schedule might take place,
638 * causing an infinite recursion.
639 *
640 * To prevent this, we read the need_resched flag before
641 * disabling preemption. When we want to enable preemption, we
642 * check the flag; if it is set, we call preempt_enable_no_resched.
643 * Otherwise, we call preempt_enable.
644 *
645 * The rationale for doing the above is that if need_resched is set
646 * and we have yet to reschedule, we are either in an atomic location
647 * (where we do not need to check for scheduling) or we are inside
648 * the scheduler and do not want to resched.
649 */
650 static inline int ftrace_preempt_disable(void)
651 {
652 int resched;
653
654 resched = need_resched();
655 preempt_disable_notrace();
656
657 return resched;
658 }
659
660 /**
661 * ftrace_preempt_enable - enable preemption scheduler safe
662 * @resched: the return value from ftrace_preempt_disable
663 *
664 * This is a scheduler safe way to enable preemption and not miss
665 * any preemption checks. The preceding disable saved the preemption state.
666 * If resched is set, then we are either inside an atomic or
667 * are inside the scheduler (we would have already scheduled
668 * otherwise). In this case, we do not want to call normal
669 * preempt_enable, but preempt_enable_no_resched instead.
670 */
671 static inline void ftrace_preempt_enable(int resched)
672 {
673 if (resched)
674 preempt_enable_no_resched_notrace();
675 else
676 preempt_enable_notrace();
677 }
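/*
 * Illustrative only: the usual pairing of the two helpers above, assuming
 * a hypothetical tracing callback my_trace_call().
 */
#if 0
static void my_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	resched = ftrace_preempt_disable();
	/* ... record the event; this may run from within the scheduler ... */
	ftrace_preempt_enable(resched);
}
#endif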
678
679 #ifdef CONFIG_BRANCH_TRACER
680 extern int enable_branch_tracing(struct trace_array *tr);
681 extern void disable_branch_tracing(void);
682 static inline int trace_branch_enable(struct trace_array *tr)
683 {
684 if (trace_flags & TRACE_ITER_BRANCH)
685 return enable_branch_tracing(tr);
686 return 0;
687 }
688 static inline void trace_branch_disable(void)
689 {
690 /* due to races, always disable */
691 disable_branch_tracing();
692 }
693 #else
694 static inline int trace_branch_enable(struct trace_array *tr)
695 {
696 return 0;
697 }
698 static inline void trace_branch_disable(void)
699 {
700 }
701 #endif /* CONFIG_BRANCH_TRACER */
702
703 /* set ring buffers to default size if not already done */
704 int tracing_update_buffers(void);
705
706 /* trace event type bit fields, not numeric */
707 enum {
708 TRACE_EVENT_TYPE_PRINTF = 1,
709 TRACE_EVENT_TYPE_RAW = 2,
710 };
711
712 struct ftrace_event_field {
713 struct list_head link;
714 char *name;
715 char *type;
716 int filter_type;
717 int offset;
718 int size;
719 int is_signed;
720 };
721
722 struct event_filter {
723 int n_preds;
724 struct filter_pred **preds;
725 char *filter_string;
726 };
727
728 struct event_subsystem {
729 struct list_head list;
730 const char *name;
731 struct dentry *entry;
732 struct event_filter *filter;
733 int nr_events;
734 };
735
736 struct filter_pred;
737 struct regex;
738
739 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
740 int val1, int val2);
741
742 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
743
744 enum regex_type {
745 MATCH_FULL = 0,
746 MATCH_FRONT_ONLY,
747 MATCH_MIDDLE_ONLY,
748 MATCH_END_ONLY,
749 };
750
751 struct regex {
752 char pattern[MAX_FILTER_STR_VAL];
753 int len;
754 int field_len;
755 regex_match_func match;
756 };
757
758 struct filter_pred {
759 filter_pred_fn_t fn;
760 u64 val;
761 struct regex regex;
762 char *field_name;
763 int offset;
764 int not;
765 int op;
766 int pop_n;
767 };
768
769 extern enum regex_type
770 filter_parse_regex(char *buff, int len, char **search, int *not);
771 extern void print_event_filter(struct ftrace_event_call *call,
772 struct trace_seq *s);
773 extern int apply_event_filter(struct ftrace_event_call *call,
774 char *filter_string);
775 extern int apply_subsystem_event_filter(struct event_subsystem *system,
776 char *filter_string);
777 extern void print_subsystem_event_filter(struct event_subsystem *system,
778 struct trace_seq *s);
779 extern int filter_assign_type(const char *type);
780
781 struct list_head *
782 trace_get_fields(struct ftrace_event_call *event_call);
783
784 static inline int
785 filter_check_discard(struct ftrace_event_call *call, void *rec,
786 struct ring_buffer *buffer,
787 struct ring_buffer_event *event)
788 {
789 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
790 !filter_match_preds(call->filter, rec)) {
791 ring_buffer_discard_commit(buffer, event);
792 return 1;
793 }
794
795 return 0;
796 }
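/*
 * Illustrative only: how an event submission path is expected to use
 * filter_check_discard() - commit the event only if the filter did not
 * reject it.  The variable names below are those of a typical caller.
 */
#if 0
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
#endif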
797
798 extern struct mutex event_mutex;
799 extern struct list_head ftrace_events;
800
801 extern const char *__start___trace_bprintk_fmt[];
802 extern const char *__stop___trace_bprintk_fmt[];
803
804 #undef FTRACE_ENTRY
805 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
806 extern struct ftrace_event_call \
807 __attribute__((__aligned__(4))) event_##call;
808 #undef FTRACE_ENTRY_DUP
809 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
810 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
811 #include "trace_entries.h"
812
813 #endif /* _LINUX_KERNEL_TRACE_H */