1
2 #ifndef _LINUX_KERNEL_TRACE_H
3 #define _LINUX_KERNEL_TRACE_H
4
5 #include <linux/fs.h>
6 #include <linux/atomic.h>
7 #include <linux/sched.h>
8 #include <linux/clocksource.h>
9 #include <linux/ring_buffer.h>
10 #include <linux/mmiotrace.h>
11 #include <linux/tracepoint.h>
12 #include <linux/ftrace.h>
13 #include <linux/hw_breakpoint.h>
14 #include <linux/trace_seq.h>
15 #include <linux/trace_events.h>
16 #include <linux/compiler.h>
17
18
19 #ifdef CONFIG_FTRACE_SYSCALLS
20 #include <asm/unistd.h> /* For NR_SYSCALLS */
21 #include <asm/syscall.h> /* some archs define it here */
22 #endif
23
24 enum trace_type {
25 __TRACE_FIRST_TYPE = 0,
26
27 TRACE_FN,
28 TRACE_CTX,
29 TRACE_WAKE,
30 TRACE_STACK,
31 TRACE_PRINT,
32 TRACE_BPRINT,
33 TRACE_MMIO_RW,
34 TRACE_MMIO_MAP,
35 TRACE_BRANCH,
36 TRACE_GRAPH_RET,
37 TRACE_GRAPH_ENT,
38 TRACE_USER_STACK,
39 TRACE_BLK,
40 TRACE_BPUTS,
41
42 __TRACE_LAST_TYPE,
43 };
44
45
46 #undef __field
47 #define __field(type, item) type item;
48
49 #undef __field_struct
50 #define __field_struct(type, item) __field(type, item)
51
52 #undef __field_desc
53 #define __field_desc(type, container, item)
54
55 #undef __array
56 #define __array(type, item, size) type item[size];
57
58 #undef __array_desc
59 #define __array_desc(type, container, item, size)
60
61 #undef __dynamic_array
62 #define __dynamic_array(type, item) type item[];
63
64 #undef F_STRUCT
65 #define F_STRUCT(args...) args
66
67 #undef FTRACE_ENTRY
68 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
69 struct struct_name { \
70 struct trace_entry ent; \
71 tstruct \
72 }
73
74 #undef TP_ARGS
75 #define TP_ARGS(args...) args
76
77 #undef FTRACE_ENTRY_DUP
78 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
79
80 #undef FTRACE_ENTRY_REG
81 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
82 filter, regfn) \
83 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
84 filter)
85
86 #include "trace_entries.h"
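/*
 * For illustration: an entry declared in trace_entries.h roughly as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...)
 *
 * is turned by the definitions above into
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */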
87
88 /*
89 * Syscalls are special and need special handling; this is why
90 * they are not included in trace_entries.h.
91 */
92 struct syscall_trace_enter {
93 struct trace_entry ent;
94 int nr;
95 unsigned long args[];
96 };
97
98 struct syscall_trace_exit {
99 struct trace_entry ent;
100 int nr;
101 long ret;
102 };
103
104 struct kprobe_trace_entry_head {
105 struct trace_entry ent;
106 unsigned long ip;
107 };
108
109 struct kretprobe_trace_entry_head {
110 struct trace_entry ent;
111 unsigned long func;
112 unsigned long ret_ip;
113 };
114
115 /*
116 * trace_flag_type is an enumeration that holds different
117 * states when a trace occurs. These are:
118 * IRQS_OFF - interrupts were disabled
119 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
120 * NEED_RESCHED - reschedule is requested
121 * HARDIRQ - inside an interrupt handler
122 * SOFTIRQ - inside a softirq handler
 * PREEMPT_RESCHED - a reschedule via preemption is pending
123 */
124 enum trace_flag_type {
125 TRACE_FLAG_IRQS_OFF = 0x01,
126 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
127 TRACE_FLAG_NEED_RESCHED = 0x04,
128 TRACE_FLAG_HARDIRQ = 0x08,
129 TRACE_FLAG_SOFTIRQ = 0x10,
130 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
131 };
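/*
 * These flags are OR'd into the flags field of struct trace_entry when an
 * event is recorded (see tracing_generic_entry_update() in trace.c). For
 * example, a flags value of 0x09 means the event was logged from inside a
 * hardirq handler with interrupts disabled (IRQS_OFF | HARDIRQ).
 */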
132
133 #define TRACE_BUF_SIZE 1024
134
135 struct trace_array;
136
137 /*
138 * The CPU trace array - it consists of thousands of trace entries
139 * plus some other descriptor data (for example, which task started
140 * the trace).
141 */
142 struct trace_array_cpu {
143 atomic_t disabled;
144 void *buffer_page; /* ring buffer spare */
145
146 unsigned long entries;
147 unsigned long saved_latency;
148 unsigned long critical_start;
149 unsigned long critical_end;
150 unsigned long critical_sequence;
151 unsigned long nice;
152 unsigned long policy;
153 unsigned long rt_priority;
154 unsigned long skipped_entries;
155 cycle_t preempt_timestamp;
156 pid_t pid;
157 kuid_t uid;
158 char comm[TASK_COMM_LEN];
159 };
160
161 struct tracer;
162
163 struct trace_buffer {
164 struct trace_array *tr;
165 struct ring_buffer *buffer;
166 struct trace_array_cpu __percpu *data;
167 cycle_t time_start;
168 int cpu;
169 };
170
171 /*
172 * The trace array - an array of per-CPU trace arrays. This is the
173 * highest level data structure that individual tracers deal with.
174 * They have on/off state as well:
175 */
176 struct trace_array {
177 struct list_head list;
178 char *name;
179 struct trace_buffer trace_buffer;
180 #ifdef CONFIG_TRACER_MAX_TRACE
181 /*
182 * The max_buffer is used to snapshot the trace when a maximum
183 * latency is reached, or when the user initiates a snapshot.
184 * Some tracers will use this to store a maximum trace while
185 * they continue examining live traces.
186 *
187 * The buffers for the max_buffer are set up the same as those of the trace_buffer.
188 * When a snapshot is taken, the buffer of the max_buffer is swapped
189 * with the buffer of the trace_buffer and the buffers are reset for
190 * the trace_buffer so the tracing can continue.
191 */
192 struct trace_buffer max_buffer;
193 bool allocated_snapshot;
194 unsigned long max_latency;
195 #endif
196 /*
197 * max_lock is used to protect the swapping of buffers
198 * when taking a max snapshot. The buffers themselves are
199 * protected by per_cpu spinlocks. But the action of the swap
200 * needs its own lock.
201 *
202 * This is defined as an arch_spinlock_t in order to help
203 * with performance when lockdep debugging is enabled.
204 *
205 * It is also used in places other than update_max_tr()
206 * so it needs to be defined outside of the
207 * CONFIG_TRACER_MAX_TRACE block.
208 */
209 arch_spinlock_t max_lock;
210 int buffer_disabled;
211 #ifdef CONFIG_FTRACE_SYSCALLS
212 int sys_refcount_enter;
213 int sys_refcount_exit;
214 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
215 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
216 #endif
217 int stop_count;
218 int clock_id;
219 struct tracer *current_trace;
220 unsigned int flags;
221 raw_spinlock_t start_lock;
222 struct dentry *dir;
223 struct dentry *options;
224 struct dentry *percpu_dir;
225 struct dentry *event_dir;
226 struct list_head systems;
227 struct list_head events;
228 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
229 int ref;
230 #ifdef CONFIG_FUNCTION_TRACER
231 struct ftrace_ops *ops;
232 /* function tracing enabled */
233 int function_enabled;
234 #endif
235 };
236
237 enum {
238 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
239 };
240
241 extern struct list_head ftrace_trace_arrays;
242
243 extern struct mutex trace_types_lock;
244
245 extern int trace_array_get(struct trace_array *tr);
246 extern void trace_array_put(struct trace_array *tr);
247
248 /*
249 * The global tracer (top) should be the first trace array added,
250 * but we check the flag anyway.
251 */
252 static inline struct trace_array *top_trace_array(void)
253 {
254 struct trace_array *tr;
255
256 if (list_empty(&ftrace_trace_arrays))
257 return NULL;
258
259 tr = list_entry(ftrace_trace_arrays.prev,
260 typeof(*tr), list);
261 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
262 return tr;
263 }
264
265 #define FTRACE_CMP_TYPE(var, type) \
266 __builtin_types_compatible_p(typeof(var), type *)
267
268 #undef IF_ASSIGN
269 #define IF_ASSIGN(var, entry, etype, id) \
270 if (FTRACE_CMP_TYPE(var, etype)) { \
271 var = (typeof(var))(entry); \
272 WARN_ON(id && (entry)->type != id); \
273 break; \
274 }
275
276 /* Will cause compile errors if type is not found. */
277 extern void __ftrace_bad_type(void);
278
279 /*
280 * The trace_assign_type is a verifier that the entry type is
281 * the same as the type being assigned. To add new types simply
282 * add a line with the following format:
283 *
284 * IF_ASSIGN(var, ent, type, id);
285 *
286 * Where "type" is the trace type that includes the trace_entry
287 * as the "ent" item. And "id" is the trace identifier that is
288 * used in the trace_type enum.
289 *
290 * If the type can have more than one id, then use zero.
291 */
292 #define trace_assign_type(var, ent) \
293 do { \
294 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
295 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
296 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
297 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
298 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
299 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
300 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
301 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
302 TRACE_MMIO_RW); \
303 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
304 TRACE_MMIO_MAP); \
305 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
306 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
307 TRACE_GRAPH_ENT); \
308 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
309 TRACE_GRAPH_RET); \
310 __ftrace_bad_type(); \
311 } while (0)
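/*
 * A typical use in a print callback looks roughly like this (sketch):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%ps <-- %ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 *
 * The IF_ASSIGN() chain picks the branch whose C type matches "field" and
 * warns if the recorded type id disagrees with it.
 */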
312
313 /*
314 * An option specific to a tracer. This is a boolean value.
315 * The bit holds the bit mask that is used to set this option's
316 * value in the val field of struct tracer_flags.
317 */
318 struct tracer_opt {
319 const char *name; /* Will appear on the trace_options file */
320 u32 bit; /* Mask assigned in val field in tracer_flags */
321 };
322
323 /*
324 * The set of specific options for a tracer. Your tracer
325 * has to set the initial value of the flags val field.
326 */
327 struct tracer_flags {
328 u32 val;
329 struct tracer_opt *opts;
330 };
331
332 /* Makes it easier to define a tracer opt */
333 #define TRACER_OPT(s, b) .name = #s, .bit = b
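/*
 * A tracer can declare its private options roughly like this (the names
 * below are made up for illustration); the opts array ends with an empty
 * entry:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val	= 0,
 *		.opts	= my_opts,
 *	};
 */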
334
335
336 /**
337 * struct tracer - a specific tracer and its callbacks to interact with tracefs
338 * @name: the name chosen to select it on the available_tracers file
339 * @init: called when one switches to this tracer (echo name > current_tracer)
340 * @reset: called when one switches to another tracer
341 * @start: called when tracing is unpaused (echo 1 > tracing_on)
342 * @stop: called when tracing is paused (echo 0 > tracing_on)
343 * @update_thresh: called when tracing_thresh is updated
344 * @open: called when the trace file is opened
345 * @pipe_open: called when the trace_pipe file is opened
346 * @close: called when the trace file is released
347 * @pipe_close: called when the trace_pipe file is released
348 * @read: override the default read callback on trace_pipe
349 * @splice_read: override the default splice_read callback on trace_pipe
350 * @selftest: selftest to run on boot (see trace_selftest.c)
351 * @print_header: override the first lines that describe your columns
352 * @print_line: callback that prints a trace
353 * @set_flag: signals one of your private flags changed (trace_options file)
354 * @flags: your private flags
355 */
356 struct tracer {
357 const char *name;
358 int (*init)(struct trace_array *tr);
359 void (*reset)(struct trace_array *tr);
360 void (*start)(struct trace_array *tr);
361 void (*stop)(struct trace_array *tr);
362 int (*update_thresh)(struct trace_array *tr);
363 void (*open)(struct trace_iterator *iter);
364 void (*pipe_open)(struct trace_iterator *iter);
365 void (*close)(struct trace_iterator *iter);
366 void (*pipe_close)(struct trace_iterator *iter);
367 ssize_t (*read)(struct trace_iterator *iter,
368 struct file *filp, char __user *ubuf,
369 size_t cnt, loff_t *ppos);
370 ssize_t (*splice_read)(struct trace_iterator *iter,
371 struct file *filp,
372 loff_t *ppos,
373 struct pipe_inode_info *pipe,
374 size_t len,
375 unsigned int flags);
376 #ifdef CONFIG_FTRACE_STARTUP_TEST
377 int (*selftest)(struct tracer *trace,
378 struct trace_array *tr);
379 #endif
380 void (*print_header)(struct seq_file *m);
381 enum print_line_t (*print_line)(struct trace_iterator *iter);
382 /* If you handled the flag setting, return 0 */
383 int (*set_flag)(struct trace_array *tr,
384 u32 old_flags, u32 bit, int set);
385 /* Return 0 if OK with change, else return non-zero */
386 int (*flag_changed)(struct trace_array *tr,
387 u32 mask, int set);
388 struct tracer *next;
389 struct tracer_flags *flags;
390 int enabled;
391 int ref;
392 bool print_max;
393 bool allow_instances;
394 #ifdef CONFIG_TRACER_MAX_TRACE
395 bool use_max_tr;
396 #endif
397 };
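/*
 * A minimal tracer registration, as a sketch (the callback names are
 * illustrative only):
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name		= "mytracer",
 *		.init		= my_tracer_init,
 *		.reset		= my_tracer_reset,
 *		.allow_instances = true,
 *	};
 *
 *	register_tracer(&my_tracer);
 *
 * register_tracer() is normally called from an __init function at boot.
 */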
398
399
400 /* Only current can touch trace_recursion */
401
402 /*
403 * For function tracing recursion:
404 * The order of these bits is important.
405 *
406 * When function tracing occurs, the following steps are made:
407 * If arch does not support a ftrace feature:
408 * call internal function (uses INTERNAL bits) which calls...
409 * If callback is registered to the "global" list, the list
410 * function is called and recursion checks the GLOBAL bits.
411 * then this function calls...
412 * The function callback, which can use the FTRACE bits to
413 * check for recursion.
414 *
415 * Now if the arch does not support a feature, and it calls
416 * the global list function which calls the ftrace callback
417 * all three of these steps will do a recursion protection.
418 * There's no reason to do one if the previous caller already
419 * did. The recursion that we are protecting against will
420 * go through the same steps again.
421 *
422 * To prevent the multiple recursion checks, if a recursion
423 * bit is set that is higher than the MAX bit of the current
424 * check, then we know that the check was made by the previous
425 * caller, and we can skip the current check.
426 */
427 enum {
428 TRACE_BUFFER_BIT,
429 TRACE_BUFFER_NMI_BIT,
430 TRACE_BUFFER_IRQ_BIT,
431 TRACE_BUFFER_SIRQ_BIT,
432
433 /* Start of function recursion bits */
434 TRACE_FTRACE_BIT,
435 TRACE_FTRACE_NMI_BIT,
436 TRACE_FTRACE_IRQ_BIT,
437 TRACE_FTRACE_SIRQ_BIT,
438
439 /* INTERNAL_BITs must be greater than FTRACE_BITs */
440 TRACE_INTERNAL_BIT,
441 TRACE_INTERNAL_NMI_BIT,
442 TRACE_INTERNAL_IRQ_BIT,
443 TRACE_INTERNAL_SIRQ_BIT,
444
445 TRACE_CONTROL_BIT,
446
447 TRACE_BRANCH_BIT,
448 /*
449 * Abuse of the trace_recursion:
450 * we need a way to maintain state if we are tracing the function
451 * graph in irq, because we want to trace a particular function that
452 * was called in irq context even though we have irq tracing off. Since
453 * this can only be modified by current, we can reuse trace_recursion.
454 */
455 TRACE_IRQ_BIT,
456 };
457
458 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
459 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
460 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
461
462 #define TRACE_CONTEXT_BITS 4
463
464 #define TRACE_FTRACE_START TRACE_FTRACE_BIT
465 #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
466
467 #define TRACE_LIST_START TRACE_INTERNAL_BIT
468 #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
469
470 #define TRACE_CONTEXT_MASK TRACE_LIST_MAX
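/*
 * With the enum above, TRACE_FTRACE_BIT is 4 and TRACE_INTERNAL_BIT is 8,
 * so TRACE_FTRACE_MAX is (1 << 8) - 1 = 0xff and TRACE_LIST_MAX is
 * (1 << 12) - 1 = 0xfff. A recursion value above "max" therefore means a
 * check was already made by a caller further out.
 */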
471
472 static __always_inline int trace_get_context_bit(void)
473 {
474 int bit;
475
476 if (in_interrupt()) {
477 if (in_nmi())
478 bit = 0;
479
480 else if (in_irq())
481 bit = 1;
482 else
483 bit = 2;
484 } else
485 bit = 3;
486
487 return bit;
488 }
489
490 static __always_inline int trace_test_and_set_recursion(int start, int max)
491 {
492 unsigned int val = current->trace_recursion;
493 int bit;
494
495 /* A previous recursion check was made */
496 if ((val & TRACE_CONTEXT_MASK) > max)
497 return 0;
498
499 bit = trace_get_context_bit() + start;
500 if (unlikely(val & (1 << bit)))
501 return -1;
502
503 val |= 1 << bit;
504 current->trace_recursion = val;
505 barrier();
506
507 return bit;
508 }
509
510 static __always_inline void trace_clear_recursion(int bit)
511 {
512 unsigned int val = current->trace_recursion;
513
514 if (!bit)
515 return;
516
517 bit = 1 << bit;
518 val &= ~bit;
519
520 barrier();
521 current->trace_recursion = val;
522 }
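/*
 * The two helpers above are typically paired to guard a function-trace
 * callback against recursion, roughly like this (sketch):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	... do the actual tracing work ...
 *
 *	trace_clear_recursion(bit);
 *
 * A negative return means this context is already inside the callback; a
 * return of zero means an outer level already holds a recursion bit.
 */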
523
524 static inline struct ring_buffer_iter *
525 trace_buffer_iter(struct trace_iterator *iter, int cpu)
526 {
527 if (iter->buffer_iter && iter->buffer_iter[cpu])
528 return iter->buffer_iter[cpu];
529 return NULL;
530 }
531
532 int tracer_init(struct tracer *t, struct trace_array *tr);
533 int tracing_is_enabled(void);
534 void tracing_reset(struct trace_buffer *buf, int cpu);
535 void tracing_reset_online_cpus(struct trace_buffer *buf);
536 void tracing_reset_current(int cpu);
537 void tracing_reset_all_online_cpus(void);
538 int tracing_open_generic(struct inode *inode, struct file *filp);
539 bool tracing_is_disabled(void);
540 struct dentry *trace_create_file(const char *name,
541 umode_t mode,
542 struct dentry *parent,
543 void *data,
544 const struct file_operations *fops);
545
546 struct dentry *tracing_init_dentry(void);
547
548 struct ring_buffer_event;
549
550 struct ring_buffer_event *
551 trace_buffer_lock_reserve(struct ring_buffer *buffer,
552 int type,
553 unsigned long len,
554 unsigned long flags,
555 int pc);
556
557 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
558 struct trace_array_cpu *data);
559
560 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
561 int *ent_cpu, u64 *ent_ts);
562
563 void __buffer_unlock_commit(struct ring_buffer *buffer,
564 struct ring_buffer_event *event);
565
566 int trace_empty(struct trace_iterator *iter);
567
568 void *trace_find_next_entry_inc(struct trace_iterator *iter);
569
570 void trace_init_global_iter(struct trace_iterator *iter);
571
572 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
573
574 void trace_function(struct trace_array *tr,
575 unsigned long ip,
576 unsigned long parent_ip,
577 unsigned long flags, int pc);
578 void trace_graph_function(struct trace_array *tr,
579 unsigned long ip,
580 unsigned long parent_ip,
581 unsigned long flags, int pc);
582 void trace_latency_header(struct seq_file *m);
583 void trace_default_header(struct seq_file *m);
584 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
585 int trace_empty(struct trace_iterator *iter);
586
587 void trace_graph_return(struct ftrace_graph_ret *trace);
588 int trace_graph_entry(struct ftrace_graph_ent *trace);
589 void set_graph_array(struct trace_array *tr);
590
591 void tracing_start_cmdline_record(void);
592 void tracing_stop_cmdline_record(void);
593 int register_tracer(struct tracer *type);
594 int is_tracing_stopped(void);
595
596 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
597
598 extern cpumask_var_t __read_mostly tracing_buffer_mask;
599
600 #define for_each_tracing_cpu(cpu) \
601 for_each_cpu(cpu, tracing_buffer_mask)
602
603 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
604
605 extern unsigned long tracing_thresh;
606
607 #ifdef CONFIG_TRACER_MAX_TRACE
608 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
609 void update_max_tr_single(struct trace_array *tr,
610 struct task_struct *tsk, int cpu);
611 #endif /* CONFIG_TRACER_MAX_TRACE */
612
613 #ifdef CONFIG_STACKTRACE
614 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
615 int skip, int pc);
616
617 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
618 int skip, int pc, struct pt_regs *regs);
619
620 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
621 int pc);
622
623 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
624 int pc);
625 #else
626 static inline void ftrace_trace_stack(struct ring_buffer *buffer,
627 unsigned long flags, int skip, int pc)
628 {
629 }
630
631 static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
632 unsigned long flags, int skip,
633 int pc, struct pt_regs *regs)
634 {
635 }
636
637 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
638 unsigned long flags, int pc)
639 {
640 }
641
642 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
643 int skip, int pc)
644 {
645 }
646 #endif /* CONFIG_STACKTRACE */
647
648 extern cycle_t ftrace_now(int cpu);
649
650 extern void trace_find_cmdline(int pid, char comm[]);
651
652 #ifdef CONFIG_DYNAMIC_FTRACE
653 extern unsigned long ftrace_update_tot_cnt;
654 #endif
655 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
656 extern int DYN_FTRACE_TEST_NAME(void);
657 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
658 extern int DYN_FTRACE_TEST_NAME2(void);
659
660 extern bool ring_buffer_expanded;
661 extern bool tracing_selftest_disabled;
662 DECLARE_PER_CPU(int, ftrace_cpu_disabled);
663
664 #ifdef CONFIG_FTRACE_STARTUP_TEST
665 extern int trace_selftest_startup_function(struct tracer *trace,
666 struct trace_array *tr);
667 extern int trace_selftest_startup_function_graph(struct tracer *trace,
668 struct trace_array *tr);
669 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
670 struct trace_array *tr);
671 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
672 struct trace_array *tr);
673 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
674 struct trace_array *tr);
675 extern int trace_selftest_startup_wakeup(struct tracer *trace,
676 struct trace_array *tr);
677 extern int trace_selftest_startup_nop(struct tracer *trace,
678 struct trace_array *tr);
679 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
680 struct trace_array *tr);
681 extern int trace_selftest_startup_branch(struct tracer *trace,
682 struct trace_array *tr);
683 /*
684 * Tracer data references selftest functions that only occur
685 * on boot up. These can be __init functions. Thus, when selftests
686 * are enabled, the tracers need to reference __init functions.
687 */
688 #define __tracer_data __refdata
689 #else
690 /* Tracers are seldom changed. Optimize when selftests are disabled. */
691 #define __tracer_data __read_mostly
692 #endif /* CONFIG_FTRACE_STARTUP_TEST */
693
694 extern void *head_page(struct trace_array_cpu *data);
695 extern unsigned long long ns2usecs(cycle_t nsec);
696 extern int
697 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
698 extern int
699 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
700 extern int
701 trace_array_vprintk(struct trace_array *tr,
702 unsigned long ip, const char *fmt, va_list args);
703 int trace_array_printk(struct trace_array *tr,
704 unsigned long ip, const char *fmt, ...);
705 int trace_array_printk_buf(struct ring_buffer *buffer,
706 unsigned long ip, const char *fmt, ...);
707 void trace_printk_seq(struct trace_seq *s);
708 enum print_line_t print_trace_line(struct trace_iterator *iter);
709
710 extern unsigned long trace_flags;
711
712 extern char trace_find_mark(unsigned long long duration);
713
714 /* Standard output formatting function used for function return traces */
715 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
716
717 /* Flag options */
718 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
719 #define TRACE_GRAPH_PRINT_CPU 0x2
720 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
721 #define TRACE_GRAPH_PRINT_PROC 0x8
722 #define TRACE_GRAPH_PRINT_DURATION 0x10
723 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
724 #define TRACE_GRAPH_PRINT_IRQS 0x40
725 #define TRACE_GRAPH_PRINT_TAIL 0x80
726 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
727 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
728
729 extern enum print_line_t
730 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
731 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
732 extern void
733 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
734 extern void graph_trace_open(struct trace_iterator *iter);
735 extern void graph_trace_close(struct trace_iterator *iter);
736 extern int __trace_graph_entry(struct trace_array *tr,
737 struct ftrace_graph_ent *trace,
738 unsigned long flags, int pc);
739 extern void __trace_graph_return(struct trace_array *tr,
740 struct ftrace_graph_ret *trace,
741 unsigned long flags, int pc);
742
743
744 #ifdef CONFIG_DYNAMIC_FTRACE
745 /* TODO: make this variable */
746 #define FTRACE_GRAPH_MAX_FUNCS 32
747 extern int ftrace_graph_count;
748 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
749 extern int ftrace_graph_notrace_count;
750 extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
751
752 static inline int ftrace_graph_addr(unsigned long addr)
753 {
754 int i;
755
756 if (!ftrace_graph_count)
757 return 1;
758
759 for (i = 0; i < ftrace_graph_count; i++) {
760 if (addr == ftrace_graph_funcs[i]) {
761 /*
762 * If irqs are not being traced, but a set_graph_function
763 * is set and gets called from an interrupt handler, we still
764 * want to trace it.
765 */
766 if (in_irq())
767 trace_recursion_set(TRACE_IRQ_BIT);
768 else
769 trace_recursion_clear(TRACE_IRQ_BIT);
770 return 1;
771 }
772 }
773
774 return 0;
775 }
776
777 static inline int ftrace_graph_notrace_addr(unsigned long addr)
778 {
779 int i;
780
781 if (!ftrace_graph_notrace_count)
782 return 0;
783
784 for (i = 0; i < ftrace_graph_notrace_count; i++) {
785 if (addr == ftrace_graph_notrace_funcs[i])
786 return 1;
787 }
788
789 return 0;
790 }
791 #else
792 static inline int ftrace_graph_addr(unsigned long addr)
793 {
794 return 1;
795 }
796
797 static inline int ftrace_graph_notrace_addr(unsigned long addr)
798 {
799 return 0;
800 }
801 #endif /* CONFIG_DYNAMIC_FTRACE */
802 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
803 static inline enum print_line_t
804 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
805 {
806 return TRACE_TYPE_UNHANDLED;
807 }
808 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
809
810 extern struct list_head ftrace_pids;
811
812 #ifdef CONFIG_FUNCTION_TRACER
813 extern bool ftrace_filter_param __initdata;
814 static inline int ftrace_trace_task(struct task_struct *task)
815 {
816 if (list_empty(&ftrace_pids))
817 return 1;
818
819 return test_tsk_trace_trace(task);
820 }
821 extern int ftrace_is_dead(void);
822 int ftrace_create_function_files(struct trace_array *tr,
823 struct dentry *parent);
824 void ftrace_destroy_function_files(struct trace_array *tr);
825 void ftrace_init_global_array_ops(struct trace_array *tr);
826 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
827 void ftrace_reset_array_ops(struct trace_array *tr);
828 int using_ftrace_ops_list_func(void);
829 #else
830 static inline int ftrace_trace_task(struct task_struct *task)
831 {
832 return 1;
833 }
834 static inline int ftrace_is_dead(void) { return 0; }
835 static inline int
836 ftrace_create_function_files(struct trace_array *tr,
837 struct dentry *parent)
838 {
839 return 0;
840 }
841 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
842 static inline __init void
843 ftrace_init_global_array_ops(struct trace_array *tr) { }
844 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
845 /* ftrace_func_t type is not defined, use macro instead of static inline */
846 #define ftrace_init_array_ops(tr, func) do { } while (0)
847 #endif /* CONFIG_FUNCTION_TRACER */
848
849 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
850 void ftrace_create_filter_files(struct ftrace_ops *ops,
851 struct dentry *parent);
852 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
853 #else
854 /*
855 * The ops parameter passed in is usually undefined.
856 * This must be a macro.
857 */
858 #define ftrace_create_filter_files(ops, parent) do { } while (0)
859 #define ftrace_destroy_filter_files(ops) do { } while (0)
860 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
861
862 int ftrace_event_is_function(struct trace_event_call *call);
863
864 /*
865 * struct trace_parser - helper for reading user input separated by spaces
866 * @cont: set if the input is not complete - no final space char was found
867 * @buffer: holds the parsed user input
868 * @idx: user input length
869 * @size: buffer size
870 */
871 struct trace_parser {
872 bool cont;
873 char *buffer;
874 unsigned idx;
875 unsigned size;
876 };
877
878 static inline bool trace_parser_loaded(struct trace_parser *parser)
879 {
880 return (parser->idx != 0);
881 }
882
883 static inline bool trace_parser_cont(struct trace_parser *parser)
884 {
885 return parser->cont;
886 }
887
888 static inline void trace_parser_clear(struct trace_parser *parser)
889 {
890 parser->cont = false;
891 parser->idx = 0;
892 }
893
894 extern int trace_parser_get_init(struct trace_parser *parser, int size);
895 extern void trace_parser_put(struct trace_parser *parser);
896 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
897 size_t cnt, loff_t *ppos);
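/*
 * Rough usage pattern for the parser in a write handler (sketch; the
 * handle_token() consumer is hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return read;
 */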
898
899 /*
900 * trace_iterator_flags is an enumeration that defines bit
901 * positions into trace_flags that control the output.
902 *
903 * NOTE: These bits must match the trace_options array in
904 * trace.c.
905 */
906 enum trace_iterator_flags {
907 TRACE_ITER_PRINT_PARENT = 0x01,
908 TRACE_ITER_SYM_OFFSET = 0x02,
909 TRACE_ITER_SYM_ADDR = 0x04,
910 TRACE_ITER_VERBOSE = 0x08,
911 TRACE_ITER_RAW = 0x10,
912 TRACE_ITER_HEX = 0x20,
913 TRACE_ITER_BIN = 0x40,
914 TRACE_ITER_BLOCK = 0x80,
915 TRACE_ITER_STACKTRACE = 0x100,
916 TRACE_ITER_PRINTK = 0x200,
917 TRACE_ITER_PREEMPTONLY = 0x400,
918 TRACE_ITER_BRANCH = 0x800,
919 TRACE_ITER_ANNOTATE = 0x1000,
920 TRACE_ITER_USERSTACKTRACE = 0x2000,
921 TRACE_ITER_SYM_USEROBJ = 0x4000,
922 TRACE_ITER_PRINTK_MSGONLY = 0x8000,
923 TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */
924 TRACE_ITER_LATENCY_FMT = 0x20000,
925 TRACE_ITER_SLEEP_TIME = 0x40000,
926 TRACE_ITER_GRAPH_TIME = 0x80000,
927 TRACE_ITER_RECORD_CMD = 0x100000,
928 TRACE_ITER_OVERWRITE = 0x200000,
929 TRACE_ITER_STOP_ON_FREE = 0x400000,
930 TRACE_ITER_IRQ_INFO = 0x800000,
931 TRACE_ITER_MARKERS = 0x1000000,
932 TRACE_ITER_FUNCTION = 0x2000000,
933 };
934
935 /*
936 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
937 * control the output of kernel symbols.
938 */
939 #define TRACE_ITER_SYM_MASK \
940 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
941
942 extern struct tracer nop_trace;
943
944 #ifdef CONFIG_BRANCH_TRACER
945 extern int enable_branch_tracing(struct trace_array *tr);
946 extern void disable_branch_tracing(void);
947 static inline int trace_branch_enable(struct trace_array *tr)
948 {
949 if (trace_flags & TRACE_ITER_BRANCH)
950 return enable_branch_tracing(tr);
951 return 0;
952 }
953 static inline void trace_branch_disable(void)
954 {
955 /* due to races, always disable */
956 disable_branch_tracing();
957 }
958 #else
959 static inline int trace_branch_enable(struct trace_array *tr)
960 {
961 return 0;
962 }
963 static inline void trace_branch_disable(void)
964 {
965 }
966 #endif /* CONFIG_BRANCH_TRACER */
967
968 /* set ring buffers to default size if not already done so */
969 int tracing_update_buffers(void);
970
971 struct ftrace_event_field {
972 struct list_head link;
973 const char *name;
974 const char *type;
975 int filter_type;
976 int offset;
977 int size;
978 int is_signed;
979 };
980
981 struct event_filter {
982 int n_preds; /* Number assigned */
983 int a_preds; /* allocated */
984 struct filter_pred *preds;
985 struct filter_pred *root;
986 char *filter_string;
987 };
988
989 struct event_subsystem {
990 struct list_head list;
991 const char *name;
992 struct event_filter *filter;
993 int ref_count;
994 };
995
996 struct trace_subsystem_dir {
997 struct list_head list;
998 struct event_subsystem *subsystem;
999 struct trace_array *tr;
1000 struct dentry *entry;
1001 int ref_count;
1002 int nr_events;
1003 };
1004
1005 #define FILTER_PRED_INVALID ((unsigned short)-1)
1006 #define FILTER_PRED_IS_RIGHT (1 << 15)
1007 #define FILTER_PRED_FOLD (1 << 15)
1008
1009 /*
1010 * The max preds is the size of unsigned short with
1011 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1012 * and FOLD flags. The other is reserved.
1013 *
1014 * 2^14 preds is way more than enough.
1015 */
1016 #define MAX_FILTER_PRED 16384
1017
1018 struct filter_pred;
1019 struct regex;
1020
1021 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1022
1023 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1024
1025 enum regex_type {
1026 MATCH_FULL = 0,
1027 MATCH_FRONT_ONLY,
1028 MATCH_MIDDLE_ONLY,
1029 MATCH_END_ONLY,
1030 };
1031
1032 struct regex {
1033 char pattern[MAX_FILTER_STR_VAL];
1034 int len;
1035 int field_len;
1036 regex_match_func match;
1037 };
1038
1039 struct filter_pred {
1040 filter_pred_fn_t fn;
1041 u64 val;
1042 struct regex regex;
1043 unsigned short *ops;
1044 struct ftrace_event_field *field;
1045 int offset;
1046 int not;
1047 int op;
1048 unsigned short index;
1049 unsigned short parent;
1050 unsigned short left;
1051 unsigned short right;
1052 };
1053
1054 extern enum regex_type
1055 filter_parse_regex(char *buff, int len, char **search, int *not);
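/*
 * filter_parse_regex() classifies a simple glob and strips the '*'s: e.g.
 * "sched*" yields MATCH_FRONT_ONLY with *search pointing at "sched",
 * "*switch" yields MATCH_END_ONLY, "*sched*" yields MATCH_MIDDLE_ONLY,
 * and a plain string yields MATCH_FULL. A leading '!' sets *not.
 */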
1056 extern void print_event_filter(struct trace_event_file *file,
1057 struct trace_seq *s);
1058 extern int apply_event_filter(struct trace_event_file *file,
1059 char *filter_string);
1060 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1061 char *filter_string);
1062 extern void print_subsystem_event_filter(struct event_subsystem *system,
1063 struct trace_seq *s);
1064 extern int filter_assign_type(const char *type);
1065 extern int create_event_filter(struct trace_event_call *call,
1066 char *filter_str, bool set_str,
1067 struct event_filter **filterp);
1068 extern void free_event_filter(struct event_filter *filter);
1069
1070 struct ftrace_event_field *
1071 trace_find_event_field(struct trace_event_call *call, char *name);
1072
1073 extern void trace_event_enable_cmd_record(bool enable);
1074 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1075 extern int event_trace_del_tracer(struct trace_array *tr);
1076
1077 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1078 const char *system,
1079 const char *event);
1080
1081 static inline void *event_file_data(struct file *filp)
1082 {
1083 return ACCESS_ONCE(file_inode(filp)->i_private);
1084 }
1085
1086 extern struct mutex event_mutex;
1087 extern struct list_head ftrace_events;
1088
1089 extern const struct file_operations event_trigger_fops;
1090
1091 extern int register_trigger_cmds(void);
1092 extern void clear_event_triggers(struct trace_array *tr);
1093
1094 struct event_trigger_data {
1095 unsigned long count;
1096 int ref;
1097 struct event_trigger_ops *ops;
1098 struct event_command *cmd_ops;
1099 struct event_filter __rcu *filter;
1100 char *filter_str;
1101 void *private_data;
1102 struct list_head list;
1103 };
1104
1105 /**
1106 * struct event_trigger_ops - callbacks for trace event triggers
1107 *
1108 * The methods in this structure provide per-event trigger hooks for
1109 * various trigger operations.
1110 *
1111 * All the methods below, except for @init() and @free(), must be
1112 * implemented.
1113 *
1114 * @func: The trigger 'probe' function called when the triggering
1115 * event occurs. The data passed into this callback is the data
1116 * that was supplied to the event_command @reg() function that
1117 * registered the trigger (see struct event_command).
1118 *
1119 * @init: An optional initialization function called for the trigger
1120 * when the trigger is registered (via the event_command reg()
1121 * function). This can be used to perform per-trigger
1122 * initialization such as incrementing a per-trigger reference
1123 * count, for instance. This is usually implemented by the
1124 * generic utility function @event_trigger_init() (see
1125 * trace_event_triggers.c).
1126 *
1127 * @free: An optional de-initialization function called for the
1128 * trigger when the trigger is unregistered (via the
1129 * event_command @reg() function). This can be used to perform
1130 * per-trigger de-initialization such as decrementing a
1131 * per-trigger reference count and freeing corresponding trigger
1132 * data, for instance. This is usually implemented by the
1133 * generic utility function @event_trigger_free() (see
1134 * trace_event_triggers.c).
1135 *
1136 * @print: The callback function invoked to have the trigger print
1137 * itself. This is usually implemented by a wrapper function
1138 * that calls the generic utility function @event_trigger_print()
1139 * (see trace_event_triggers.c).
1140 */
1141 struct event_trigger_ops {
1142 void (*func)(struct event_trigger_data *data);
1143 int (*init)(struct event_trigger_ops *ops,
1144 struct event_trigger_data *data);
1145 void (*free)(struct event_trigger_ops *ops,
1146 struct event_trigger_data *data);
1147 int (*print)(struct seq_file *m,
1148 struct event_trigger_ops *ops,
1149 struct event_trigger_data *data);
1150 };
1151
1152 /**
1153 * struct event_command - callbacks and data members for event commands
1154 *
1155 * Event commands are invoked by users by writing the command name
1156 * into the 'trigger' file associated with a trace event. The
1157 * parameters associated with a specific invocation of an event
1158 * command are used to create an event trigger instance, which is
1159 * added to the list of trigger instances associated with that trace
1160 * event. When the event is hit, the set of triggers associated with
1161 * that event is invoked.
1162 *
1163 * The data members in this structure provide per-event command data
1164 * for various event commands.
1165 *
1166 * All the data members below, except for @post_trigger, must be set
1167 * for each event command.
1168 *
1169 * @name: The unique name that identifies the event command. This is
1170 * the name used when setting triggers via trigger files.
1171 *
1172 * @trigger_type: A unique id that identifies the event command
1173 * 'type'. This value has two purposes, the first to ensure that
1174 * only one trigger of the same type can be set at a given time
1175 * for a particular event e.g. it doesn't make sense to have both
1176 * a traceon and traceoff trigger attached to a single event at
1177 * the same time, so traceon and traceoff have the same type
1178 * though they have different names. The @trigger_type value is
1179 * also used as a bit value for deferring the actual trigger
1180 * action until after the current event is finished. Some
1181 * commands need to do this if they themselves log to the trace
1182 * buffer (see the @post_trigger() member below). @trigger_type
1183 * values are defined by adding new values to the trigger_type
1184 * enum in include/linux/trace_events.h.
1185 *
1186 * @post_trigger: A flag that says whether or not this command needs
1187 * to have its action delayed until after the current event has
1188 * been closed. Some triggers need to avoid being invoked while
1189 * an event is currently in the process of being logged, since
1190 * the trigger may itself log data into the trace buffer. Thus
1191 * we make sure the current event is committed before invoking
1192 * those triggers. To do that, the trigger invocation is split
1193 * in two - the first part checks the filter using the current
1194 * trace record; if a command has the @post_trigger flag set, it
1195 * sets a bit for itself in the return value, otherwise it
1196 * directly invokes the trigger. Once all commands have been
1197 * either invoked or set their return flag, the current record is
1198 * either committed or discarded. At that point, if any commands
1199 * have deferred their triggers, those commands are finally
1200 * invoked following the close of the current event. In other
1201 * words, if the event_trigger_ops @func() probe implementation
1202 * itself logs to the trace buffer, this flag should be set,
1203 * otherwise it can be left unspecified.
1204 *
1205 * All the methods below, except for @set_filter(), must be
1206 * implemented.
1207 *
1208 * @func: The callback function responsible for parsing and
1209 * registering the trigger written to the 'trigger' file by the
1210 * user. It allocates the trigger instance and registers it with
1211 * the appropriate trace event. It makes use of the other
1212 * event_command callback functions to orchestrate this, and is
1213 * usually implemented by the generic utility function
1214 * @event_trigger_callback() (see trace_event_triggers.c).
1215 *
1216 * @reg: Adds the trigger to the list of triggers associated with the
1217 * event, and enables the event trigger itself, after
1218 * initializing it (via the event_trigger_ops @init() function).
1219 * This is also where commands can use the @trigger_type value to
1220 * make the decision as to whether or not multiple instances of
1221 * the trigger should be allowed. This is usually implemented by
1222 * the generic utility function @register_trigger() (see
1223 * trace_event_triggers.c).
1224 *
1225 * @unreg: Removes the trigger from the list of triggers associated
1226 * with the event, and disables the event trigger itself, after
1227 * initializing it (via the event_trigger_ops @free() function).
1228 * This is usually implemented by the generic utility function
1229 * @unregister_trigger() (see trace_event_triggers.c).
1230 *
1231 * @set_filter: An optional function called to parse and set a filter
1232 * for the trigger. If no @set_filter() method is set for the
1233 * event command, filters set by the user for the command will be
1234 * ignored. This is usually implemented by the generic utility
1235 * function @set_trigger_filter() (see trace_event_triggers.c).
1236 *
1237 * @get_trigger_ops: The callback function invoked to retrieve the
1238 * event_trigger_ops implementation associated with the command.
1239 */
1240 struct event_command {
1241 struct list_head list;
1242 char *name;
1243 enum event_trigger_type trigger_type;
1244 bool post_trigger;
1245 int (*func)(struct event_command *cmd_ops,
1246 struct trace_event_file *file,
1247 char *glob, char *cmd, char *params);
1248 int (*reg)(char *glob,
1249 struct event_trigger_ops *ops,
1250 struct event_trigger_data *data,
1251 struct trace_event_file *file);
1252 void (*unreg)(char *glob,
1253 struct event_trigger_ops *ops,
1254 struct event_trigger_data *data,
1255 struct trace_event_file *file);
1256 int (*set_filter)(char *filter_str,
1257 struct event_trigger_data *data,
1258 struct trace_event_file *file);
1259 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1260 };
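/*
 * From user space a command and its trigger are exercised roughly like
 * this (an example; paths are relative to the tracefs mount point):
 *
 *	# echo 'traceoff:1 if prev_pid == 1234' > \
 *		events/sched/sched_switch/trigger
 *
 * The "traceoff" event_command parses this into an event_trigger_data
 * instance whose ops->func() fires (here at most once) when the attached
 * filter matches.
 */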
1261
1262 extern int trace_event_enable_disable(struct trace_event_file *file,
1263 int enable, int soft_disable);
1264 extern int tracing_alloc_snapshot(void);
1265
1266 extern const char *__start___trace_bprintk_fmt[];
1267 extern const char *__stop___trace_bprintk_fmt[];
1268
1269 extern const char *__start___tracepoint_str[];
1270 extern const char *__stop___tracepoint_str[];
1271
1272 void trace_printk_init_buffers(void);
1273 void trace_printk_start_comm(void);
1274 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1275 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1276
1277 /*
1278 * Normal trace_printk() and friends allocate special buffers
1279 * to do the manipulation, as well as save the print formats
1280 * into sections to display. But the trace infrastructure wants
1281 * to use these without the added overhead at the price of being
1282 * a bit slower (used mainly for warnings, where we don't care
1283 * about performance). The internal_trace_puts() is for such
1284 * a purpose.
1285 */
1286 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1287
1288 #undef FTRACE_ENTRY
1289 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1290 extern struct trace_event_call \
1291 __aligned(4) event_##call;
1292 #undef FTRACE_ENTRY_DUP
1293 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1294 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1295 filter)
1296 #include "trace_entries.h"
1297
1298 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1299 int perf_ftrace_event_register(struct trace_event_call *call,
1300 enum trace_reg type, void *data);
1301 #else
1302 #define perf_ftrace_event_register NULL
1303 #endif
1304
1305 #ifdef CONFIG_FTRACE_SYSCALLS
1306 void init_ftrace_syscalls(void);
1307 #else
1308 static inline void init_ftrace_syscalls(void) { }
1309 #endif
1310
1311 #ifdef CONFIG_EVENT_TRACING
1312 void trace_event_init(void);
1313 void trace_event_enum_update(struct trace_enum_map **map, int len);
1314 #else
1315 static inline void __init trace_event_init(void) { }
1316 static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
1317 #endif
1318
1319 extern struct trace_iterator *tracepoint_print_iter;
1320
1321 #endif /* _LINUX_KERNEL_TRACE_H */