/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
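
/*
 * Example (illustrative only, not part of the original file): the dump
 * mode maps to the boot parameter and sysctl described above, e.g.
 *
 *	ftrace_dump_on_oops		# DUMP_ALL: dump every CPU's buffer
 *	ftrace_dump_on_oops=orig_cpu	# DUMP_ORIG: only the oopsing CPU
 *
 * or at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */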
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
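
/*
 * Example (illustrative only, not part of the original file): booting with
 * "ftrace=function" selects the function tracer before user space starts,
 * which is why the ring buffer is expanded here rather than waiting for the
 * first write through tracefs.
 */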
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
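
/*
 * Example (illustrative only, not part of the original file): booting with
 * "alloc_snapshot" pre-allocates the snapshot (max) buffer so that a later
 * tracing_snapshot() call cannot fail for lack of memory; without it the
 * snapshot buffer is only allocated on first use, e.g. by
 * "echo 1 > /sys/kernel/debug/tracing/snapshot".
 */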
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
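
/*
 * Example (illustrative only, not part of the original file):
 * "trace_clock=global" on the command line selects the cross-CPU ordered
 * clock at boot, equivalent to writing "global" to the trace_clock file
 * in tracefs once the system is up.
 */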
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection.  The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access is also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
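
/*
 * Illustrative usage sketch (not part of the original file): a consuming
 * reader of a single CPU buffer brackets its accesses as
 *
 *	trace_access_lock(cpu);
 *	... read/consume events from that cpu's buffer ...
 *	trace_access_unlock(cpu);
 *
 * while a reader of all buffers passes RING_BUFFER_ALL_CPUS, which takes
 * all_cpu_access_lock for write and therefore excludes every per-cpu reader.
 */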
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
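
/*
 * Illustrative usage sketch (not part of the original file): kernel code
 * can bracket a region of interest so only that window is recorded:
 *
 *	tracing_on();
 *	do_something_interesting();	// hypothetical caller code
 *	tracing_off();
 *
 * The same switch is available from user space via the tracefs
 * "tracing_on" file.
 */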
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
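
/*
 * Illustrative usage sketch (not part of the original file): callers do not
 * normally use __trace_puts() directly; the trace_puts() macro picks between
 * this and __trace_bputs() depending on whether the string is a literal, e.g.
 *
 *	trace_puts("reached the slow path\n");
 */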
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
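
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * pattern is to allocate the snapshot buffer once from a context that may
 * sleep, then trigger snapshots from atomic context when a condition hits:
 *
 *	if (tracing_alloc_snapshot() == 0)
 *		snapshot_ready = true;
 *	...
 *	if (snapshot_ready && hit_rare_condition)
 *		tracing_snapshot();
 *
 * where snapshot_ready and hit_rare_condition are hypothetical names used
 * only for this example.
 */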
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
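
/*
 * Example (illustrative only, not part of the original file):
 * "trace_buf_size=16384k" on the kernel command line asks for roughly
 * 16 MB per CPU; memparse() accepts the usual k/m/g suffixes and the
 * value is later rounded by the ring buffer to whole pages.
 */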
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	/* ... option name strings elided in this excerpt ... */
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};
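
/*
 * Example (illustrative only, not part of the original file): the "name"
 * column above is what user space writes to select a clock, e.g.
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Clocks with in_ns == 0 (counter, uptime) are not in nanoseconds, so their
 * timestamps are shown as raw counts rather than sec.usec values.
 */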
/**
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/**
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
/**
 * trace_get_user - reads the user input string separated by  space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
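
/*
 * Illustrative note (not part of the original file): trace_get_user() lets
 * files like set_ftrace_filter accept several whitespace-separated tokens in
 * one write.  For example, "echo 'func_a func_b' > set_ftrace_filter" results
 * in successive calls that each leave one NUL-terminated name in
 * parser->buffer, with parser->cont carrying state when a token is split
 * across writes.
 */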
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->seq.len <= s->seq.readpos)
		return -EBUSY;

	len = s->seq.len - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
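
/*
 * Illustrative usage sketch (not part of the original file) for the
 * register_tracer() below: a minimal built-in tracer plugin fills in a
 * struct tracer and registers it at init time; the names my_tracer,
 * my_trace_init and my_trace_reset are hypothetical.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_trace_init,
 *		.reset	= my_trace_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */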
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
static void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct ftrace_event_file *ftrace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip		= ip;
	entry->parent_ip	= parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
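
/*
 * Illustrative note (not part of the original file): four buffers are
 * needed because an event in one context can interrupt formatting that is
 * already using the buffer of a lower context (task -> softirq -> hardirq
 * -> NMI), and each level must not scribble over the one it preempted.
 */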
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip	= ip;
	entry->fmt	= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
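
/*
 * Illustrative usage sketch (not part of the original file): kernel code
 * does not call trace_vbprintk() directly; it is reached through the
 * trace_printk() macro, which records only the format pointer and the
 * binary arguments in the ring buffer, e.g.
 *
 *	trace_printk("queued %d requests for dev %s\n", nr, dev_name);
 *
 * where nr and dev_name are hypothetical variables in the caller.
 */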
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2467 static void s_stop(struct seq_file
*m
, void *p
)
2469 struct trace_iterator
*iter
= m
->private;
2471 #ifdef CONFIG_TRACER_MAX_TRACE
2472 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
2476 if (!iter
->snapshot
)
2477 atomic_dec(&trace_record_cmdline_disabled
);
2479 trace_access_unlock(iter
->cpu_file
);
2480 trace_event_read_unlock();
2484 get_total_entries(struct trace_buffer
*buf
,
2485 unsigned long *total
, unsigned long *entries
)
2487 unsigned long count
;
2493 for_each_tracing_cpu(cpu
) {
2494 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
2496 * If this buffer has skipped entries, then we hold all
2497 * entries for the trace and we need to ignore the
2498 * ones before the time stamp.
2500 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
2501 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
2502 /* total is the same as the entries */
2506 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
2511 static void print_lat_help_header(struct seq_file *m)
2513 seq_puts(m, "# _------=> CPU# \n"
2514 "# / _-----=> irqs-off \n"
2515 "# | / _----=> need-resched \n"
2516 "# || / _---=> hardirq/softirq \n"
2517 "# ||| / _--=> preempt-depth \n"
2519 "# cmd pid ||||| time | caller \n"
2520 "# \\ / ||||| \\ | / \n");
2523 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2525 unsigned long total;
2526 unsigned long entries;
2528 get_total_entries(buf, &total, &entries);
2529 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2530 entries, total, num_online_cpus());
2534 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2536 print_event_info(buf, m);
2537 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2541 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2543 print_event_info(buf, m);
2544 seq_puts(m, "# _-----=> irqs-off\n"
2545 "# / _----=> need-resched\n"
2546 "# | / _---=> hardirq/softirq\n"
2547 "# || / _--=> preempt-depth\n"
2549 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2550 "# | | | |||| | |\n");
2554 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2556 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2557 struct trace_buffer *buf = iter->trace_buffer;
2558 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2559 struct tracer *type = iter->trace;
2560 unsigned long entries;
2561 unsigned long total;
2562 const char *name = "preemption";
2566 get_total_entries(buf, &total, &entries);
2568 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2570 seq_puts(m, "# -----------------------------------"
2571 "---------------------------------\n");
2572 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2573 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2574 nsecs_to_usecs(data->saved_latency),
2578 #if defined(CONFIG_PREEMPT_NONE)
2580 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2582 #elif defined(CONFIG_PREEMPT)
2587 /* These are reserved for later use */
2590 seq_printf(m, " #P:%d)\n", num_online_cpus());
2594 seq_puts(m, "# -----------------\n");
2595 seq_printf(m, "# | task: %.16s-%d "
2596 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2597 data->comm, data->pid,
2598 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2599 data->policy, data->rt_priority);
2600 seq_puts(m, "# -----------------\n");
2602 if (data->critical_start) {
2603 seq_puts(m, "# => started at: ");
2604 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2605 trace_print_seq(m, &iter->seq);
2606 seq_puts(m, "\n# => ended at: ");
2607 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2608 trace_print_seq(m, &iter->seq);
2609 seq_puts(m, "\n#\n");
2615 static void test_cpu_buff_start(struct trace_iterator *iter)
2617 struct trace_seq *s = &iter->seq;
2619 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2622 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2625 if (cpumask_test_cpu(iter->cpu, iter->started))
2628 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2631 cpumask_set_cpu(iter->cpu, iter->started);
2633 /* Don't print started cpu buffer for the first entry of the trace */
2635 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2639 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2641 struct trace_seq *s = &iter->seq;
2642 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2643 struct trace_entry *entry;
2644 struct trace_event *event;
2648 test_cpu_buff_start(iter);
2650 event = ftrace_find_event(entry->type);
2652 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2653 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2654 trace_print_lat_context(iter);
2656 trace_print_context(iter);
2659 if (trace_seq_has_overflowed(s))
2660 return TRACE_TYPE_PARTIAL_LINE;
2663 return event->funcs->trace(iter, sym_flags, event);
2665 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2667 return trace_handle_return(s);
2670 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2672 struct trace_seq *s = &iter->seq;
2673 struct trace_entry *entry;
2674 struct trace_event *event;
2678 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2679 trace_seq_printf(s, "%d %d %llu ",
2680 entry->pid, iter->cpu, iter->ts);
2682 if (trace_seq_has_overflowed(s))
2683 return TRACE_TYPE_PARTIAL_LINE;
2685 event = ftrace_find_event(entry->type);
2687 return event->funcs->raw(iter, 0, event);
2689 trace_seq_printf(s, "%d ?\n", entry->type);
2691 return trace_handle_return(s);
2694 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2696 struct trace_seq *s = &iter->seq;
2697 unsigned char newline = '\n';
2698 struct trace_entry *entry;
2699 struct trace_event *event;
2703 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2704 SEQ_PUT_HEX_FIELD(s, entry->pid);
2705 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2706 SEQ_PUT_HEX_FIELD(s, iter->ts);
2707 if (trace_seq_has_overflowed(s))
2708 return TRACE_TYPE_PARTIAL_LINE;
2711 event = ftrace_find_event(entry->type);
2713 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2714 if (ret != TRACE_TYPE_HANDLED)
2718 SEQ_PUT_FIELD(s, newline);
2720 return trace_handle_return(s);
2723 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2725 struct trace_seq *s = &iter->seq;
2726 struct trace_entry *entry;
2727 struct trace_event *event;
2731 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2732 SEQ_PUT_FIELD(s, entry->pid);
2733 SEQ_PUT_FIELD(s, iter->cpu);
2734 SEQ_PUT_FIELD(s, iter->ts);
2735 if (trace_seq_has_overflowed(s))
2736 return TRACE_TYPE_PARTIAL_LINE;
2739 event = ftrace_find_event(entry->type);
2740 return event ? event->funcs->binary(iter, 0, event) :
2744 int trace_empty(struct trace_iterator *iter)
2746 struct ring_buffer_iter *buf_iter;
2749 /* If we are looking at one CPU buffer, only check that one */
2750 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2751 cpu = iter->cpu_file;
2752 buf_iter = trace_buffer_iter(iter, cpu);
2754 if (!ring_buffer_iter_empty(buf_iter))
2757 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2763 for_each_tracing_cpu(cpu) {
2764 buf_iter = trace_buffer_iter(iter, cpu);
2766 if (!ring_buffer_iter_empty(buf_iter))
2769 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2777 /* Called with trace_event_read_lock() held. */
2778 enum print_line_t print_trace_line(struct trace_iterator *iter)
2780 enum print_line_t ret;
2782 if (iter->lost_events) {
2783 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2784 iter->cpu, iter->lost_events);
2785 if (trace_seq_has_overflowed(&iter->seq))
2786 return TRACE_TYPE_PARTIAL_LINE;
2789 if (iter->trace && iter->trace->print_line) {
2790 ret = iter->trace->print_line(iter);
2791 if (ret != TRACE_TYPE_UNHANDLED)
2795 if (iter->ent->type == TRACE_BPUTS &&
2796 trace_flags & TRACE_ITER_PRINTK &&
2797 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2798 return trace_print_bputs_msg_only(iter);
2800 if (iter->ent->type == TRACE_BPRINT &&
2801 trace_flags & TRACE_ITER_PRINTK &&
2802 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2803 return trace_print_bprintk_msg_only(iter);
2805 if (iter->ent->type == TRACE_PRINT &&
2806 trace_flags & TRACE_ITER_PRINTK &&
2807 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2808 return trace_print_printk_msg_only(iter);
2810 if (trace_flags & TRACE_ITER_BIN)
2811 return print_bin_fmt(iter);
2813 if (trace_flags & TRACE_ITER_HEX)
2814 return print_hex_fmt(iter);
2816 if (trace_flags & TRACE_ITER_RAW)
2817 return print_raw_fmt(iter);
2819 return print_trace_fmt(iter);
2822 void trace_latency_header(struct seq_file *m)
2824 struct trace_iterator *iter = m->private;
2826 /* print nothing if the buffers are empty */
2827 if (trace_empty(iter))
2830 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2831 print_trace_header(m, iter);
2833 if (!(trace_flags & TRACE_ITER_VERBOSE))
2834 print_lat_help_header(m);
2837 void trace_default_header(struct seq_file *m)
2839 struct trace_iterator *iter = m->private;
2841 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2844 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2845 /* print nothing if the buffers are empty */
2846 if (trace_empty(iter))
2848 print_trace_header(m, iter);
2849 if (!(trace_flags & TRACE_ITER_VERBOSE))
2850 print_lat_help_header(m);
2852 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2853 if (trace_flags & TRACE_ITER_IRQ_INFO)
2854 print_func_help_header_irq(iter->trace_buffer, m);
2856 print_func_help_header(iter->trace_buffer, m);
2861 static void test_ftrace_alive(struct seq_file *m)
2863 if (!ftrace_is_dead())
2865 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2866 "# MAY BE MISSING FUNCTION EVENTS\n");
2869 #ifdef CONFIG_TRACER_MAX_TRACE
2870 static void show_snapshot_main_help(struct seq_file *m)
2872 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2873 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2874 "# Takes a snapshot of the main buffer.\n"
2875 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2876 "# (Doesn't have to be '2' works with any number that\n"
2877 "# is not a '0' or '1')\n");
2880 static void show_snapshot_percpu_help(struct seq_file *m)
2882 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2883 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2884 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2885 "# Takes a snapshot of the main buffer for this cpu.\n");
2887 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2888 "# Must use main snapshot file to allocate.\n");
2890 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2891 "# (Doesn't have to be '2' works with any number that\n"
2892 "# is not a '0' or '1')\n");
2895 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2897 if (iter->tr->allocated_snapshot)
2898 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2900 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2902 seq_puts(m, "# Snapshot commands:\n");
2903 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2904 show_snapshot_main_help(m);
2906 show_snapshot_percpu_help(m);
2909 /* Should never be called */
2910 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2913 static int s_show(struct seq_file *m, void *v)
2915 struct trace_iterator *iter = v;
2918 if (iter->ent == NULL) {
2920 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2922 test_ftrace_alive(m);
2924 if (iter->snapshot && trace_empty(iter))
2925 print_snapshot_help(m, iter);
2926 else if (iter->trace && iter->trace->print_header)
2927 iter->trace->print_header(m);
2929 trace_default_header(m);
2931 } else if (iter->leftover) {
2933 * If we filled the seq_file buffer earlier, we
2934 * want to just show it now.
2936 ret = trace_print_seq(m, &iter->seq);
2938 /* ret should this time be zero, but you never know */
2939 iter->leftover = ret;
2942 print_trace_line(iter);
2943 ret = trace_print_seq(m, &iter->seq);
2945 * If we overflow the seq_file buffer, then it will
2946 * ask us for this data again at start up.
2948 * ret is 0 if seq_file write succeeded.
2951 iter->leftover = ret;
2958 * Should be used after trace_array_get(), trace_types_lock
2959 * ensures that i_cdev was already initialized.
2961 static inline int tracing_get_cpu(struct inode *inode)
2963 if (inode->i_cdev) /* See trace_create_cpu_file() */
2964 return (long)inode->i_cdev - 1;
2965 return RING_BUFFER_ALL_CPUS;
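/*
 * Illustration (not part of the original file): the per-CPU trace files
 * store "cpu" in inode->i_cdev as cpu + 1, so a NULL i_cdev means "all
 * CPUs". A minimal sketch of the encode/decode pair, mirroring the
 * convention tracing_get_cpu() decodes above:
 */
#if 0	/* example only */
static inline void *tracing_encode_cpu(int cpu)
{
	return (void *)(long)(cpu + 1);	/* 0 is reserved for "all CPUs" */
}

static inline int tracing_decode_cpu(void *i_cdev)
{
	return i_cdev ? (long)i_cdev - 1 : RING_BUFFER_ALL_CPUS;
}
#endif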
2968 static const struct seq_operations tracer_seq_ops = {
2975 static struct trace_iterator *
2976 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2978 struct trace_array *tr = inode->i_private;
2979 struct trace_iterator *iter;
2982 if (tracing_disabled)
2983 return ERR_PTR(-ENODEV);
2985 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2987 return ERR_PTR(-ENOMEM);
2989 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2991 if (!iter->buffer_iter)
2995 * We make a copy of the current tracer to avoid concurrent
2996 * changes on it while we are reading.
2998 mutex_lock(&trace_types_lock);
2999 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3003 *iter->trace = *tr->current_trace;
3005 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3010 #ifdef CONFIG_TRACER_MAX_TRACE
3011 /* Currently only the top directory has a snapshot */
3012 if (tr->current_trace->print_max || snapshot)
3013 iter->trace_buffer = &tr->max_buffer;
3016 iter->trace_buffer = &tr->trace_buffer;
3017 iter->snapshot = snapshot;
3019 iter->cpu_file = tracing_get_cpu(inode);
3020 mutex_init(&iter->mutex);
3022 /* Notify the tracer early; before we stop tracing. */
3023 if (iter->trace && iter->trace->open)
3024 iter->trace->open(iter);
3026 /* Annotate start of buffers if we had overruns */
3027 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3028 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3030 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3031 if (trace_clocks[tr->clock_id].in_ns)
3032 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3034 /* stop the trace while dumping if we are not opening "snapshot" */
3035 if (!iter->snapshot)
3036 tracing_stop_tr(tr);
3038 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3039 for_each_tracing_cpu(cpu) {
3040 iter->buffer_iter[cpu] =
3041 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3043 ring_buffer_read_prepare_sync();
3044 for_each_tracing_cpu(cpu) {
3045 ring_buffer_read_start(iter->buffer_iter[cpu]);
3046 tracing_iter_reset(iter, cpu);
3049 cpu = iter->cpu_file;
3050 iter->buffer_iter[cpu] =
3051 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3052 ring_buffer_read_prepare_sync();
3053 ring_buffer_read_start(iter->buffer_iter[cpu]);
3054 tracing_iter_reset(iter, cpu);
3057 mutex_unlock(&trace_types_lock);
3062 mutex_unlock(&trace_types_lock);
3064 kfree(iter->buffer_iter);
3066 seq_release_private(inode, file);
3067 return ERR_PTR(-ENOMEM);
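/*
 * Added note on the read setup above: every per-CPU iterator is created
 * with ring_buffer_read_prepare() first, then a single
 * ring_buffer_read_prepare_sync() runs before ring_buffer_read_start() and
 * tracing_iter_reset(), so the expensive synchronization is paid once for
 * all CPUs rather than once per CPU.
 */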
3070 int tracing_open_generic(struct inode *inode, struct file *filp)
3072 if (tracing_disabled)
3075 filp->private_data = inode->i_private;
3079 bool tracing_is_disabled(void)
3081 return (tracing_disabled) ? true : false;
3085 * Open and update trace_array ref count.
3086 * Must have the current trace_array passed to it.
3088 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3090 struct trace_array *tr = inode->i_private;
3092 if (tracing_disabled)
3095 if (trace_array_get(tr) < 0)
3098 filp->private_data = inode->i_private;
3103 static int tracing_release(struct inode *inode, struct file *file)
3105 struct trace_array *tr = inode->i_private;
3106 struct seq_file *m = file->private_data;
3107 struct trace_iterator *iter;
3110 if (!(file->f_mode & FMODE_READ)) {
3111 trace_array_put(tr);
3115 /* Writes do not use seq_file */
3117 mutex_lock(&trace_types_lock);
3119 for_each_tracing_cpu(cpu) {
3120 if (iter->buffer_iter[cpu])
3121 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3124 if (iter->trace && iter->trace->close)
3125 iter->trace->close(iter);
3127 if (!iter->snapshot)
3128 /* reenable tracing if it was previously enabled */
3129 tracing_start_tr(tr);
3131 __trace_array_put(tr);
3133 mutex_unlock(&trace_types_lock);
3135 mutex_destroy(&iter->mutex);
3136 free_cpumask_var(iter->started);
3138 kfree(iter->buffer_iter);
3139 seq_release_private(inode, file);
3144 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3146 struct trace_array *tr = inode->i_private;
3148 trace_array_put(tr);
3152 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3154 struct trace_array *tr = inode->i_private;
3156 trace_array_put(tr);
3158 return single_release(inode, file);
3161 static int tracing_open(struct inode *inode, struct file *file)
3163 struct trace_array *tr = inode->i_private;
3164 struct trace_iterator *iter;
3167 if (trace_array_get(tr) < 0)
3170 /* If this file was open for write, then erase contents */
3171 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3172 int cpu = tracing_get_cpu(inode);
3174 if (cpu == RING_BUFFER_ALL_CPUS)
3175 tracing_reset_online_cpus(&tr->trace_buffer);
3177 tracing_reset(&tr->trace_buffer, cpu);
3180 if (file->f_mode & FMODE_READ) {
3181 iter = __tracing_open(inode, file, false);
3183 ret = PTR_ERR(iter);
3184 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3185 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3189 trace_array_put(tr);
3195 * Some tracers are not suitable for instance buffers.
3196 * A tracer is always available for the global array (toplevel)
3197 * or if it explicitly states that it is.
3200 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3202 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3205 /* Find the next tracer that this trace array may use */
3206 static struct tracer *
3207 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3209 while (t && !trace_ok_for_array(t, tr))
3216 t_next(struct seq_file *m, void *v, loff_t *pos)
3218 struct trace_array *tr = m->private;
3219 struct tracer *t = v;
3224 t = get_tracer_for_array(tr, t->next);
3229 static void *t_start(struct seq_file *m, loff_t *pos)
3231 struct trace_array *tr = m->private;
3235 mutex_lock(&trace_types_lock);
3237 t = get_tracer_for_array(tr, trace_types);
3238 for (; t && l < *pos; t = t_next(m, t, &l))
3244 static void t_stop(struct seq_file *m, void *p)
3246 mutex_unlock(&trace_types_lock);
3249 static int t_show(struct seq_file *m, void *v)
3251 struct tracer *t = v;
3256 seq_puts(m, t->name);
3265 static const struct seq_operations show_traces_seq_ops = {
3272 static int show_traces_open(struct inode *inode, struct file *file)
3274 struct trace_array *tr = inode->i_private;
3278 if (tracing_disabled)
3281 ret = seq_open(file, &show_traces_seq_ops);
3285 m = file->private_data;
3292 tracing_write_stub(struct file *filp, const char __user *ubuf,
3293 size_t count, loff_t *ppos)
3298 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3302 if (file->f_mode & FMODE_READ)
3303 ret = seq_lseek(file, offset, whence);
3305 file->f_pos = ret = 0;
3310 static const struct file_operations tracing_fops = {
3311 .open = tracing_open,
3313 .write = tracing_write_stub,
3314 .llseek = tracing_lseek,
3315 .release = tracing_release,
3318 static const struct file_operations show_traces_fops = {
3319 .open = show_traces_open,
3321 .release = seq_release,
3322 .llseek = seq_lseek,
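/*
 * Added note: the "trace" file (tracing_fops) accepts writes only through
 * tracing_write_stub(), so "echo > trace" succeeds without writing anything;
 * the actual clearing happens in tracing_open() when the file is opened
 * with O_TRUNC.
 */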
3326 * The tracer itself will not take this lock, but still we want
3327 * to provide a consistent cpumask to user-space:
3329 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3332 * Temporary storage for the character representation of the
3333 * CPU bitmask (and one more byte for the newline):
3335 static char mask_str[NR_CPUS + 1];
3338 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3339 size_t count, loff_t *ppos)
3341 struct trace_array *tr = file_inode(filp)->i_private;
3344 mutex_lock(&tracing_cpumask_update_lock);
3346 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3347 if (count - len < 2) {
3351 len += sprintf(mask_str + len, "\n");
3352 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3355 mutex_unlock(&tracing_cpumask_update_lock);
3361 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3362 size_t count, loff_t *ppos)
3364 struct trace_array *tr = file_inode(filp)->i_private;
3365 cpumask_var_t tracing_cpumask_new;
3368 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3371 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3375 mutex_lock(&tracing_cpumask_update_lock);
3377 local_irq_disable();
3378 arch_spin_lock(&tr->max_lock);
3379 for_each_tracing_cpu(cpu) {
3381 * Increase/decrease the disabled counter if we are
3382 * about to flip a bit in the cpumask:
3384 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3385 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3386 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3387 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3389 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3390 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3391 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3392 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3395 arch_spin_unlock(&tr->max_lock);
3398 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3400 mutex_unlock(&tracing_cpumask_update_lock);
3401 free_cpumask_var(tracing_cpumask_new);
3406 free_cpumask_var(tracing_cpumask_new);
3411 static const struct file_operations tracing_cpumask_fops = {
3412 .open = tracing_open_generic_tr,
3413 .read = tracing_cpumask_read,
3414 .write = tracing_cpumask_write,
3415 .release = tracing_release_generic_tr,
3416 .llseek = generic_file_llseek,
3419 static int tracing_trace_options_show(struct seq_file *m, void *v)
3421 struct tracer_opt *trace_opts;
3422 struct trace_array *tr = m->private;
3426 mutex_lock(&trace_types_lock);
3427 tracer_flags = tr->current_trace->flags->val;
3428 trace_opts = tr->current_trace->flags->opts;
3430 for (i = 0; trace_options[i]; i++) {
3431 if (trace_flags & (1 << i))
3432 seq_printf(m, "%s\n", trace_options[i]);
3434 seq_printf(m, "no%s\n", trace_options[i]);
3437 for (i = 0; trace_opts[i].name; i++) {
3438 if (tracer_flags & trace_opts[i].bit)
3439 seq_printf(m, "%s\n", trace_opts[i].name);
3441 seq_printf(m, "no%s\n", trace_opts[i].name);
3443 mutex_unlock(&trace_types_lock);
3448 static int __set_tracer_option(struct trace_array *tr,
3449 struct tracer_flags *tracer_flags,
3450 struct tracer_opt *opts, int neg)
3452 struct tracer *trace = tr->current_trace;
3455 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3460 tracer_flags->val &= ~opts->bit;
3462 tracer_flags->val |= opts->bit;
3466 /* Try to assign a tracer specific option */
3467 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3469 struct tracer *trace = tr->current_trace;
3470 struct tracer_flags *tracer_flags = trace->flags;
3471 struct tracer_opt *opts = NULL;
3474 for (i = 0; tracer_flags->opts[i].name; i++) {
3475 opts = &tracer_flags->opts[i];
3477 if (strcmp(cmp, opts->name) == 0)
3478 return __set_tracer_option(tr, trace->flags, opts, neg);
3484 /* Some tracers require overwrite to stay enabled */
3485 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3487 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3493 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3495 /* do nothing if flag is already set */
3496 if (!!(trace_flags & mask) == !!enabled)
3499 /* Give the tracer a chance to approve the change */
3500 if (tr->current_trace->flag_changed)
3501 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3505 trace_flags |= mask;
3507 trace_flags &= ~mask;
3509 if (mask == TRACE_ITER_RECORD_CMD)
3510 trace_event_enable_cmd_record(enabled);
3512 if (mask == TRACE_ITER_OVERWRITE) {
3513 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3514 #ifdef CONFIG_TRACER_MAX_TRACE
3515 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3519 if (mask == TRACE_ITER_PRINTK)
3520 trace_printk_start_stop_comm(enabled);
3525 static int trace_set_options(struct trace_array *tr, char *option)
3532 cmp = strstrip(option);
3534 if (strncmp(cmp, "no", 2) == 0) {
3539 mutex_lock(&trace_types_lock);
3541 for (i = 0; trace_options[i]; i++) {
3542 if (strcmp(cmp, trace_options[i]) == 0) {
3543 ret = set_tracer_flag(tr, 1 << i, !neg);
3548 /* If no option could be set, test the specific tracer options */
3549 if (!trace_options[i])
3550 ret = set_tracer_option(tr, cmp, neg);
3552 mutex_unlock(&trace_types_lock);
3558 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3559 size_t cnt, loff_t *ppos)
3561 struct seq_file *m = filp->private_data;
3562 struct trace_array *tr = m->private;
3566 if (cnt >= sizeof(buf))
3569 if (copy_from_user(&buf, ubuf, cnt))
3574 ret = trace_set_options(tr, buf);
3583 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3585 struct trace_array *tr = inode->i_private;
3588 if (tracing_disabled)
3591 if (trace_array_get(tr) < 0)
3594 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3596 trace_array_put(tr);
3601 static const struct file_operations tracing_iter_fops = {
3602 .open = tracing_trace_options_open,
3604 .llseek = seq_lseek,
3605 .release = tracing_single_release_tr,
3606 .write = tracing_trace_options_write,
3609 static const char readme_msg[] =
3610 "tracing mini-HOWTO:\n\n"
3611 "# echo 0 > tracing_on : quick way to disable tracing\n"
3612 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3613 " Important files:\n"
3614 " trace\t\t\t- The static contents of the buffer\n"
3615 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3616 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3617 " current_tracer\t- function and latency tracers\n"
3618 " available_tracers\t- list of configured tracers for current_tracer\n"
3619 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3620 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3621 " trace_clock\t\t-change the clock used to order events\n"
3622 " local: Per cpu clock but may not be synced across CPUs\n"
3623 " global: Synced across CPUs but slows tracing down.\n"
3624 " counter: Not a clock, but just an increment\n"
3625 " uptime: Jiffy counter from time of boot\n"
3626 " perf: Same clock that perf events use\n"
3627 #ifdef CONFIG_X86_64
3628 " x86-tsc: TSC cycle counter\n"
3630 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3631 " tracing_cpumask\t- Limit which CPUs to trace\n"
3632 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3633 "\t\t\t Remove sub-buffer with rmdir\n"
3634 " trace_options\t\t- Set format or modify how tracing happens\n"
3635 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3636 "\t\t\t option name\n"
3637 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3638 #ifdef CONFIG_DYNAMIC_FTRACE
3639 "\n available_filter_functions - list of functions that can be filtered on\n"
3640 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3641 "\t\t\t functions\n"
3642 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3643 "\t modules: Can select a group via module\n"
3644 "\t Format: :mod:<module-name>\n"
3645 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3646 "\t triggers: a command to perform when function is hit\n"
3647 "\t Format: <function>:<trigger>[:count]\n"
3648 "\t trigger: traceon, traceoff\n"
3649 "\t\t enable_event:<system>:<event>\n"
3650 "\t\t disable_event:<system>:<event>\n"
3651 #ifdef CONFIG_STACKTRACE
3654 #ifdef CONFIG_TRACER_SNAPSHOT
3659 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3660 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3661 "\t The first one will disable tracing every time do_fault is hit\n"
3662 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3663 "\t The first time do trap is hit and it disables tracing, the\n"
3664 "\t counter will decrement to 2. If tracing is already disabled,\n"
3665 "\t the counter will not decrement. It only decrements when the\n"
3666 "\t trigger did work\n"
3667 "\t To remove trigger without count:\n"
3668 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3669 "\t To remove trigger with a count:\n"
3670 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3671 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3672 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3673 "\t modules: Can select a group via module command :mod:\n"
3674 "\t Does not accept triggers\n"
3675 #endif /* CONFIG_DYNAMIC_FTRACE */
3676 #ifdef CONFIG_FUNCTION_TRACER
3677 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3680 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3681 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3682 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3683 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3685 #ifdef CONFIG_TRACER_SNAPSHOT
3686 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3687 "\t\t\t snapshot buffer. Read the contents for more\n"
3688 "\t\t\t information\n"
3690 #ifdef CONFIG_STACK_TRACER
3691 " stack_trace\t\t- Shows the max stack trace when active\n"
3692 " stack_max_size\t- Shows current max stack size that was traced\n"
3693 "\t\t\t Write into this file to reset the max size (trigger a\n"
3694 "\t\t\t new trace)\n"
3695 #ifdef CONFIG_DYNAMIC_FTRACE
3696 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3699 #endif /* CONFIG_STACK_TRACER */
3700 " events/\t\t- Directory containing all trace event subsystems:\n"
3701 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3702 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3703 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3705 " filter\t\t- If set, only events passing filter are traced\n"
3706 " events/<system>/<event>/\t- Directory containing control files for\n"
3708 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3709 " filter\t\t- If set, only events passing filter are traced\n"
3710 " trigger\t\t- If set, a command to perform when event is hit\n"
3711 "\t Format: <trigger>[:count][if <filter>]\n"
3712 "\t trigger: traceon, traceoff\n"
3713 "\t enable_event:<system>:<event>\n"
3714 "\t disable_event:<system>:<event>\n"
3715 #ifdef CONFIG_STACKTRACE
3718 #ifdef CONFIG_TRACER_SNAPSHOT
3721 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3722 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3723 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3724 "\t events/block/block_unplug/trigger\n"
3725 "\t The first disables tracing every time block_unplug is hit.\n"
3726 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3727 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3728 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3729 "\t Like function triggers, the counter is only decremented if it\n"
3730 "\t enabled or disabled tracing.\n"
3731 "\t To remove a trigger without a count:\n"
3732 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3733 "\t To remove a trigger with a count:\n"
3734 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3735 "\t Filters can be ignored when removing a trigger.\n"
3739 tracing_readme_read(struct file *filp, char __user *ubuf,
3740 size_t cnt, loff_t *ppos)
3742 return simple_read_from_buffer(ubuf, cnt, ppos,
3743 readme_msg, strlen(readme_msg));
3746 static const struct file_operations tracing_readme_fops = {
3747 .open = tracing_open_generic,
3748 .read = tracing_readme_read,
3749 .llseek = generic_file_llseek,
3752 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3754 unsigned int *ptr = v;
3756 if (*pos || m->count)
3761 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3763 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3772 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3778 arch_spin_lock(&trace_cmdline_lock);
3780 v = &savedcmd->map_cmdline_to_pid[0];
3782 v = saved_cmdlines_next(m, v, &l);
3790 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3792 arch_spin_unlock(&trace_cmdline_lock);
3796 static int saved_cmdlines_show(struct seq_file *m, void *v)
3798 char buf[TASK_COMM_LEN];
3799 unsigned int *pid = v;
3801 __trace_find_cmdline(*pid, buf);
3802 seq_printf(m, "%d %s\n", *pid, buf);
3806 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3807 .start = saved_cmdlines_start,
3808 .next = saved_cmdlines_next,
3809 .stop = saved_cmdlines_stop,
3810 .show = saved_cmdlines_show,
3813 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3815 if (tracing_disabled)
3818 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3821 static const struct file_operations tracing_saved_cmdlines_fops = {
3822 .open = tracing_saved_cmdlines_open,
3824 .llseek = seq_lseek,
3825 .release = seq_release,
3829 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3830 size_t cnt, loff_t *ppos)
3835 arch_spin_lock(&trace_cmdline_lock);
3836 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3837 arch_spin_unlock(&trace_cmdline_lock);
3839 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3842 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3844 kfree(s->saved_cmdlines);
3845 kfree(s->map_cmdline_to_pid);
3849 static int tracing_resize_saved_cmdlines(unsigned int val)
3851 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3853 s = kmalloc(sizeof(*s), GFP_KERNEL);
3857 if (allocate_cmdlines_buffer(val, s) < 0) {
3862 arch_spin_lock(&trace_cmdline_lock);
3863 savedcmd_temp = savedcmd;
3865 arch_spin_unlock(&trace_cmdline_lock);
3866 free_saved_cmdlines_buffer(savedcmd_temp);
3872 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3873 size_t cnt, loff_t *ppos)
3878 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3882 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3883 if (!val || val > PID_MAX_DEFAULT)
3886 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3895 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3896 .open = tracing_open_generic,
3897 .read = tracing_saved_cmdlines_size_read,
3898 .write = tracing_saved_cmdlines_size_write,
3902 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3903 size_t cnt, loff_t *ppos)
3905 struct trace_array *tr = filp->private_data;
3906 char buf[MAX_TRACER_SIZE+2];
3909 mutex_lock(&trace_types_lock);
3910 r = sprintf(buf, "%s\n", tr->current_trace->name);
3911 mutex_unlock(&trace_types_lock);
3913 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3916 int tracer_init(struct tracer *t, struct trace_array *tr)
3918 tracing_reset_online_cpus(&tr->trace_buffer);
3922 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3926 for_each_tracing_cpu(cpu)
3927 per_cpu_ptr(buf->data, cpu)->entries = val;
3930 #ifdef CONFIG_TRACER_MAX_TRACE
3931 /* resize @tr's buffer to the size of @size_tr's entries */
3932 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3933 struct trace_buffer *size_buf, int cpu_id)
3937 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3938 for_each_tracing_cpu(cpu) {
3939 ret = ring_buffer_resize(trace_buf->buffer,
3940 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3943 per_cpu_ptr(trace_buf->data, cpu)->entries =
3944 per_cpu_ptr(size_buf->data, cpu)->entries;
3947 ret = ring_buffer_resize(trace_buf->buffer,
3948 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3950 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3951 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3956 #endif /* CONFIG_TRACER_MAX_TRACE */
3958 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3959 unsigned long size, int cpu)
3964 * If kernel or user changes the size of the ring buffer
3965 * we use the size that was given, and we can forget about
3966 * expanding it later.
3968 ring_buffer_expanded = true;
3970 /* May be called before buffers are initialized */
3971 if (!tr->trace_buffer.buffer)
3974 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3978 #ifdef CONFIG_TRACER_MAX_TRACE
3979 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3980 !tr->current_trace->use_max_tr)
3983 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3985 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3986 &tr->trace_buffer, cpu);
3989 * AARGH! We are left with different
3990 * size max buffer!!!!
3991 * The max buffer is our "snapshot" buffer.
3992 * When a tracer needs a snapshot (one of the
3993 * latency tracers), it swaps the max buffer
3994 * with the saved snap shot. We succeeded to
3995 * update the size of the main buffer, but failed to
3996 * update the size of the max buffer. But when we tried
3997 * to reset the main buffer to the original size, we
3998 * failed there too. This is very unlikely to
3999 * happen, but if it does, warn and kill all
4003 tracing_disabled = 1;
4008 if (cpu == RING_BUFFER_ALL_CPUS)
4009 set_buffer_entries(&tr->max_buffer, size);
4011 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4014 #endif /* CONFIG_TRACER_MAX_TRACE */
4016 if (cpu == RING_BUFFER_ALL_CPUS)
4017 set_buffer_entries(&tr->trace_buffer, size);
4019 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4024 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4025 unsigned long size, int cpu_id)
4029 mutex_lock(&trace_types_lock);
4031 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4032 /* make sure, this cpu is enabled in the mask */
4033 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4039 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4044 mutex_unlock(&trace_types_lock);
4051 * tracing_update_buffers - used by tracing facility to expand ring buffers
4053 * To save on memory when the tracing is never used on a system with it
4054 * configured in. The ring buffers are set to a minimum size. But once
4055 * a user starts to use the tracing facility, then they need to grow
4056 * to their default size.
4058 * This function is to be called when a tracer is about to be used.
4060 int tracing_update_buffers(void)
4064 mutex_lock(&trace_types_lock);
4065 if (!ring_buffer_expanded)
4066 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4067 RING_BUFFER_ALL_CPUS);
4068 mutex_unlock(&trace_types_lock);
4073 struct trace_option_dentry;
4075 static struct trace_option_dentry *
4076 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4079 destroy_trace_option_files(struct trace_option_dentry *topts);
4082 * Used to clear out the tracer before deletion of an instance.
4083 * Must have trace_types_lock held.
4085 static void tracing_set_nop(struct trace_array *tr)
4087 if (tr->current_trace == &nop_trace)
4090 tr->current_trace->enabled--;
4092 if (tr->current_trace->reset)
4093 tr->current_trace->reset(tr);
4095 tr->current_trace = &nop_trace;
4098 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4100 static struct trace_option_dentry *topts;
4102 #ifdef CONFIG_TRACER_MAX_TRACE
4107 mutex_lock(&trace_types_lock);
4109 if (!ring_buffer_expanded) {
4110 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4111 RING_BUFFER_ALL_CPUS);
4117 for (t = trace_types; t; t = t->next) {
4118 if (strcmp(t->name, buf) == 0)
4125 if (t == tr->current_trace)
4128 /* Some tracers are only allowed for the top level buffer */
4129 if (!trace_ok_for_array(t, tr)) {
4134 trace_branch_disable();
4136 tr->current_trace->enabled--;
4138 if (tr->current_trace->reset)
4139 tr->current_trace->reset(tr);
4141 /* Current trace needs to be nop_trace before synchronize_sched */
4142 tr->current_trace = &nop_trace;
4144 #ifdef CONFIG_TRACER_MAX_TRACE
4145 had_max_tr = tr->allocated_snapshot;
4147 if (had_max_tr && !t->use_max_tr) {
4149 * We need to make sure that the update_max_tr sees that
4150 * current_trace changed to nop_trace to keep it from
4151 * swapping the buffers after we resize it.
4152 * The update_max_tr is called from interrupts disabled
4153 * so a synchronized_sched() is sufficient.
4155 synchronize_sched();
4159 /* Currently, only the top instance has options */
4160 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4161 destroy_trace_option_files(topts);
4162 topts = create_trace_option_files(tr, t);
4165 #ifdef CONFIG_TRACER_MAX_TRACE
4166 if (t->use_max_tr && !had_max_tr) {
4167 ret = alloc_snapshot(tr);
4174 ret = tracer_init(t, tr);
4179 tr->current_trace = t;
4180 tr->current_trace->enabled++;
4181 trace_branch_enable(tr);
4183 mutex_unlock(&trace_types_lock);
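/*
 * Added note: the ordering in tracing_set_tracer() above matters. The old
 * tracer is torn down and current_trace set to nop_trace before any buffer
 * resizing or snapshot allocation, and only then is the new tracer
 * initialized and enabled; see the synchronize_sched() comment for why
 * update_max_tr() must observe nop_trace first.
 */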
4189 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4190 size_t cnt, loff_t *ppos)
4192 struct trace_array *tr = filp->private_data;
4193 char buf[MAX_TRACER_SIZE+1];
4200 if (cnt > MAX_TRACER_SIZE)
4201 cnt = MAX_TRACER_SIZE;
4203 if (copy_from_user(&buf, ubuf, cnt))
4208 /* strip ending whitespace. */
4209 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4212 err = tracing_set_tracer(tr, buf);
4222 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4223 size_t cnt, loff_t *ppos)
4228 r = snprintf(buf, sizeof(buf), "%ld\n",
4229 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4230 if (r > sizeof(buf))
4232 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4236 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4237 size_t cnt, loff_t *ppos)
4242 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4252 tracing_thresh_read(struct file *filp, char __user *ubuf,
4253 size_t cnt, loff_t *ppos)
4255 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4259 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4260 size_t cnt, loff_t *ppos)
4262 struct trace_array *tr = filp->private_data;
4265 mutex_lock(&trace_types_lock);
4266 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4270 if (tr->current_trace->update_thresh) {
4271 ret = tr->current_trace->update_thresh(tr);
4278 mutex_unlock(&trace_types_lock);
4284 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4285 size_t cnt, loff_t *ppos)
4287 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4291 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4292 size_t cnt, loff_t *ppos)
4294 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4297 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4299 struct trace_array *tr = inode->i_private;
4300 struct trace_iterator *iter;
4303 if (tracing_disabled)
4306 if (trace_array_get(tr) < 0)
4309 mutex_lock(&trace_types_lock);
4311 /* create a buffer to store the information to pass to userspace */
4312 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4315 __trace_array_put(tr);
4319 trace_seq_init(&iter->seq);
4322 * We make a copy of the current tracer to avoid concurrent
4323 * changes on it while we are reading.
4325 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4330 *iter->trace = *tr->current_trace;
4332 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4337 /* trace pipe does not show start of buffer */
4338 cpumask_setall(iter->started);
4340 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4341 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4343 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4344 if (trace_clocks[tr->clock_id].in_ns)
4345 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4348 iter->trace_buffer = &tr->trace_buffer;
4349 iter->cpu_file = tracing_get_cpu(inode);
4350 mutex_init(&iter->mutex);
4351 filp->private_data = iter;
4353 if (iter->trace->pipe_open)
4354 iter->trace->pipe_open(iter);
4356 nonseekable_open(inode, filp);
4358 mutex_unlock(&trace_types_lock);
4364 __trace_array_put(tr);
4365 mutex_unlock(&trace_types_lock);
4369 static int tracing_release_pipe(struct inode *inode, struct file *file)
4371 struct trace_iterator *iter = file->private_data;
4372 struct trace_array *tr = inode->i_private;
4374 mutex_lock(&trace_types_lock);
4376 if (iter->trace->pipe_close)
4377 iter->trace->pipe_close(iter);
4379 mutex_unlock(&trace_types_lock);
4381 free_cpumask_var(iter->started);
4382 mutex_destroy(&iter->mutex);
4386 trace_array_put(tr);
4392 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4394 /* Iterators are static, they should be filled or empty */
4395 if (trace_buffer_iter(iter, iter->cpu_file))
4396 return POLLIN | POLLRDNORM;
4398 if (trace_flags & TRACE_ITER_BLOCK)
4400 * Always select as readable when in blocking mode
4402 return POLLIN | POLLRDNORM;
4404 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4409 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4411 struct trace_iterator *iter = filp->private_data;
4413 return trace_poll(iter, filp, poll_table);
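/*
 * Added note on poll() semantics: an iterator backed by static buffer
 * iterators (the "trace" file) is either filled or empty, so it always
 * reports readable. For trace_pipe, the "block" trace option forces an
 * immediate readable result; otherwise the decision is deferred to
 * ring_buffer_poll_wait().
 */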
4416 /* Must be called with trace_types_lock mutex held. */
4417 static int tracing_wait_pipe(struct file *filp)
4419 struct trace_iterator *iter = filp->private_data;
4422 while (trace_empty(iter)) {
4424 if ((filp->f_flags & O_NONBLOCK)) {
4429 * We block until we read something and tracing is disabled.
4430 * We still block if tracing is disabled, but we have never
4431 * read anything. This allows a user to cat this file, and
4432 * then enable tracing. But after we have read something,
4433 * we give an EOF when tracing is again disabled.
4435 * iter->pos will be 0 if we haven't read anything.
4437 if (!tracing_is_on() && iter->pos)
4440 mutex_unlock(&iter->mutex);
4442 ret = wait_on_pipe(iter);
4444 mutex_lock(&iter->mutex);
4449 if (signal_pending(current))
4460 tracing_read_pipe(struct file *filp, char __user *ubuf,
4461 size_t cnt, loff_t *ppos)
4463 struct trace_iterator *iter = filp->private_data;
4464 struct trace_array *tr = iter->tr;
4467 /* return any leftover data */
4468 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4472 trace_seq_init(&iter->seq);
4474 /* copy the tracer to avoid using a global lock all around */
4475 mutex_lock(&trace_types_lock);
4476 if (unlikely(iter->trace->name != tr->current_trace->name))
4477 *iter->trace = *tr->current_trace;
4478 mutex_unlock(&trace_types_lock);
4481 * Avoid more than one consumer on a single file descriptor
4482 * This is just a matter of traces coherency, the ring buffer itself
4485 mutex_lock(&iter->mutex);
4486 if (iter->trace->read) {
4487 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4493 sret = tracing_wait_pipe(filp);
4497 /* stop when tracing is finished */
4498 if (trace_empty(iter)) {
4503 if (cnt >= PAGE_SIZE)
4504 cnt = PAGE_SIZE - 1;
4506 /* reset all but tr, trace, and overruns */
4507 memset(&iter->seq, 0,
4508 sizeof(struct trace_iterator) -
4509 offsetof(struct trace_iterator, seq));
4510 cpumask_clear(iter->started);
4513 trace_event_read_lock();
4514 trace_access_lock(iter->cpu_file);
4515 while (trace_find_next_entry_inc(iter) != NULL) {
4516 enum print_line_t ret;
4517 int len = iter->seq.seq.len;
4519 ret = print_trace_line(iter);
4520 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4521 /* don't print partial lines */
4522 iter->seq.seq.len = len;
4525 if (ret != TRACE_TYPE_NO_CONSUME)
4526 trace_consume(iter);
4528 if (iter->seq.seq.len >= cnt)
4532 * Setting the full flag means we reached the trace_seq buffer
4533 * size and we should leave by partial output condition above.
4534 * One of the trace_seq_* functions is not used properly.
4536 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4539 trace_access_unlock(iter->cpu_file);
4540 trace_event_read_unlock();
4542 /* Now copy what we have to the user */
4543 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4544 if (iter->seq.seq.readpos >= iter->seq.seq.len)
4545 trace_seq_init(&iter->seq);
4548 * If there was nothing to send to user, in spite of consuming trace
4549 * entries, go back to wait for more entries.
4555 mutex_unlock(&iter->mutex);
4560 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4563 __free_page(spd->pages[idx]);
4566 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4568 .confirm = generic_pipe_buf_confirm,
4569 .release = generic_pipe_buf_release,
4570 .steal = generic_pipe_buf_steal,
4571 .get = generic_pipe_buf_get,
4575 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4581 /* Seq buffer is page-sized, exactly what we need. */
4583 save_len = iter->seq.seq.len;
4584 ret = print_trace_line(iter);
4586 if (trace_seq_has_overflowed(&iter->seq)) {
4587 iter->seq.seq.len = save_len;
4592 * This should not be hit, because it should only
4593 * be set if the iter->seq overflowed. But check it
4594 * anyway to be safe.
4596 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4597 iter->seq.seq.len = save_len;
4601 count = iter->seq.seq.len - save_len;
4604 iter->seq.seq.len = save_len;
4608 if (ret != TRACE_TYPE_NO_CONSUME)
4609 trace_consume(iter);
4611 if (!trace_find_next_entry_inc(iter)) {
4621 static ssize_t tracing_splice_read_pipe(struct file *filp,
4623 struct pipe_inode_info *pipe,
4627 struct page *pages_def[PIPE_DEF_BUFFERS];
4628 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4629 struct trace_iterator *iter = filp->private_data;
4630 struct splice_pipe_desc spd = {
4632 .partial = partial_def,
4633 .nr_pages = 0, /* This gets updated below. */
4634 .nr_pages_max = PIPE_DEF_BUFFERS,
4636 .ops = &tracing_pipe_buf_ops,
4637 .spd_release = tracing_spd_release_pipe,
4639 struct trace_array *tr = iter->tr;
4644 if (splice_grow_spd(pipe, &spd))
4647 /* copy the tracer to avoid using a global lock all around */
4648 mutex_lock(&trace_types_lock);
4649 if (unlikely(iter->trace->name != tr->current_trace->name))
4650 *iter->trace = *tr->current_trace;
4651 mutex_unlock(&trace_types_lock);
4653 mutex_lock(&iter->mutex);
4655 if (iter->trace->splice_read) {
4656 ret = iter->trace->splice_read(iter, filp,
4657 ppos, pipe, len, flags);
4662 ret = tracing_wait_pipe(filp);
4666 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4671 trace_event_read_lock();
4672 trace_access_lock(iter->cpu_file);
4674 /* Fill as many pages as possible. */
4675 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4676 spd.pages[i] = alloc_page(GFP_KERNEL);
4680 rem = tracing_fill_pipe_page(rem, iter);
4682 /* Copy the data into the page, so we can start over. */
4683 ret = trace_seq_to_buffer(&iter->seq,
4684 page_address(spd.pages[i]),
4687 __free_page(spd.pages[i]);
4690 spd.partial[i].offset = 0;
4691 spd.partial[i].len = iter->seq.seq.len;
4693 trace_seq_init(&iter->seq);
4696 trace_access_unlock(iter->cpu_file);
4697 trace_event_read_unlock();
4698 mutex_unlock(&iter->mutex);
4702 ret = splice_to_pipe(pipe, &spd);
4704 splice_shrink_spd(&spd);
4708 mutex_unlock(&iter->mutex);
4713 tracing_entries_read(struct file *filp, char __user *ubuf,
4714 size_t cnt, loff_t *ppos)
4716 struct inode *inode = file_inode(filp);
4717 struct trace_array *tr = inode->i_private;
4718 int cpu = tracing_get_cpu(inode);
4723 mutex_lock(&trace_types_lock);
4725 if (cpu == RING_BUFFER_ALL_CPUS) {
4726 int cpu, buf_size_same;
4731 /* check if all cpu sizes are same */
4732 for_each_tracing_cpu(cpu) {
4733 /* fill in the size from first enabled cpu */
4735 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4736 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4742 if (buf_size_same) {
4743 if (!ring_buffer_expanded)
4744 r = sprintf(buf, "%lu (expanded: %lu)\n",
4746 trace_buf_size >> 10);
4748 r = sprintf(buf, "%lu\n", size >> 10);
4750 r = sprintf(buf, "X\n");
4752 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4754 mutex_unlock(&trace_types_lock);
4756 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4761 tracing_entries_write(struct file *filp, const char __user *ubuf,
4762 size_t cnt, loff_t *ppos)
4764 struct inode *inode = file_inode(filp);
4765 struct trace_array *tr = inode->i_private;
4769 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4773 /* must have at least 1 entry */
4777 /* value is in KB */
4779 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4789 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4790 size_t cnt, loff_t *ppos)
4792 struct trace_array *tr = filp->private_data;
4795 unsigned long size = 0, expanded_size = 0;
4797 mutex_lock(&trace_types_lock);
4798 for_each_tracing_cpu(cpu) {
4799 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4800 if (!ring_buffer_expanded)
4801 expanded_size += trace_buf_size >> 10;
4803 if (ring_buffer_expanded)
4804 r = sprintf(buf, "%lu\n", size);
4806 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4807 mutex_unlock(&trace_types_lock);
4809 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4813 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4814 size_t cnt, loff_t *ppos)
4817 * There is no need to read what the user has written, this function
4818 * is just to make sure that there is no error when "echo" is used
4827 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4829 struct trace_array *tr = inode->i_private;
4831 /* disable tracing ? */
4832 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4833 tracer_tracing_off(tr);
4834 /* resize the ring buffer to 0 */
4835 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4837 trace_array_put(tr);
4843 tracing_mark_write(struct file *filp, const char __user *ubuf,
4844 size_t cnt, loff_t *fpos)
4846 unsigned long addr = (unsigned long)ubuf;
4847 struct trace_array *tr = filp->private_data;
4848 struct ring_buffer_event *event;
4849 struct ring_buffer *buffer;
4850 struct print_entry *entry;
4851 unsigned long irq_flags;
4852 struct page *pages[2];
4862 if (tracing_disabled)
4865 if (!(trace_flags & TRACE_ITER_MARKERS))
4868 if (cnt > TRACE_BUF_SIZE)
4869 cnt = TRACE_BUF_SIZE;
4872 * Userspace is injecting traces into the kernel trace buffer.
4873 * We want to be as non intrusive as possible.
4874 * To do so, we do not want to allocate any special buffers
4875 * or take any locks, but instead write the userspace data
4876 * straight into the ring buffer.
4878 * First we need to pin the userspace buffer into memory,
4879 * which it most likely already is, because userspace just referenced it.
4880 * But there's no guarantee that it is. By using get_user_pages_fast()
4881 * and kmap_atomic/kunmap_atomic() we can get access to the
4882 * pages directly. We then write the data directly into the
4885 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4887 /* check if we cross pages */
4888 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4891 offset = addr & (PAGE_SIZE - 1);
4894 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4895 if (ret < nr_pages) {
4897 put_page(pages[ret]);
4902 for (i = 0; i < nr_pages; i++)
4903 map_page[i] = kmap_atomic(pages[i]);
4905 local_save_flags(irq_flags);
4906 size = sizeof(*entry) + cnt + 2; /* possible \n added */
4907 buffer = tr->trace_buffer.buffer;
4908 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4909 irq_flags, preempt_count());
4911 /* Ring buffer disabled, return as if not open for write */
4916 entry = ring_buffer_event_data(event);
4917 entry->ip = _THIS_IP_;
4919 if (nr_pages == 2) {
4920 len = PAGE_SIZE - offset;
4921 memcpy(&entry->buf, map_page[0] + offset, len);
4922 memcpy(&entry->buf[len], map_page[1], cnt - len);
4924 memcpy(&entry->buf, map_page[0] + offset, cnt);
4926 if (entry->buf[cnt - 1] != '\n') {
4927 entry->buf[cnt] = '\n';
4928 entry->buf[cnt + 1] = '\0';
4930 entry->buf[cnt] = '\0';
4932 __buffer_unlock_commit(buffer, event);
4939 for (i = 0; i < nr_pages; i++) {
4940 kunmap_atomic(map_page[i]);
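/*
 * Illustrative usage sketch for tracing_mark_write(), assuming debugfs
 * is mounted at /sys/kernel/debug and the "markers" trace option is
 * enabled.  A write to trace_marker is recorded as a print entry in
 * the ring buffer and shows up in the trace output attributed to the
 * writing task:
 *
 *   echo "hello from userspace" > /sys/kernel/debug/tracing/trace_marker
 *   cat /sys/kernel/debug/tracing/trace
 */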
4947 static int tracing_clock_show(struct seq_file *m, void *v)
4949 struct trace_array *tr = m->private;
4952 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4954 "%s%s%s%s", i ? " " : "",
4955 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4956 i == tr->clock_id ? "]" : "");
4962 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4966 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4967 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4970 if (i == ARRAY_SIZE(trace_clocks))
4973 mutex_lock(&trace_types_lock);
4977 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4980 * New clock may not be consistent with the previous clock.
4981 * Reset the buffer so that it doesn't have incomparable timestamps.
4983 tracing_reset_online_cpus(&tr->trace_buffer);
4985 #ifdef CONFIG_TRACER_MAX_TRACE
4986 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4987 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4988 tracing_reset_online_cpus(&tr->max_buffer);
4991 mutex_unlock(&trace_types_lock);
4996 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4997 size_t cnt, loff_t *fpos)
4999 struct seq_file *m = filp->private_data;
5000 struct trace_array *tr = m->private;
5002 const char *clockstr;
5005 if (cnt >= sizeof(buf))
5008 if (copy_from_user(&buf, ubuf, cnt))
5013 clockstr = strstrip(buf);
5015 ret = tracing_set_clock(tr, clockstr);
5024 static int tracing_clock_open(struct inode *inode, struct file *file)
5026 struct trace_array *tr = inode->i_private;
5029 if (tracing_disabled)
5032 if (trace_array_get(tr))
5035 ret = single_open(file, tracing_clock_show, inode->i_private);
5037 trace_array_put(tr);
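/*
 * Illustrative usage sketch for the trace_clock file served by
 * tracing_clock_show()/tracing_clock_write(), assuming debugfs is
 * mounted at /sys/kernel/debug.  Reading lists the available clocks
 * with the active one in brackets; writing a clock name switches to
 * it (and resets the buffers, as noted above):
 *
 *   cat /sys/kernel/debug/tracing/trace_clock
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */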
5042 struct ftrace_buffer_info {
5043 struct trace_iterator iter;
5048 #ifdef CONFIG_TRACER_SNAPSHOT
5049 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5051 struct trace_array *tr = inode->i_private;
5052 struct trace_iterator *iter;
5056 if (trace_array_get(tr) < 0)
5059 if (file->f_mode & FMODE_READ) {
5060 iter = __tracing_open(inode, file, true);
5062 ret = PTR_ERR(iter);
5064 /* Writes still need the seq_file to hold the private data */
5066 m = kzalloc(sizeof(*m), GFP_KERNEL);
5069 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5077 iter->trace_buffer = &tr->max_buffer;
5078 iter->cpu_file = tracing_get_cpu(inode);
5080 file->private_data = m;
5084 trace_array_put(tr);
5090 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5093 struct seq_file *m = filp->private_data;
5094 struct trace_iterator *iter = m->private;
5095 struct trace_array *tr = iter->tr;
5099 ret = tracing_update_buffers();
5103 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5107 mutex_lock(&trace_types_lock);
5109 if (tr->current_trace->use_max_tr) {
5116 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5120 if (tr->allocated_snapshot)
5124 /* Only allow per-cpu swap if the ring buffer supports it */
5125 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5126 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5131 if (!tr->allocated_snapshot) {
5132 ret = alloc_snapshot(tr);
5136 local_irq_disable();
5137 /* Now, we're going to swap */
5138 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5139 update_max_tr(tr, current, smp_processor_id());
5141 update_max_tr_single(tr, current, iter->cpu_file);
5145 if (tr->allocated_snapshot) {
5146 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5147 tracing_reset_online_cpus(&tr->max_buffer);
5149 tracing_reset(&tr->max_buffer, iter->cpu_file);
5159 mutex_unlock(&trace_types_lock);
5163 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5165 struct seq_file *m = file->private_data;
5168 ret = tracing_release(inode, file);
5170 if (file->f_mode & FMODE_READ)
5173 /* If write only, the seq_file is just a stub */
5181 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5182 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5183 size_t count, loff_t *ppos);
5184 static int tracing_buffers_release(struct inode *inode, struct file *file);
5185 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5186 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5188 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5190 struct ftrace_buffer_info *info;
5193 ret = tracing_buffers_open(inode, filp);
5197 info = filp->private_data;
5199 if (info->iter.trace->use_max_tr) {
5200 tracing_buffers_release(inode, filp);
5204 info->iter.snapshot = true;
5205 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5210 #endif /* CONFIG_TRACER_SNAPSHOT */
5213 static const struct file_operations tracing_thresh_fops = {
5214 .open = tracing_open_generic,
5215 .read = tracing_thresh_read,
5216 .write = tracing_thresh_write,
5217 .llseek = generic_file_llseek,
5220 static const struct file_operations tracing_max_lat_fops = {
5221 .open = tracing_open_generic,
5222 .read = tracing_max_lat_read,
5223 .write = tracing_max_lat_write,
5224 .llseek = generic_file_llseek,
5227 static const struct file_operations set_tracer_fops = {
5228 .open = tracing_open_generic,
5229 .read = tracing_set_trace_read,
5230 .write = tracing_set_trace_write,
5231 .llseek = generic_file_llseek,
5234 static const struct file_operations tracing_pipe_fops = {
5235 .open = tracing_open_pipe,
5236 .poll = tracing_poll_pipe,
5237 .read = tracing_read_pipe,
5238 .splice_read = tracing_splice_read_pipe,
5239 .release = tracing_release_pipe,
5240 .llseek = no_llseek,
5243 static const struct file_operations tracing_entries_fops = {
5244 .open = tracing_open_generic_tr,
5245 .read = tracing_entries_read,
5246 .write = tracing_entries_write,
5247 .llseek = generic_file_llseek,
5248 .release = tracing_release_generic_tr,
5251 static const struct file_operations tracing_total_entries_fops = {
5252 .open = tracing_open_generic_tr,
5253 .read = tracing_total_entries_read,
5254 .llseek = generic_file_llseek,
5255 .release = tracing_release_generic_tr,
5258 static const struct file_operations tracing_free_buffer_fops = {
5259 .open = tracing_open_generic_tr,
5260 .write = tracing_free_buffer_write,
5261 .release = tracing_free_buffer_release,
5264 static const struct file_operations tracing_mark_fops = {
5265 .open = tracing_open_generic_tr,
5266 .write = tracing_mark_write,
5267 .llseek = generic_file_llseek,
5268 .release = tracing_release_generic_tr,
5271 static const struct file_operations trace_clock_fops = {
5272 .open = tracing_clock_open,
5274 .llseek = seq_lseek,
5275 .release = tracing_single_release_tr,
5276 .write = tracing_clock_write,
5279 #ifdef CONFIG_TRACER_SNAPSHOT
5280 static const struct file_operations snapshot_fops = {
5281 .open = tracing_snapshot_open,
5283 .write = tracing_snapshot_write,
5284 .llseek = tracing_lseek,
5285 .release = tracing_snapshot_release,
5288 static const struct file_operations snapshot_raw_fops = {
5289 .open = snapshot_raw_open,
5290 .read = tracing_buffers_read,
5291 .release = tracing_buffers_release,
5292 .splice_read = tracing_buffers_splice_read,
5293 .llseek = no_llseek,
5296 #endif /* CONFIG_TRACER_SNAPSHOT */
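/*
 * Illustrative usage sketch for the snapshot file, assuming debugfs is
 * mounted at /sys/kernel/debug and CONFIG_TRACER_SNAPSHOT is enabled.
 * Writing 1 swaps the live buffer into the snapshot buffer (allocating
 * it on first use), reading shows the frozen copy, and writing 0
 * frees the snapshot buffer again:
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot
 *   cat /sys/kernel/debug/tracing/snapshot
 *   echo 0 > /sys/kernel/debug/tracing/snapshot
 */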
5298 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5300 struct trace_array *tr = inode->i_private;
5301 struct ftrace_buffer_info *info;
5304 if (tracing_disabled)
5307 if (trace_array_get(tr) < 0)
5310 info = kzalloc(sizeof(*info), GFP_KERNEL);
5312 trace_array_put(tr);
5316 mutex_lock(&trace_types_lock);
5319 info->iter.cpu_file = tracing_get_cpu(inode);
5320 info->iter.trace = tr->current_trace;
5321 info->iter.trace_buffer = &tr->trace_buffer;
5323 /* Force reading ring buffer for first read */
5324 info->read = (unsigned int)-1;
5326 filp->private_data = info;
5328 mutex_unlock(&trace_types_lock);
5330 ret = nonseekable_open(inode, filp);
5332 trace_array_put(tr);
5338 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5340 struct ftrace_buffer_info *info = filp->private_data;
5341 struct trace_iterator *iter = &info->iter;
5343 return trace_poll(iter, filp, poll_table);
5347 tracing_buffers_read(struct file *filp, char __user *ubuf,
5348 size_t count, loff_t *ppos)
5350 struct ftrace_buffer_info *info = filp->private_data;
5351 struct trace_iterator *iter = &info->iter;
5358 mutex_lock(&trace_types_lock);
5360 #ifdef CONFIG_TRACER_MAX_TRACE
5361 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5368 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5374 /* Do we have previous read data to read? */
5375 if (info->read < PAGE_SIZE)
5379 trace_access_lock(iter->cpu_file);
5380 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5384 trace_access_unlock(iter->cpu_file);
5387 if (trace_empty(iter)) {
5388 if ((filp->f_flags & O_NONBLOCK)) {
5392 mutex_unlock(&trace_types_lock);
5393 ret = wait_on_pipe(iter);
5394 mutex_lock(&trace_types_lock);
5399 if (signal_pending(current)) {
5411 size = PAGE_SIZE - info->read;
5415 ret = copy_to_user(ubuf, info->spare + info->read, size);
5426 mutex_unlock(&trace_types_lock);
5431 static int tracing_buffers_release(struct inode *inode, struct file *file)
5433 struct ftrace_buffer_info *info = file->private_data;
5434 struct trace_iterator *iter = &info->iter;
5436 mutex_lock(&trace_types_lock);
5438 __trace_array_put(iter->tr);
5441 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5444 mutex_unlock(&trace_types_lock);
5450 struct ring_buffer *buffer;
5455 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5456 struct pipe_buffer *buf)
5458 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5463 ring_buffer_free_read_page(ref->buffer, ref->page);
5468 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5469 struct pipe_buffer *buf)
5471 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5476 /* Pipe buffer operations for a buffer. */
5477 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5479 .confirm = generic_pipe_buf_confirm,
5480 .release = buffer_pipe_buf_release,
5481 .steal = generic_pipe_buf_steal,
5482 .get = buffer_pipe_buf_get,
5486 * Callback from splice_to_pipe(), if we need to release some pages
5487 * at the end of the spd in case we error'ed out in filling the pipe.
5489 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5491 struct buffer_ref *ref =
5492 (struct buffer_ref *)spd->partial[i].private;
5497 ring_buffer_free_read_page(ref->buffer, ref->page);
5499 spd->partial[i].private = 0;
5503 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5504 struct pipe_inode_info *pipe, size_t len,
5507 struct ftrace_buffer_info *info = file->private_data;
5508 struct trace_iterator *iter = &info->iter;
5509 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5510 struct page *pages_def[PIPE_DEF_BUFFERS];
5511 struct splice_pipe_desc spd = {
5513 .partial = partial_def,
5514 .nr_pages_max = PIPE_DEF_BUFFERS,
5516 .ops = &buffer_pipe_buf_ops,
5517 .spd_release = buffer_spd_release,
5519 struct buffer_ref *ref;
5520 int entries, size, i;
5523 mutex_lock(&trace_types_lock);
5525 #ifdef CONFIG_TRACER_MAX_TRACE
5526 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5532 if (splice_grow_spd(pipe, &spd)) {
5537 if (*ppos & (PAGE_SIZE - 1)) {
5542 if (len & (PAGE_SIZE - 1)) {
5543 if (len < PAGE_SIZE) {
5551 trace_access_lock(iter->cpu_file);
5552 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5554 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5558 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5563 ref->buffer = iter->trace_buffer->buffer;
5564 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5570 r = ring_buffer_read_page(ref->buffer, &ref->page,
5571 len, iter->cpu_file, 1);
5573 ring_buffer_free_read_page(ref->buffer, ref->page);
5579 * zero out any left over data, this is going to
5582 size = ring_buffer_page_len(ref->page);
5583 if (size < PAGE_SIZE)
5584 memset(ref->page + size, 0, PAGE_SIZE - size);
5586 page = virt_to_page(ref->page);
5588 spd.pages[i] = page;
5589 spd.partial[i].len = PAGE_SIZE;
5590 spd.partial[i].offset = 0;
5591 spd.partial[i].private = (unsigned long)ref;
5595 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5598 trace_access_unlock(iter->cpu_file);
5601 /* did we read anything? */
5602 if (!spd.nr_pages) {
5603 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5607 mutex_unlock(&trace_types_lock);
5608 ret = wait_on_pipe(iter);
5609 mutex_lock(&trace_types_lock);
5612 if (signal_pending(current)) {
5619 ret = splice_to_pipe(pipe, &spd);
5620 splice_shrink_spd(&spd);
5622 mutex_unlock(&trace_types_lock);
5627 static const struct file_operations tracing_buffers_fops = {
5628 .open = tracing_buffers_open,
5629 .read = tracing_buffers_read,
5630 .poll = tracing_buffers_poll,
5631 .release = tracing_buffers_release,
5632 .splice_read = tracing_buffers_splice_read,
5633 .llseek = no_llseek,
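/*
 * Illustrative usage sketch for tracing_buffers_fops (the per-CPU
 * trace_pipe_raw files), assuming debugfs is mounted at
 * /sys/kernel/debug.  The file hands out raw ring-buffer pages, so it
 * is consumed in page-sized chunks (or spliced) rather than as text,
 * for example:
 *
 *   dd if=/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw \
 *      of=/tmp/cpu0.raw bs=4096
 */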
5637 tracing_stats_read(struct file *filp, char __user *ubuf,
5638 size_t count, loff_t *ppos)
5640 struct inode *inode = file_inode(filp);
5641 struct trace_array *tr = inode->i_private;
5642 struct trace_buffer *trace_buf = &tr->trace_buffer;
5643 int cpu = tracing_get_cpu(inode);
5644 struct trace_seq *s;
5646 unsigned long long t;
5647 unsigned long usec_rem;
5649 s = kmalloc(sizeof(*s), GFP_KERNEL);
5655 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5656 trace_seq_printf(s, "entries: %ld\n", cnt);
5658 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5659 trace_seq_printf(s, "overrun: %ld\n", cnt);
5661 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5662 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5664 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5665 trace_seq_printf(s, "bytes: %ld\n", cnt);
5667 if (trace_clocks[tr->clock_id].in_ns) {
5668 /* local or global for trace_clock */
5669 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5670 usec_rem = do_div(t, USEC_PER_SEC);
5671 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5674 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5675 usec_rem = do_div(t, USEC_PER_SEC);
5676 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5678 /* counter or tsc mode for trace_clock */
5679 trace_seq_printf(s, "oldest event ts: %llu\n",
5680 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5682 trace_seq_printf(s, "now ts: %llu\n",
5683 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5686 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5687 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5689 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5690 trace_seq_printf(s, "read events: %ld\n", cnt);
5692 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->seq.len);
5699 static const struct file_operations tracing_stats_fops = {
5700 .open = tracing_open_generic_tr,
5701 .read = tracing_stats_read,
5702 .llseek = generic_file_llseek,
5703 .release = tracing_release_generic_tr,
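/*
 * Illustrative sketch of the per-CPU stats output produced by
 * tracing_stats_read() above, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *   entries: ...
 *   overrun: ...
 *   commit overrun: ...
 *   bytes: ...
 *   dropped events: ...
 *   read events: ...
 */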
5706 #ifdef CONFIG_DYNAMIC_FTRACE
5708 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5714 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5715 size_t cnt, loff_t *ppos)
5717 static char ftrace_dyn_info_buffer[1024];
5718 static DEFINE_MUTEX(dyn_info_mutex);
5719 unsigned long *p = filp->private_data;
5720 char *buf = ftrace_dyn_info_buffer;
5721 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5724 mutex_lock(&dyn_info_mutex);
5725 r = sprintf(buf, "%ld ", *p);
5727 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5730 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5732 mutex_unlock(&dyn_info_mutex);
5737 static const struct file_operations tracing_dyn_info_fops = {
5738 .open = tracing_open_generic,
5739 .read = tracing_read_dyn_info,
5740 .llseek = generic_file_llseek,
5742 #endif /* CONFIG_DYNAMIC_FTRACE */
5744 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5746 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5752 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5754 unsigned long *count = (long *)data;
5766 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5767 struct ftrace_probe_ops *ops, void *data)
5769 long count = (long)data;
5771 seq_printf(m, "%ps:", (void *)ip);
5773 seq_puts(m, "snapshot");
5776 seq_puts(m, ":unlimited\n");
5778 seq_printf(m, ":count=%ld\n", count);
5783 static struct ftrace_probe_ops snapshot_probe_ops = {
5784 .func = ftrace_snapshot,
5785 .print = ftrace_snapshot_print,
5788 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5789 .func = ftrace_count_snapshot,
5790 .print = ftrace_snapshot_print,
5794 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5795 char *glob, char *cmd, char *param, int enable)
5797 struct ftrace_probe_ops *ops;
5798 void *count = (void *)-1;
5802 /* hash funcs only work with set_ftrace_filter */
5806 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5808 if (glob[0] == '!') {
5809 unregister_ftrace_function_probe_func(glob+1, ops);
5816 number = strsep(&param, ":");
5818 if (!strlen(number))
5822 * We use the callback data field (which is a pointer)
5825 ret = kstrtoul(number, 0, (unsigned long *)&count);
5830 ret = register_ftrace_function_probe(glob, ops, count);
5833 alloc_snapshot(&global_trace);
5835 return ret < 0 ? ret : 0;
5838 static struct ftrace_func_command ftrace_snapshot_cmd = {
5840 .func = ftrace_trace_snapshot_callback,
5843 static __init int register_snapshot_cmd(void)
5845 return register_ftrace_command(&ftrace_snapshot_cmd);
5848 static inline __init int register_snapshot_cmd(void) { return 0; }
5849 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
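/*
 * Illustrative usage sketch for the "snapshot" function-probe command
 * registered above, assuming debugfs is mounted at /sys/kernel/debug
 * and a traceable function name (do_fork is used here only as an
 * example).  An optional count limits how many times a snapshot is
 * taken, and a leading '!' removes the probe again:
 *
 *   echo 'do_fork:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo 'do_fork:snapshot:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo '!do_fork:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 */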
5851 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5856 if (!debugfs_initialized())
5859 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5860 tr->dir = debugfs_create_dir("tracing", NULL);
5863 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5868 struct dentry *tracing_init_dentry(void)
5870 return tracing_init_dentry_tr(&global_trace);
5873 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5875 struct dentry *d_tracer;
5878 return tr->percpu_dir;
5880 d_tracer = tracing_init_dentry_tr(tr);
5884 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5886 WARN_ONCE(!tr->percpu_dir,
5887 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5889 return tr->percpu_dir;
5892 static struct dentry *
5893 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5894 void *data, long cpu, const struct file_operations *fops)
5896 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5898 if (ret) /* See tracing_get_cpu() */
5899 ret->d_inode->i_cdev = (void *)(cpu + 1);
5904 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5906 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5907 struct dentry *d_cpu;
5908 char cpu_dir[30]; /* 30 characters should be more than enough */
5913 snprintf(cpu_dir, 30, "cpu%ld", cpu);
5914 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5916 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5920 /* per cpu trace_pipe */
5921 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5922 tr, cpu, &tracing_pipe_fops);
5925 trace_create_cpu_file("trace", 0644, d_cpu,
5926 tr, cpu, &tracing_fops);
5928 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5929 tr, cpu, &tracing_buffers_fops);
5931 trace_create_cpu_file("stats", 0444, d_cpu,
5932 tr, cpu, &tracing_stats_fops);
5934 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5935 tr, cpu, &tracing_entries_fops);
5937 #ifdef CONFIG_TRACER_SNAPSHOT
5938 trace_create_cpu_file("snapshot", 0644, d_cpu,
5939 tr, cpu, &snapshot_fops);
5941 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5942 tr, cpu, &snapshot_raw_fops);
5946 #ifdef CONFIG_FTRACE_SELFTEST
5947 /* Let selftest have access to static functions in this file */
5948 #include "trace_selftest.c"
5951 struct trace_option_dentry {
5952 struct tracer_opt *opt;
5953 struct tracer_flags *flags;
5954 struct trace_array *tr;
5955 struct dentry *entry;
5959 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5962 struct trace_option_dentry *topt = filp->private_data;
5965 if (topt->flags->val & topt->opt->bit)
5970 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5974 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5977 struct trace_option_dentry *topt = filp->private_data;
5981 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5985 if (val != 0 && val != 1)
5988 if (!!(topt->flags->val & topt->opt->bit) != val) {
5989 mutex_lock(&trace_types_lock);
5990 ret = __set_tracer_option(topt->tr, topt->flags,
5992 mutex_unlock(&trace_types_lock);
6003 static const struct file_operations trace_options_fops = {
6004 .open = tracing_open_generic,
6005 .read = trace_options_read,
6006 .write = trace_options_write,
6007 .llseek = generic_file_llseek,
6011 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6014 long index = (long)filp->private_data;
6017 if (trace_flags & (1 << index))
6022 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6026 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6029 struct trace_array *tr = &global_trace;
6030 long index = (long)filp->private_data;
6034 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6038 if (val != 0 && val != 1)
6041 mutex_lock(&trace_types_lock);
6042 ret = set_tracer_flag(tr, 1 << index, val);
6043 mutex_unlock(&trace_types_lock);
6053 static const struct file_operations trace_options_core_fops = {
6054 .open = tracing_open_generic,
6055 .read = trace_options_core_read,
6056 .write = trace_options_core_write,
6057 .llseek = generic_file_llseek,
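/*
 * Illustrative usage sketch for the per-flag files served by
 * trace_options_core_read()/trace_options_core_write(), assuming
 * debugfs is mounted at /sys/kernel/debug.  Each core trace flag
 * appears as a file under options/ and accepts 0 or 1 (sym-offset is
 * used here only as an example flag name):
 *
 *   cat /sys/kernel/debug/tracing/options/sym-offset
 *   echo 1 > /sys/kernel/debug/tracing/options/sym-offset
 */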
6060 struct dentry *trace_create_file(const char *name,
6062 struct dentry *parent,
6064 const struct file_operations *fops)
6068 ret = debugfs_create_file(name, mode, parent, data, fops);
6070 pr_warning("Could not create debugfs '%s' entry\n", name);
6076 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6078 struct dentry *d_tracer;
6083 d_tracer = tracing_init_dentry_tr(tr);
6087 tr->options = debugfs_create_dir("options", d_tracer);
6089 pr_warning("Could not create debugfs directory 'options'\n");
6097 create_trace_option_file(struct trace_array *tr,
6098 struct trace_option_dentry *topt,
6099 struct tracer_flags *flags,
6100 struct tracer_opt *opt)
6102 struct dentry *t_options;
6104 t_options = trace_options_init_dentry(tr);
6108 topt->flags = flags;
6112 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6113 &trace_options_fops);
6117 static struct trace_option_dentry *
6118 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6120 struct trace_option_dentry *topts;
6121 struct tracer_flags *flags;
6122 struct tracer_opt *opts;
6128 flags = tracer->flags;
6130 if (!flags || !flags->opts)
6135 for (cnt = 0; opts[cnt].name; cnt++)
6138 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6142 for (cnt = 0; opts[cnt].name; cnt++)
6143 create_trace_option_file(tr, &topts[cnt], flags,
6150 destroy_trace_option_files(struct trace_option_dentry *topts)
6157 for (cnt = 0; topts[cnt].opt; cnt++)
6158 debugfs_remove(topts[cnt].entry);
6163 static struct dentry *
6164 create_trace_option_core_file(struct trace_array *tr,
6165 const char *option, long index)
6167 struct dentry *t_options;
6169 t_options = trace_options_init_dentry(tr);
6173 return trace_create_file(option, 0644, t_options, (void *)index,
6174 &trace_options_core_fops);
6177 static __init void create_trace_options_dir(struct trace_array *tr)
6179 struct dentry *t_options;
6182 t_options = trace_options_init_dentry(tr);
6186 for (i = 0; trace_options[i]; i++)
6187 create_trace_option_core_file(tr, trace_options[i], i);
6191 rb_simple_read(struct file *filp, char __user *ubuf,
6192 size_t cnt, loff_t *ppos)
6194 struct trace_array *tr = filp->private_data;
6198 r = tracer_tracing_is_on(tr);
6199 r = sprintf(buf, "%d\n", r);
6201 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6205 rb_simple_write(struct file *filp, const char __user *ubuf,
6206 size_t cnt, loff_t *ppos)
6208 struct trace_array *tr = filp->private_data;
6209 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6213 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6218 mutex_lock(&trace_types_lock);
6220 tracer_tracing_on(tr);
6221 if (tr->current_trace->start)
6222 tr->current_trace->start(tr);
6224 tracer_tracing_off(tr);
6225 if (tr->current_trace->stop)
6226 tr->current_trace->stop(tr);
6228 mutex_unlock(&trace_types_lock);
6236 static const struct file_operations rb_simple_fops = {
6237 .open = tracing_open_generic_tr,
6238 .read = rb_simple_read,
6239 .write = rb_simple_write,
6240 .release = tracing_release_generic_tr,
6241 .llseek = default_llseek,
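/*
 * Illustrative usage sketch for the tracing_on file backed by
 * rb_simple_fops, assuming debugfs is mounted at /sys/kernel/debug.
 * Writing 0 stops new events from being recorded (existing buffer
 * contents are kept); writing 1 re-enables recording:
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_on
 *   echo 1 > /sys/kernel/debug/tracing/tracing_on
 */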
6244 struct dentry *trace_instance_dir;
6247 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6250 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6252 enum ring_buffer_flags rb_flags;
6254 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6258 buf->buffer = ring_buffer_alloc(size, rb_flags);
6262 buf->data = alloc_percpu(struct trace_array_cpu);
6264 ring_buffer_free(buf->buffer);
6268 /* Allocate the first page for all buffers */
6269 set_buffer_entries(&tr->trace_buffer,
6270 ring_buffer_size(tr->trace_buffer.buffer, 0));
6275 static int allocate_trace_buffers(struct trace_array *tr, int size)
6279 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6283 #ifdef CONFIG_TRACER_MAX_TRACE
6284 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6285 allocate_snapshot ? size : 1);
6287 ring_buffer_free(tr->trace_buffer.buffer);
6288 free_percpu(tr->trace_buffer.data);
6291 tr->allocated_snapshot = allocate_snapshot;
6294 * Only the top level trace array gets its snapshot allocated
6295 * from the kernel command line.
6297 allocate_snapshot = false;
6302 static void free_trace_buffer(struct trace_buffer *buf)
6305 ring_buffer_free(buf->buffer);
6307 free_percpu(buf->data);
6312 static void free_trace_buffers(struct trace_array *tr)
6317 free_trace_buffer(&tr->trace_buffer);
6319 #ifdef CONFIG_TRACER_MAX_TRACE
6320 free_trace_buffer(&tr->max_buffer);
6324 static int new_instance_create(const char *name)
6326 struct trace_array *tr;
6329 mutex_lock(&trace_types_lock);
6332 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6333 if (tr->name && strcmp(tr->name, name) == 0)
6338 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6342 tr->name = kstrdup(name, GFP_KERNEL);
6346 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6349 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6351 raw_spin_lock_init(&tr->start_lock);
6353 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6355 tr->current_trace = &nop_trace;
6357 INIT_LIST_HEAD(&tr->systems);
6358 INIT_LIST_HEAD(&tr->events);
6360 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6363 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6367 ret = event_trace_add_tracer(tr->dir, tr);
6369 debugfs_remove_recursive(tr->dir);
6373 init_tracer_debugfs(tr, tr->dir);
6375 list_add(&tr->list, &ftrace_trace_arrays);
6377 mutex_unlock(&trace_types_lock);
6382 free_trace_buffers(tr);
6383 free_cpumask_var(tr->tracing_cpumask);
6388 mutex_unlock(&trace_types_lock);
6394 static int instance_delete(const char *name)
6396 struct trace_array *tr;
6400 mutex_lock(&trace_types_lock);
6403 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6404 if (tr->name && strcmp(tr->name, name) == 0) {
6416 list_del(&tr->list);
6418 tracing_set_nop(tr);
6419 event_trace_del_tracer(tr);
6420 ftrace_destroy_function_files(tr);
6421 debugfs_remove_recursive(tr->dir);
6422 free_trace_buffers(tr);
6430 mutex_unlock(&trace_types_lock);
6435 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6437 struct dentry *parent;
6440 /* Paranoid: Make sure the parent is the "instances" directory */
6441 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6442 if (WARN_ON_ONCE(parent != trace_instance_dir))
6446 * The inode mutex is locked, but debugfs_create_dir() will also
6447 * take the mutex. As the instances directory can not be destroyed
6448 * or changed in any other way, it is safe to unlock it, and
6449 * let the dentry try. If two users try to make the same dir at
6450 * the same time, then the new_instance_create() will determine the
6453 mutex_unlock(&inode->i_mutex);
6455 ret = new_instance_create(dentry->d_iname);
6457 mutex_lock(&inode->i_mutex);
6462 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6464 struct dentry *parent;
6467 /* Paranoid: Make sure the parent is the "instances" directory */
6468 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6469 if (WARN_ON_ONCE(parent != trace_instance_dir))
6472 /* The caller did a dget() on dentry */
6473 mutex_unlock(&dentry->d_inode->i_mutex);
6476 * The inode mutex is locked, but debugfs_create_dir() will also
6477 * take the mutex. As the instances directory can not be destroyed
6478 * or changed in any other way, it is safe to unlock it, and
6479 * let the dentry try. If two users try to make the same dir at
6480 * the same time, then the instance_delete() will determine the
6483 mutex_unlock(&inode->i_mutex);
6485 ret = instance_delete(dentry->d_iname);
6487 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6488 mutex_lock(&dentry->d_inode->i_mutex);
6493 static const struct inode_operations instance_dir_inode_operations = {
6494 .lookup = simple_lookup,
6495 .mkdir = instance_mkdir,
6496 .rmdir = instance_rmdir,
6499 static __init void create_trace_instances(struct dentry *d_tracer)
6501 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6502 if (WARN_ON(!trace_instance_dir))
6505 /* Hijack the dir inode operations, to allow mkdir */
6506 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
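/*
 * Illustrative usage sketch for the instances directory set up by
 * create_trace_instances(), assuming debugfs is mounted at
 * /sys/kernel/debug.  Creating a directory allocates an independent
 * trace_array with its own buffers and control files; removing it
 * tears the instance down again:
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo
 *   echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *   rmdir /sys/kernel/debug/tracing/instances/foo
 */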
6510 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6514 trace_create_file("available_tracers", 0444, d_tracer,
6515 tr, &show_traces_fops);
6517 trace_create_file("current_tracer", 0644, d_tracer,
6518 tr, &set_tracer_fops);
6520 trace_create_file("tracing_cpumask", 0644, d_tracer,
6521 tr, &tracing_cpumask_fops);
6523 trace_create_file("trace_options", 0644, d_tracer,
6524 tr, &tracing_iter_fops);
6526 trace_create_file("trace", 0644, d_tracer,
6529 trace_create_file("trace_pipe", 0444, d_tracer,
6530 tr, &tracing_pipe_fops);
6532 trace_create_file("buffer_size_kb", 0644, d_tracer,
6533 tr, &tracing_entries_fops);
6535 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6536 tr, &tracing_total_entries_fops);
6538 trace_create_file("free_buffer", 0200, d_tracer,
6539 tr, &tracing_free_buffer_fops);
6541 trace_create_file("trace_marker", 0220, d_tracer,
6542 tr, &tracing_mark_fops);
6544 trace_create_file("trace_clock", 0644, d_tracer, tr,
6547 trace_create_file("tracing_on", 0644, d_tracer,
6548 tr, &rb_simple_fops);
6550 #ifdef CONFIG_TRACER_MAX_TRACE
6551 trace_create_file("tracing_max_latency", 0644, d_tracer,
6552 &tr->max_latency, &tracing_max_lat_fops);
6555 if (ftrace_create_function_files(tr, d_tracer))
6556 WARN(1, "Could not allocate function filter files");
6558 #ifdef CONFIG_TRACER_SNAPSHOT
6559 trace_create_file("snapshot", 0644, d_tracer,
6560 tr, &snapshot_fops);
6563 for_each_tracing_cpu(cpu)
6564 tracing_init_debugfs_percpu(tr, cpu);
6568 static __init int tracer_init_debugfs(void)
6570 struct dentry *d_tracer;
6572 trace_access_lock_init();
6574 d_tracer = tracing_init_dentry();
6578 init_tracer_debugfs(&global_trace, d_tracer);
6580 trace_create_file("tracing_thresh", 0644, d_tracer,
6581 &global_trace, &tracing_thresh_fops);
6583 trace_create_file("README", 0444, d_tracer,
6584 NULL, &tracing_readme_fops);
6586 trace_create_file("saved_cmdlines", 0444, d_tracer,
6587 NULL, &tracing_saved_cmdlines_fops);
6589 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6590 NULL, &tracing_saved_cmdlines_size_fops);
6592 #ifdef CONFIG_DYNAMIC_FTRACE
6593 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6594 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6597 create_trace_instances(d_tracer);
6599 create_trace_options_dir(&global_trace);
6604 static int trace_panic_handler(struct notifier_block *this,
6605 unsigned long event, void *unused)
6607 if (ftrace_dump_on_oops)
6608 ftrace_dump(ftrace_dump_on_oops);
6612 static struct notifier_block trace_panic_notifier = {
6613 .notifier_call = trace_panic_handler,
6615 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6618 static int trace_die_handler(struct notifier_block *self,
6624 if (ftrace_dump_on_oops)
6625 ftrace_dump(ftrace_dump_on_oops);
6633 static struct notifier_block trace_die_notifier = {
6634 .notifier_call = trace_die_handler,
6639 * printk is set to max of 1024, we really don't need it that big.
6640 * Nothing should be printing 1000 characters anyway.
6642 #define TRACE_MAX_PRINT 1000
6645 * Define here KERN_TRACE so that we have one place to modify
6646 * it if we decide to change what log level the ftrace dump
6649 #define KERN_TRACE KERN_EMERG
6652 trace_printk_seq(struct trace_seq *s)
6654 /* Probably should print a warning here. */
6655 if (s->seq.len >= TRACE_MAX_PRINT)
6656 s->seq.len = TRACE_MAX_PRINT;
6658 /* should be zero ended, but we are paranoid. */
6659 s->buffer[s->seq.len] = 0;
6661 printk(KERN_TRACE "%s", s->buffer);
6666 void trace_init_global_iter(struct trace_iterator *iter)
6668 iter->tr = &global_trace;
6669 iter->trace = iter->tr->current_trace;
6670 iter->cpu_file = RING_BUFFER_ALL_CPUS;
6671 iter->trace_buffer = &global_trace.trace_buffer;
6673 if (iter->trace && iter->trace->open)
6674 iter->trace->open(iter);
6676 /* Annotate start of buffers if we had overruns */
6677 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6678 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6680 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6681 if (trace_clocks[iter->tr->clock_id].in_ns)
6682 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6685 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6687 /* use static because iter can be a bit big for the stack */
6688 static struct trace_iterator iter;
6689 static atomic_t dump_running;
6690 unsigned int old_userobj;
6691 unsigned long flags;
6694 /* Only allow one dump user at a time. */
6695 if (atomic_inc_return(&dump_running) != 1) {
6696 atomic_dec(&dump_running);
6701 * Always turn off tracing when we dump.
6702 * We don't need to show trace output of what happens
6703 * between multiple crashes.
6705 * If the user does a sysrq-z, then they can re-enable
6706 * tracing with echo 1 > tracing_on.
6710 local_irq_save(flags);
6712 /* Simulate the iterator */
6713 trace_init_global_iter(&iter);
6715 for_each_tracing_cpu(cpu) {
6716 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6719 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6721 /* don't look at user memory in panic mode */
6722 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6724 switch (oops_dump_mode) {
6726 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6729 iter.cpu_file = raw_smp_processor_id();
6734 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6735 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6738 printk(KERN_TRACE "Dumping ftrace buffer:\n");
6740 /* Did function tracer already get disabled? */
6741 if (ftrace_is_dead()) {
6742 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6743 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6747 * We need to stop all tracing on all CPUS to read
6748 * the next buffer. This is a bit expensive, but is
6749 * not done often. We fill in all that we can read,
6750 * and then release the locks again.
6753 while (!trace_empty(&iter)) {
6756 printk(KERN_TRACE "---------------------------------\n");
6760 /* reset all but tr, trace, and overruns */
6761 memset(&iter.seq, 0,
6762 sizeof(struct trace_iterator) -
6763 offsetof(struct trace_iterator, seq));
6764 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6767 if (trace_find_next_entry_inc(&iter) != NULL) {
6770 ret = print_trace_line(&iter);
6771 if (ret != TRACE_TYPE_NO_CONSUME)
6772 trace_consume(&iter);
6774 touch_nmi_watchdog();
6776 trace_printk_seq(&iter.seq);
6780 printk(KERN_TRACE " (ftrace buffer empty)\n");
6782 printk(KERN_TRACE "---------------------------------\n");
6785 trace_flags |= old_userobj;
6787 for_each_tracing_cpu(cpu) {
6788 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6790 atomic_dec(&dump_running);
6791 local_irq_restore(flags);
6793 EXPORT_SYMBOL_GPL(ftrace_dump);
6795 __init static int tracer_alloc_buffers(void)
6801 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6804 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6805 goto out_free_buffer_mask;
6807 /* Only allocate trace_printk buffers if a trace_printk exists */
6808 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6809 /* Must be called before global_trace.buffer is allocated */
6810 trace_printk_init_buffers();
6812 /* To save memory, keep the ring buffer size to its minimum */
6813 if (ring_buffer_expanded)
6814 ring_buf_size = trace_buf_size;
6818 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6819 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6821 raw_spin_lock_init(&global_trace.start_lock);
6823 /* Used for event triggers */
6824 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6826 goto out_free_cpumask;
6828 if (trace_create_savedcmd() < 0)
6829 goto out_free_temp_buffer;
6831 /* TODO: make the number of buffers hot pluggable with CPUS */
6832 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6833 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6835 goto out_free_savedcmd;
6838 if (global_trace.buffer_disabled)
6841 if (trace_boot_clock) {
6842 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6844 pr_warning("Trace clock %s not defined, going back to default\n",
6849 * register_tracer() might reference current_trace, so it
6850 * needs to be set before we register anything. This is
6851 * just a bootstrap of current_trace anyway.
6853 global_trace.current_trace = &nop_trace;
6855 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6857 ftrace_init_global_array_ops(&global_trace);
6859 register_tracer(&nop_trace);
6861 /* All seems OK, enable tracing */
6862 tracing_disabled = 0;
6864 atomic_notifier_chain_register(&panic_notifier_list,
6865 &trace_panic_notifier);
6867 register_die_notifier(&trace_die_notifier);
6869 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6871 INIT_LIST_HEAD(&global_trace.systems);
6872 INIT_LIST_HEAD(&global_trace.events);
6873 list_add(&global_trace.list, &ftrace_trace_arrays);
6875 while (trace_boot_options) {
6878 option = strsep(&trace_boot_options, ",");
6879 trace_set_options(&global_trace, option);
6882 register_snapshot_cmd();
6887 free_saved_cmdlines_buffer(savedcmd);
6888 out_free_temp_buffer:
6889 ring_buffer_free(temp_buffer);
6891 free_cpumask_var(global_trace.tracing_cpumask);
6892 out_free_buffer_mask:
6893 free_cpumask_var(tracing_buffer_mask);
6898 __init static int clear_boot_tracer(void)
6901 * The default tracer at boot buffer is an init section.
6902 * This function is called in lateinit. If we did not
6903 * find the boot tracer, then clear it out, to prevent
6904 * later registration from accessing the buffer that is
6905 * about to be freed.
6907 if (!default_bootup_tracer)
6910 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6911 default_bootup_tracer);
6912 default_bootup_tracer = NULL;
6917 early_initcall(tracer_alloc_buffers);
6918 fs_initcall(tracer_init_debugfs);
6919 late_initcall(clear_boot_tracer);