/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>

#include <linux/sched/rt.h>

#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);
#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
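/*
 * Illustrative example (not part of the original source): the __setup()
 * handlers above consume kernel command-line options at early boot.  A
 * hypothetical boot line exercising them could look like:
 *
 *   ftrace=function trace_options=sym-addr ftrace_dump_on_oops=orig_cpu \
 *   traceoff_on_warning tp_printk
 *
 * With that line, set_cmdline_ftrace() records "function" as the boot-up
 * tracer and expands the ring buffer, set_ftrace_dump_on_oops("=orig_cpu")
 * selects DUMP_ORIG, and stop_trace_on_warning()/set_tracepoint_printk()
 * are handed an empty argument string and therefore enable their flags.
 */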
unsigned long long ns2usecs(cycle_t nsec)

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);
231 int trace_array_get(struct trace_array
*this_tr
)
233 struct trace_array
*tr
;
236 mutex_lock(&trace_types_lock
);
237 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
244 mutex_unlock(&trace_types_lock
);
249 static void __trace_array_put(struct trace_array
*this_tr
)
251 WARN_ON(!this_tr
->ref
);
255 void trace_array_put(struct trace_array
*this_tr
)
257 mutex_lock(&trace_types_lock
);
258 __trace_array_put(this_tr
);
259 mutex_unlock(&trace_types_lock
);
262 int filter_check_discard(struct ftrace_event_file
*file
, void *rec
,
263 struct ring_buffer
*buffer
,
264 struct ring_buffer_event
*event
)
266 if (unlikely(file
->flags
& FTRACE_EVENT_FL_FILTERED
) &&
267 !filter_match_preds(file
->filter
, rec
)) {
268 ring_buffer_discard_commit(buffer
, event
);
274 EXPORT_SYMBOL_GPL(filter_check_discard
);
276 int call_filter_check_discard(struct ftrace_event_call
*call
, void *rec
,
277 struct ring_buffer
*buffer
,
278 struct ring_buffer_event
*event
)
280 if (unlikely(call
->flags
& TRACE_EVENT_FL_FILTERED
) &&
281 !filter_match_preds(call
->filter
, rec
)) {
282 ring_buffer_discard_commit(buffer
, event
);
288 EXPORT_SYMBOL_GPL(call_filter_check_discard
);
290 static cycle_t
buffer_ftrace_now(struct trace_buffer
*buf
, int cpu
)
294 /* Early boot up does not have a buffer yet */
296 return trace_clock_local();
298 ts
= ring_buffer_time_stamp(buf
->buffer
, cpu
);
299 ring_buffer_normalize_time_stamp(buf
->buffer
, cpu
, &ts
);
304 cycle_t
ftrace_now(int cpu
)
306 return buffer_ftrace_now(&global_trace
.trace_buffer
, cpu
);
310 * tracing_is_enabled - Show if global_trace has been disabled
312 * Shows if the global trace has been enabled or not. It uses the
313 * mirror flag "buffer_disabled" to be used in fast paths such as for
314 * the irqsoff tracer. But it may be inaccurate due to races. If you
315 * need to know the accurate state, use tracing_is_on() which is a little
316 * slower, but accurate.
318 int tracing_is_enabled(void)
321 * For quick access (irqsoff uses this in fast path), just
322 * return the mirror variable of the state of the ring buffer.
323 * It's a little racy, but we don't really care.
326 return !global_trace
.buffer_disabled
;
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access is also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
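/*
 * Illustrative sketch (not part of the original source): a reader of the
 * ring buffer is expected to bracket its accesses with the primitives
 * above.  A hypothetical consumer of a single CPU's buffer would do:
 */
#if 0	/* example only */
static void example_consume_cpu(struct trace_iterator *iter, int cpu)
{
	trace_access_lock(cpu);		/* blocks ALL_CPUS writers and other users of @cpu */
	ring_buffer_consume(iter->trace_buffer->buffer, cpu, &iter->ts,
			    &iter->lost_events);
	trace_access_unlock(cpu);
}
#endif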
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
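/*
 * Illustrative sketch (not part of the original source): tracing_on() and
 * tracing_off() (defined further below) only gate recording into the ring
 * buffer.  Debugging code elsewhere in the kernel might use them like this:
 */
#if 0	/* example only */
static void example_capture_window(void)
{
	tracing_on();
	/* ... run the code of interest while events are recorded ... */
	tracing_off();	/* freeze the buffer contents for later reading */
}
#endif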
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
474 int __trace_puts(unsigned long ip
, const char *str
, int size
)
476 struct ring_buffer_event
*event
;
477 struct ring_buffer
*buffer
;
478 struct print_entry
*entry
;
479 unsigned long irq_flags
;
483 if (!(trace_flags
& TRACE_ITER_PRINTK
))
486 pc
= preempt_count();
488 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
491 alloc
= sizeof(*entry
) + size
+ 2; /* possible \n added */
493 local_save_flags(irq_flags
);
494 buffer
= global_trace
.trace_buffer
.buffer
;
495 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, alloc
,
500 entry
= ring_buffer_event_data(event
);
503 memcpy(&entry
->buf
, str
, size
);
505 /* Add a newline if necessary */
506 if (entry
->buf
[size
- 1] != '\n') {
507 entry
->buf
[size
] = '\n';
508 entry
->buf
[size
+ 1] = '\0';
510 entry
->buf
[size
] = '\0';
512 __buffer_unlock_commit(buffer
, event
);
513 ftrace_trace_stack(buffer
, irq_flags
, 4, pc
);
517 EXPORT_SYMBOL_GPL(__trace_puts
);
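/*
 * Illustrative sketch (not part of the original source): __trace_puts() is
 * normally reached through the trace_puts() helper macro, which writes a
 * string literal into the trace buffer with very low overhead:
 */
#if 0	/* example only */
static void example_mark_point(void)
{
	trace_puts("reached the interesting point\n");
}
#endif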
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
524 int __trace_bputs(unsigned long ip
, const char *str
)
526 struct ring_buffer_event
*event
;
527 struct ring_buffer
*buffer
;
528 struct bputs_entry
*entry
;
529 unsigned long irq_flags
;
530 int size
= sizeof(struct bputs_entry
);
533 if (!(trace_flags
& TRACE_ITER_PRINTK
))
536 pc
= preempt_count();
538 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
541 local_save_flags(irq_flags
);
542 buffer
= global_trace
.trace_buffer
.buffer
;
543 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPUTS
, size
,
548 entry
= ring_buffer_event_data(event
);
552 __buffer_unlock_commit(buffer
, event
);
553 ftrace_trace_stack(buffer
, irq_flags
, 4, pc
);
557 EXPORT_SYMBOL_GPL(__trace_bputs
);
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
574 void tracing_snapshot(void)
576 struct trace_array
*tr
= &global_trace
;
577 struct tracer
*tracer
= tr
->current_trace
;
581 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
582 internal_trace_puts("*** snapshot is being ignored ***\n");
586 if (!tr
->allocated_snapshot
) {
587 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
588 internal_trace_puts("*** stopping trace here! ***\n");
593 /* Note, snapshot can not be used when the tracer uses it */
594 if (tracer
->use_max_tr
) {
595 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
596 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
600 local_irq_save(flags
);
601 update_max_tr(tr
, current
, smp_processor_id());
602 local_irq_restore(flags
);
604 EXPORT_SYMBOL_GPL(tracing_snapshot
);
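/*
 * Illustrative sketch (not part of the original source): a caller that wants
 * a snapshot on some rare condition would allocate the snapshot buffer once
 * from sleepable context, then trigger swaps wherever the condition is seen:
 */
#if 0	/* example only */
static void example_snapshot_on_condition(bool condition)
{
	static bool snapshot_ready;

	if (!snapshot_ready) {
		if (tracing_alloc_snapshot() == 0)	/* may sleep */
			snapshot_ready = true;
		return;
	}

	if (condition)
		tracing_snapshot();	/* safe from atomic context */
}
#endif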
606 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
607 struct trace_buffer
*size_buf
, int cpu_id
);
608 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
);
610 static int alloc_snapshot(struct trace_array
*tr
)
614 if (!tr
->allocated_snapshot
) {
616 /* allocate spare buffer */
617 ret
= resize_buffer_duplicate_size(&tr
->max_buffer
,
618 &tr
->trace_buffer
, RING_BUFFER_ALL_CPUS
);
622 tr
->allocated_snapshot
= true;
628 static void free_snapshot(struct trace_array
*tr
)
	/*
	 * We don't free the ring buffer. Instead, resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
635 ring_buffer_resize(tr
->max_buffer
.buffer
, 1, RING_BUFFER_ALL_CPUS
);
636 set_buffer_entries(&tr
->max_buffer
, 1);
637 tracing_reset_online_cpus(&tr
->max_buffer
);
638 tr
->allocated_snapshot
= false;
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
651 int tracing_alloc_snapshot(void)
653 struct trace_array
*tr
= &global_trace
;
656 ret
= alloc_snapshot(tr
);
661 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
674 void tracing_snapshot_alloc(void)
678 ret
= tracing_alloc_snapshot();
684 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
686 void tracing_snapshot(void)
688 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
690 EXPORT_SYMBOL_GPL(tracing_snapshot
);
691 int tracing_alloc_snapshot(void)
693 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
696 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
697 void tracing_snapshot_alloc(void)
702 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
703 #endif /* CONFIG_TRACER_SNAPSHOT */
705 static void tracer_tracing_off(struct trace_array
*tr
)
707 if (tr
->trace_buffer
.buffer
)
708 ring_buffer_record_off(tr
->trace_buffer
.buffer
);
710 * This flag is looked at when buffers haven't been allocated
711 * yet, or by some tracers (like irqsoff), that just want to
712 * know if the ring buffer has been disabled, but it can handle
713 * races of where it gets disabled but we still do a record.
714 * As the check is in the fast path of the tracers, it is more
715 * important to be fast than accurate.
717 tr
->buffer_disabled
= 1;
718 /* Make the flag seen by readers */
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
730 void tracing_off(void)
732 tracer_tracing_off(&global_trace
);
734 EXPORT_SYMBOL_GPL(tracing_off
);
736 void disable_trace_on_warning(void)
738 if (__disable_trace_on_warning
)
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
748 static int tracer_tracing_is_on(struct trace_array
*tr
)
750 if (tr
->trace_buffer
.buffer
)
751 return ring_buffer_record_is_on(tr
->trace_buffer
.buffer
);
752 return !tr
->buffer_disabled
;
756 * tracing_is_on - show state of ring buffers enabled
758 int tracing_is_on(void)
760 return tracer_tracing_is_on(&global_trace
);
762 EXPORT_SYMBOL_GPL(tracing_is_on
);
764 static int __init
set_buf_size(char *str
)
766 unsigned long buf_size
;
770 buf_size
= memparse(str
, &str
);
771 /* nr_entries can not be zero */
774 trace_buf_size
= buf_size
;
777 __setup("trace_buf_size=", set_buf_size
);
779 static int __init
set_tracing_thresh(char *str
)
781 unsigned long threshold
;
786 ret
= kstrtoul(str
, 0, &threshold
);
789 tracing_thresh
= threshold
* 1000;
792 __setup("tracing_thresh=", set_tracing_thresh
);
794 unsigned long nsecs_to_usecs(unsigned long nsecs
)
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
833 int in_ns
; /* is this clock in nanoseconds? */
835 { trace_clock_local
, "local", 1 },
836 { trace_clock_global
, "global", 1 },
837 { trace_clock_counter
, "counter", 0 },
838 { trace_clock_jiffies
, "uptime", 0 },
839 { trace_clock
, "perf", 1 },
840 { ktime_get_mono_fast_ns
, "mono", 1 },
845 * trace_parser_get_init - gets the buffer for trace parser
847 int trace_parser_get_init(struct trace_parser
*parser
, int size
)
849 memset(parser
, 0, sizeof(*parser
));
851 parser
->buffer
= kmalloc(size
, GFP_KERNEL
);
860 * trace_parser_put - frees the buffer for trace parser
862 void trace_parser_put(struct trace_parser
*parser
)
864 kfree(parser
->buffer
);
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
878 int trace_get_user(struct trace_parser
*parser
, const char __user
*ubuf
,
879 size_t cnt
, loff_t
*ppos
)
886 trace_parser_clear(parser
);
888 ret
= get_user(ch
, ubuf
++);
896 * The parser is not finished with the last write,
897 * continue reading the user input without skipping spaces.
900 /* skip white space */
901 while (cnt
&& isspace(ch
)) {
902 ret
= get_user(ch
, ubuf
++);
909 /* only spaces were written */
919 /* read the non-space input */
920 while (cnt
&& !isspace(ch
)) {
921 if (parser
->idx
< parser
->size
- 1)
922 parser
->buffer
[parser
->idx
++] = ch
;
927 ret
= get_user(ch
, ubuf
++);
934 /* We either got finished input or we have to wait for another call. */
936 parser
->buffer
[parser
->idx
] = 0;
937 parser
->cont
= false;
938 } else if (parser
->idx
< parser
->size
- 1) {
940 parser
->buffer
[parser
->idx
++] = ch
;
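/*
 * Illustrative sketch (not part of the original source): a typical caller of
 * trace_get_user() drives it from a debugfs write handler, consuming one
 * whitespace-separated token per call until the user buffer is exhausted:
 */
#if 0	/* example only */
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read > 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
	}

	trace_parser_put(&parser);
	return read;
}
#endif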
953 static ssize_t
trace_seq_to_buffer(struct trace_seq
*s
, void *buf
, size_t cnt
)
957 if (s
->len
<= s
->readpos
)
960 len
= s
->len
- s
->readpos
;
963 memcpy(buf
, s
->buffer
+ s
->readpos
, cnt
);
969 unsigned long __read_mostly tracing_thresh
;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
978 __update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
980 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
981 struct trace_buffer
*max_buf
= &tr
->max_buffer
;
982 struct trace_array_cpu
*data
= per_cpu_ptr(trace_buf
->data
, cpu
);
983 struct trace_array_cpu
*max_data
= per_cpu_ptr(max_buf
->data
, cpu
);
986 max_buf
->time_start
= data
->preempt_timestamp
;
988 max_data
->saved_latency
= tr
->max_latency
;
989 max_data
->critical_start
= data
->critical_start
;
990 max_data
->critical_end
= data
->critical_end
;
992 memcpy(max_data
->comm
, tsk
->comm
, TASK_COMM_LEN
);
993 max_data
->pid
= tsk
->pid
;
995 * If tsk == current, then use current_uid(), as that does not use
996 * RCU. The irq tracer can be called out of RCU scope.
999 max_data
->uid
= current_uid();
1001 max_data
->uid
= task_uid(tsk
);
1003 max_data
->nice
= tsk
->static_prio
- 20 - MAX_RT_PRIO
;
1004 max_data
->policy
= tsk
->policy
;
1005 max_data
->rt_priority
= tsk
->rt_priority
;
1007 /* record this tasks comm */
1008 tracing_record_cmdline(tsk
);
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
1021 update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1023 struct ring_buffer
*buf
;
1028 WARN_ON_ONCE(!irqs_disabled());
1030 if (!tr
->allocated_snapshot
) {
1031 /* Only the nop tracer should hit this when disabling */
1032 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1036 arch_spin_lock(&tr
->max_lock
);
1038 buf
= tr
->trace_buffer
.buffer
;
1039 tr
->trace_buffer
.buffer
= tr
->max_buffer
.buffer
;
1040 tr
->max_buffer
.buffer
= buf
;
1042 __update_max_tr(tr
, tsk
, cpu
);
1043 arch_spin_unlock(&tr
->max_lock
);
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
1055 update_max_tr_single(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1062 WARN_ON_ONCE(!irqs_disabled());
1063 if (!tr
->allocated_snapshot
) {
1064 /* Only the nop tracer should hit this when disabling */
1065 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1069 arch_spin_lock(&tr
->max_lock
);
1071 ret
= ring_buffer_swap_cpu(tr
->max_buffer
.buffer
, tr
->trace_buffer
.buffer
, cpu
);
1073 if (ret
== -EBUSY
) {
1075 * We failed to swap the buffer due to a commit taking
1076 * place on this CPU. We fail to record, but we reset
1077 * the max trace buffer (no one writes directly to it)
1078 * and flag that it failed.
1080 trace_array_printk_buf(tr
->max_buffer
.buffer
, _THIS_IP_
,
1081 "Failed to swap buffers due to commit in progress\n");
1084 WARN_ON_ONCE(ret
&& ret
!= -EAGAIN
&& ret
!= -EBUSY
);
1086 __update_max_tr(tr
, tsk
, cpu
);
1087 arch_spin_unlock(&tr
->max_lock
);
1089 #endif /* CONFIG_TRACER_MAX_TRACE */
1091 static int wait_on_pipe(struct trace_iterator
*iter
)
1093 /* Iterators are static, they should be filled or empty */
1094 if (trace_buffer_iter(iter
, iter
->cpu_file
))
1097 return ring_buffer_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
1100 #ifdef CONFIG_FTRACE_STARTUP_TEST
1101 static int run_tracer_selftest(struct tracer
*type
)
1103 struct trace_array
*tr
= &global_trace
;
1104 struct tracer
*saved_tracer
= tr
->current_trace
;
1107 if (!type
->selftest
|| tracing_selftest_disabled
)
1111 * Run a selftest on this tracer.
1112 * Here we reset the trace buffer, and set the current
1113 * tracer to be this tracer. The tracer can then run some
1114 * internal tracing to verify that everything is in order.
1115 * If we fail, we do not register this tracer.
1117 tracing_reset_online_cpus(&tr
->trace_buffer
);
1119 tr
->current_trace
= type
;
1121 #ifdef CONFIG_TRACER_MAX_TRACE
1122 if (type
->use_max_tr
) {
1123 /* If we expanded the buffers, make sure the max is expanded too */
1124 if (ring_buffer_expanded
)
1125 ring_buffer_resize(tr
->max_buffer
.buffer
, trace_buf_size
,
1126 RING_BUFFER_ALL_CPUS
);
1127 tr
->allocated_snapshot
= true;
1131 /* the test is responsible for initializing and enabling */
1132 pr_info("Testing tracer %s: ", type
->name
);
1133 ret
= type
->selftest(type
, tr
);
1134 /* the test is responsible for resetting too */
1135 tr
->current_trace
= saved_tracer
;
1137 printk(KERN_CONT
"FAILED!\n");
1138 /* Add the warning after printing 'FAILED' */
1142 /* Only reset on passing, to avoid touching corrupted buffers */
1143 tracing_reset_online_cpus(&tr
->trace_buffer
);
1145 #ifdef CONFIG_TRACER_MAX_TRACE
1146 if (type
->use_max_tr
) {
1147 tr
->allocated_snapshot
= false;
1149 /* Shrink the max buffer again */
1150 if (ring_buffer_expanded
)
1151 ring_buffer_resize(tr
->max_buffer
.buffer
, 1,
1152 RING_BUFFER_ALL_CPUS
);
1156 printk(KERN_CONT
"PASSED\n");
1160 static inline int run_tracer_selftest(struct tracer
*type
)
1164 #endif /* CONFIG_FTRACE_STARTUP_TEST */
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
1172 int register_tracer(struct tracer
*type
)
1178 pr_info("Tracer must have a name\n");
1182 if (strlen(type
->name
) >= MAX_TRACER_SIZE
) {
1183 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE
);
1187 mutex_lock(&trace_types_lock
);
1189 tracing_selftest_running
= true;
1191 for (t
= trace_types
; t
; t
= t
->next
) {
1192 if (strcmp(type
->name
, t
->name
) == 0) {
1194 pr_info("Tracer %s already registered\n",
1201 if (!type
->set_flag
)
1202 type
->set_flag
= &dummy_set_flag
;
1204 type
->flags
= &dummy_tracer_flags
;
1206 if (!type
->flags
->opts
)
1207 type
->flags
->opts
= dummy_tracer_opt
;
1209 ret
= run_tracer_selftest(type
);
1213 type
->next
= trace_types
;
1217 tracing_selftest_running
= false;
1218 mutex_unlock(&trace_types_lock
);
1220 if (ret
|| !default_bootup_tracer
)
1223 if (strncmp(default_bootup_tracer
, type
->name
, MAX_TRACER_SIZE
))
1226 printk(KERN_INFO
"Starting tracer '%s'\n", type
->name
);
1227 /* Do we want this tracer to start on bootup? */
1228 tracing_set_tracer(&global_trace
, type
->name
);
1229 default_bootup_tracer
= NULL
;
1230 /* disable other selftests, since this will break it. */
1231 tracing_selftest_disabled
= true;
1232 #ifdef CONFIG_FTRACE_STARTUP_TEST
1233 printk(KERN_INFO
"Disabling FTRACE selftests due to running tracer '%s'\n",
1241 void tracing_reset(struct trace_buffer
*buf
, int cpu
)
1243 struct ring_buffer
*buffer
= buf
->buffer
;
1248 ring_buffer_record_disable(buffer
);
1250 /* Make sure all commits have finished */
1251 synchronize_sched();
1252 ring_buffer_reset_cpu(buffer
, cpu
);
1254 ring_buffer_record_enable(buffer
);
1257 void tracing_reset_online_cpus(struct trace_buffer
*buf
)
1259 struct ring_buffer
*buffer
= buf
->buffer
;
1265 ring_buffer_record_disable(buffer
);
1267 /* Make sure all commits have finished */
1268 synchronize_sched();
1270 buf
->time_start
= buffer_ftrace_now(buf
, buf
->cpu
);
1272 for_each_online_cpu(cpu
)
1273 ring_buffer_reset_cpu(buffer
, cpu
);
1275 ring_buffer_record_enable(buffer
);
1278 /* Must have trace_types_lock held */
1279 void tracing_reset_all_online_cpus(void)
1281 struct trace_array
*tr
;
1283 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
1284 tracing_reset_online_cpus(&tr
->trace_buffer
);
1285 #ifdef CONFIG_TRACER_MAX_TRACE
1286 tracing_reset_online_cpus(&tr
->max_buffer
);
1291 #define SAVED_CMDLINES_DEFAULT 128
1292 #define NO_CMDLINE_MAP UINT_MAX
1293 static arch_spinlock_t trace_cmdline_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
1294 struct saved_cmdlines_buffer
{
1295 unsigned map_pid_to_cmdline
[PID_MAX_DEFAULT
+1];
1296 unsigned *map_cmdline_to_pid
;
1297 unsigned cmdline_num
;
1299 char *saved_cmdlines
;
1301 static struct saved_cmdlines_buffer
*savedcmd
;
1303 /* temporary disable recording */
1304 static atomic_t trace_record_cmdline_disabled __read_mostly
;
1306 static inline char *get_saved_cmdlines(int idx
)
1308 return &savedcmd
->saved_cmdlines
[idx
* TASK_COMM_LEN
];
1311 static inline void set_cmdline(int idx
, const char *cmdline
)
1313 memcpy(get_saved_cmdlines(idx
), cmdline
, TASK_COMM_LEN
);
1316 static int allocate_cmdlines_buffer(unsigned int val
,
1317 struct saved_cmdlines_buffer
*s
)
1319 s
->map_cmdline_to_pid
= kmalloc(val
* sizeof(*s
->map_cmdline_to_pid
),
1321 if (!s
->map_cmdline_to_pid
)
1324 s
->saved_cmdlines
= kmalloc(val
* TASK_COMM_LEN
, GFP_KERNEL
);
1325 if (!s
->saved_cmdlines
) {
1326 kfree(s
->map_cmdline_to_pid
);
1331 s
->cmdline_num
= val
;
1332 memset(&s
->map_pid_to_cmdline
, NO_CMDLINE_MAP
,
1333 sizeof(s
->map_pid_to_cmdline
));
1334 memset(s
->map_cmdline_to_pid
, NO_CMDLINE_MAP
,
1335 val
* sizeof(*s
->map_cmdline_to_pid
));
1340 static int trace_create_savedcmd(void)
1344 savedcmd
= kmalloc(sizeof(*savedcmd
), GFP_KERNEL
);
1348 ret
= allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT
, savedcmd
);
1358 int is_tracing_stopped(void)
1360 return global_trace
.stop_count
;
1364 * tracing_start - quick start of the tracer
1366 * If tracing is enabled but was stopped by tracing_stop,
1367 * this will start the tracer back up.
1369 void tracing_start(void)
1371 struct ring_buffer
*buffer
;
1372 unsigned long flags
;
1374 if (tracing_disabled
)
1377 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1378 if (--global_trace
.stop_count
) {
1379 if (global_trace
.stop_count
< 0) {
1380 /* Someone screwed up their debugging */
1382 global_trace
.stop_count
= 0;
1387 /* Prevent the buffers from switching */
1388 arch_spin_lock(&global_trace
.max_lock
);
1390 buffer
= global_trace
.trace_buffer
.buffer
;
1392 ring_buffer_record_enable(buffer
);
1394 #ifdef CONFIG_TRACER_MAX_TRACE
1395 buffer
= global_trace
.max_buffer
.buffer
;
1397 ring_buffer_record_enable(buffer
);
1400 arch_spin_unlock(&global_trace
.max_lock
);
1403 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1406 static void tracing_start_tr(struct trace_array
*tr
)
1408 struct ring_buffer
*buffer
;
1409 unsigned long flags
;
1411 if (tracing_disabled
)
1414 /* If global, we need to also start the max tracer */
1415 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1416 return tracing_start();
1418 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1420 if (--tr
->stop_count
) {
1421 if (tr
->stop_count
< 0) {
1422 /* Someone screwed up their debugging */
1429 buffer
= tr
->trace_buffer
.buffer
;
1431 ring_buffer_record_enable(buffer
);
1434 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1438 * tracing_stop - quick stop of the tracer
1440 * Light weight way to stop tracing. Use in conjunction with
1443 void tracing_stop(void)
1445 struct ring_buffer
*buffer
;
1446 unsigned long flags
;
1448 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1449 if (global_trace
.stop_count
++)
1452 /* Prevent the buffers from switching */
1453 arch_spin_lock(&global_trace
.max_lock
);
1455 buffer
= global_trace
.trace_buffer
.buffer
;
1457 ring_buffer_record_disable(buffer
);
1459 #ifdef CONFIG_TRACER_MAX_TRACE
1460 buffer
= global_trace
.max_buffer
.buffer
;
1462 ring_buffer_record_disable(buffer
);
1465 arch_spin_unlock(&global_trace
.max_lock
);
1468 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1471 static void tracing_stop_tr(struct trace_array
*tr
)
1473 struct ring_buffer
*buffer
;
1474 unsigned long flags
;
1476 /* If global, we need to also stop the max tracer */
1477 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1478 return tracing_stop();
1480 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1481 if (tr
->stop_count
++)
1484 buffer
= tr
->trace_buffer
.buffer
;
1486 ring_buffer_record_disable(buffer
);
1489 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1492 void trace_stop_cmdline_recording(void);
1494 static int trace_save_cmdline(struct task_struct
*tsk
)
1498 if (!tsk
->pid
|| unlikely(tsk
->pid
> PID_MAX_DEFAULT
))
1502 * It's not the end of the world if we don't get
1503 * the lock, but we also don't want to spin
1504 * nor do we want to disable interrupts,
1505 * so if we miss here, then better luck next time.
1507 if (!arch_spin_trylock(&trace_cmdline_lock
))
1510 idx
= savedcmd
->map_pid_to_cmdline
[tsk
->pid
];
1511 if (idx
== NO_CMDLINE_MAP
) {
1512 idx
= (savedcmd
->cmdline_idx
+ 1) % savedcmd
->cmdline_num
;
1515 * Check whether the cmdline buffer at idx has a pid
1516 * mapped. We are going to overwrite that entry so we
1517 * need to clear the map_pid_to_cmdline. Otherwise we
1518 * would read the new comm for the old pid.
1520 pid
= savedcmd
->map_cmdline_to_pid
[idx
];
1521 if (pid
!= NO_CMDLINE_MAP
)
1522 savedcmd
->map_pid_to_cmdline
[pid
] = NO_CMDLINE_MAP
;
1524 savedcmd
->map_cmdline_to_pid
[idx
] = tsk
->pid
;
1525 savedcmd
->map_pid_to_cmdline
[tsk
->pid
] = idx
;
1527 savedcmd
->cmdline_idx
= idx
;
1530 set_cmdline(idx
, tsk
->comm
);
1532 arch_spin_unlock(&trace_cmdline_lock
);
1537 static void __trace_find_cmdline(int pid
, char comm
[])
1542 strcpy(comm
, "<idle>");
1546 if (WARN_ON_ONCE(pid
< 0)) {
1547 strcpy(comm
, "<XXX>");
1551 if (pid
> PID_MAX_DEFAULT
) {
1552 strcpy(comm
, "<...>");
1556 map
= savedcmd
->map_pid_to_cmdline
[pid
];
1557 if (map
!= NO_CMDLINE_MAP
)
1558 strcpy(comm
, get_saved_cmdlines(map
));
1560 strcpy(comm
, "<...>");
1563 void trace_find_cmdline(int pid
, char comm
[])
1566 arch_spin_lock(&trace_cmdline_lock
);
1568 __trace_find_cmdline(pid
, comm
);
1570 arch_spin_unlock(&trace_cmdline_lock
);
1574 void tracing_record_cmdline(struct task_struct
*tsk
)
1576 if (atomic_read(&trace_record_cmdline_disabled
) || !tracing_is_on())
1579 if (!__this_cpu_read(trace_cmdline_save
))
1582 if (trace_save_cmdline(tsk
))
1583 __this_cpu_write(trace_cmdline_save
, false);
1587 tracing_generic_entry_update(struct trace_entry
*entry
, unsigned long flags
,
1590 struct task_struct
*tsk
= current
;
1592 entry
->preempt_count
= pc
& 0xff;
1593 entry
->pid
= (tsk
) ? tsk
->pid
: 0;
1595 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1596 (irqs_disabled_flags(flags
) ? TRACE_FLAG_IRQS_OFF
: 0) |
1598 TRACE_FLAG_IRQS_NOSUPPORT
|
1600 ((pc
& HARDIRQ_MASK
) ? TRACE_FLAG_HARDIRQ
: 0) |
1601 ((pc
& SOFTIRQ_MASK
) ? TRACE_FLAG_SOFTIRQ
: 0) |
1602 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED
: 0) |
1603 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED
: 0);
1605 EXPORT_SYMBOL_GPL(tracing_generic_entry_update
);
1607 struct ring_buffer_event
*
1608 trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
1611 unsigned long flags
, int pc
)
1613 struct ring_buffer_event
*event
;
1615 event
= ring_buffer_lock_reserve(buffer
, len
);
1616 if (event
!= NULL
) {
1617 struct trace_entry
*ent
= ring_buffer_event_data(event
);
1619 tracing_generic_entry_update(ent
, flags
, pc
);
1627 __buffer_unlock_commit(struct ring_buffer
*buffer
, struct ring_buffer_event
*event
)
1629 __this_cpu_write(trace_cmdline_save
, true);
1630 ring_buffer_unlock_commit(buffer
, event
);
1634 __trace_buffer_unlock_commit(struct ring_buffer
*buffer
,
1635 struct ring_buffer_event
*event
,
1636 unsigned long flags
, int pc
)
1638 __buffer_unlock_commit(buffer
, event
);
1640 ftrace_trace_stack(buffer
, flags
, 6, pc
);
1641 ftrace_trace_userstack(buffer
, flags
, pc
);
1644 void trace_buffer_unlock_commit(struct ring_buffer
*buffer
,
1645 struct ring_buffer_event
*event
,
1646 unsigned long flags
, int pc
)
1648 __trace_buffer_unlock_commit(buffer
, event
, flags
, pc
);
1650 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit
);
1652 static struct ring_buffer
*temp_buffer
;
1654 struct ring_buffer_event
*
1655 trace_event_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1656 struct ftrace_event_file
*ftrace_file
,
1657 int type
, unsigned long len
,
1658 unsigned long flags
, int pc
)
1660 struct ring_buffer_event
*entry
;
1662 *current_rb
= ftrace_file
->tr
->trace_buffer
.buffer
;
1663 entry
= trace_buffer_lock_reserve(*current_rb
,
1664 type
, len
, flags
, pc
);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
1671 if (!entry
&& ftrace_file
->flags
& FTRACE_EVENT_FL_TRIGGER_COND
) {
1672 *current_rb
= temp_buffer
;
1673 entry
= trace_buffer_lock_reserve(*current_rb
,
1674 type
, len
, flags
, pc
);
1678 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve
);
1680 struct ring_buffer_event
*
1681 trace_current_buffer_lock_reserve(struct ring_buffer
**current_rb
,
1682 int type
, unsigned long len
,
1683 unsigned long flags
, int pc
)
1685 *current_rb
= global_trace
.trace_buffer
.buffer
;
1686 return trace_buffer_lock_reserve(*current_rb
,
1687 type
, len
, flags
, pc
);
1689 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve
);
1691 void trace_current_buffer_unlock_commit(struct ring_buffer
*buffer
,
1692 struct ring_buffer_event
*event
,
1693 unsigned long flags
, int pc
)
1695 __trace_buffer_unlock_commit(buffer
, event
, flags
, pc
);
1697 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit
);
1699 void trace_buffer_unlock_commit_regs(struct ring_buffer
*buffer
,
1700 struct ring_buffer_event
*event
,
1701 unsigned long flags
, int pc
,
1702 struct pt_regs
*regs
)
1704 __buffer_unlock_commit(buffer
, event
);
1706 ftrace_trace_stack_regs(buffer
, flags
, 0, pc
, regs
);
1707 ftrace_trace_userstack(buffer
, flags
, pc
);
1709 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs
);
1711 void trace_current_buffer_discard_commit(struct ring_buffer
*buffer
,
1712 struct ring_buffer_event
*event
)
1714 ring_buffer_discard_commit(buffer
, event
);
1716 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit
);
1719 trace_function(struct trace_array
*tr
,
1720 unsigned long ip
, unsigned long parent_ip
, unsigned long flags
,
1723 struct ftrace_event_call
*call
= &event_function
;
1724 struct ring_buffer
*buffer
= tr
->trace_buffer
.buffer
;
1725 struct ring_buffer_event
*event
;
1726 struct ftrace_entry
*entry
;
1728 /* If we are reading the ring buffer, don't trace */
1729 if (unlikely(__this_cpu_read(ftrace_cpu_disabled
)))
1732 event
= trace_buffer_lock_reserve(buffer
, TRACE_FN
, sizeof(*entry
),
1736 entry
= ring_buffer_event_data(event
);
1738 entry
->parent_ip
= parent_ip
;
1740 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1741 __buffer_unlock_commit(buffer
, event
);
1744 #ifdef CONFIG_STACKTRACE
1746 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1747 struct ftrace_stack
{
1748 unsigned long calls
[FTRACE_STACK_MAX_ENTRIES
];
1751 static DEFINE_PER_CPU(struct ftrace_stack
, ftrace_stack
);
1752 static DEFINE_PER_CPU(int, ftrace_stack_reserve
);
1754 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
1755 unsigned long flags
,
1756 int skip
, int pc
, struct pt_regs
*regs
)
1758 struct ftrace_event_call
*call
= &event_kernel_stack
;
1759 struct ring_buffer_event
*event
;
1760 struct stack_entry
*entry
;
1761 struct stack_trace trace
;
1763 int size
= FTRACE_STACK_ENTRIES
;
1765 trace
.nr_entries
= 0;
1769 * Since events can happen in NMIs there's no safe way to
1770 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1771 * or NMI comes in, it will just have to use the default
1772 * FTRACE_STACK_SIZE.
1774 preempt_disable_notrace();
1776 use_stack
= __this_cpu_inc_return(ftrace_stack_reserve
);
1778 * We don't need any atomic variables, just a barrier.
1779 * If an interrupt comes in, we don't care, because it would
1780 * have exited and put the counter back to what we want.
1781 * We just need a barrier to keep gcc from moving things
1785 if (use_stack
== 1) {
1786 trace
.entries
= this_cpu_ptr(ftrace_stack
.calls
);
1787 trace
.max_entries
= FTRACE_STACK_MAX_ENTRIES
;
1790 save_stack_trace_regs(regs
, &trace
);
1792 save_stack_trace(&trace
);
1794 if (trace
.nr_entries
> size
)
1795 size
= trace
.nr_entries
;
1797 /* From now on, use_stack is a boolean */
1800 size
*= sizeof(unsigned long);
1802 event
= trace_buffer_lock_reserve(buffer
, TRACE_STACK
,
1803 sizeof(*entry
) + size
, flags
, pc
);
1806 entry
= ring_buffer_event_data(event
);
1808 memset(&entry
->caller
, 0, size
);
1811 memcpy(&entry
->caller
, trace
.entries
,
1812 trace
.nr_entries
* sizeof(unsigned long));
1814 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1815 trace
.entries
= entry
->caller
;
1817 save_stack_trace_regs(regs
, &trace
);
1819 save_stack_trace(&trace
);
1822 entry
->size
= trace
.nr_entries
;
1824 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1825 __buffer_unlock_commit(buffer
, event
);
1828 /* Again, don't let gcc optimize things here */
1830 __this_cpu_dec(ftrace_stack_reserve
);
1831 preempt_enable_notrace();
1835 void ftrace_trace_stack_regs(struct ring_buffer
*buffer
, unsigned long flags
,
1836 int skip
, int pc
, struct pt_regs
*regs
)
1838 if (!(trace_flags
& TRACE_ITER_STACKTRACE
))
1841 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, regs
);
1844 void ftrace_trace_stack(struct ring_buffer
*buffer
, unsigned long flags
,
1847 if (!(trace_flags
& TRACE_ITER_STACKTRACE
))
1850 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, NULL
);
1853 void __trace_stack(struct trace_array
*tr
, unsigned long flags
, int skip
,
1856 __ftrace_trace_stack(tr
->trace_buffer
.buffer
, flags
, skip
, pc
, NULL
);
1860 * trace_dump_stack - record a stack back trace in the trace buffer
1861 * @skip: Number of functions to skip (helper handlers)
1863 void trace_dump_stack(int skip
)
1865 unsigned long flags
;
1867 if (tracing_disabled
|| tracing_selftest_running
)
1870 local_save_flags(flags
);
1873 * Skip 3 more, seems to get us at the caller of
1877 __ftrace_trace_stack(global_trace
.trace_buffer
.buffer
,
1878 flags
, skip
, preempt_count(), NULL
);
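/*
 * Illustrative sketch (not part of the original source): trace_dump_stack()
 * can be dropped into a suspect code path to record who called it, without
 * spamming the console the way dump_stack() would:
 */
#if 0	/* example only */
static void example_suspect_path(void)
{
	trace_dump_stack(0);	/* record the current backtrace in the ring buffer */
}
#endif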
1881 static DEFINE_PER_CPU(int, user_stack_count
);
1884 ftrace_trace_userstack(struct ring_buffer
*buffer
, unsigned long flags
, int pc
)
1886 struct ftrace_event_call
*call
= &event_user_stack
;
1887 struct ring_buffer_event
*event
;
1888 struct userstack_entry
*entry
;
1889 struct stack_trace trace
;
1891 if (!(trace_flags
& TRACE_ITER_USERSTACKTRACE
))
	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
1898 if (unlikely(in_nmi()))
1902 * prevent recursion, since the user stack tracing may
1903 * trigger other kernel events.
1906 if (__this_cpu_read(user_stack_count
))
1909 __this_cpu_inc(user_stack_count
);
1911 event
= trace_buffer_lock_reserve(buffer
, TRACE_USER_STACK
,
1912 sizeof(*entry
), flags
, pc
);
1914 goto out_drop_count
;
1915 entry
= ring_buffer_event_data(event
);
1917 entry
->tgid
= current
->tgid
;
1918 memset(&entry
->caller
, 0, sizeof(entry
->caller
));
1920 trace
.nr_entries
= 0;
1921 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
1923 trace
.entries
= entry
->caller
;
1925 save_stack_trace_user(&trace
);
1926 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
1927 __buffer_unlock_commit(buffer
, event
);
1930 __this_cpu_dec(user_stack_count
);
1936 static void __trace_userstack(struct trace_array
*tr
, unsigned long flags
)
1938 ftrace_trace_userstack(tr
, flags
, preempt_count());
1942 #endif /* CONFIG_STACKTRACE */
1944 /* created for use with alloc_percpu */
1945 struct trace_buffer_struct
{
1946 char buffer
[TRACE_BUF_SIZE
];
1949 static struct trace_buffer_struct
*trace_percpu_buffer
;
1950 static struct trace_buffer_struct
*trace_percpu_sirq_buffer
;
1951 static struct trace_buffer_struct
*trace_percpu_irq_buffer
;
1952 static struct trace_buffer_struct
*trace_percpu_nmi_buffer
;
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
1961 static char *get_trace_buf(void)
1963 struct trace_buffer_struct
*percpu_buffer
;
1966 * If we have allocated per cpu buffers, then we do not
1967 * need to do any locking.
1970 percpu_buffer
= trace_percpu_nmi_buffer
;
1972 percpu_buffer
= trace_percpu_irq_buffer
;
1973 else if (in_softirq())
1974 percpu_buffer
= trace_percpu_sirq_buffer
;
1976 percpu_buffer
= trace_percpu_buffer
;
1981 return this_cpu_ptr(&percpu_buffer
->buffer
[0]);
1984 static int alloc_percpu_trace_buffer(void)
1986 struct trace_buffer_struct
*buffers
;
1987 struct trace_buffer_struct
*sirq_buffers
;
1988 struct trace_buffer_struct
*irq_buffers
;
1989 struct trace_buffer_struct
*nmi_buffers
;
1991 buffers
= alloc_percpu(struct trace_buffer_struct
);
1995 sirq_buffers
= alloc_percpu(struct trace_buffer_struct
);
1999 irq_buffers
= alloc_percpu(struct trace_buffer_struct
);
2003 nmi_buffers
= alloc_percpu(struct trace_buffer_struct
);
2007 trace_percpu_buffer
= buffers
;
2008 trace_percpu_sirq_buffer
= sirq_buffers
;
2009 trace_percpu_irq_buffer
= irq_buffers
;
2010 trace_percpu_nmi_buffer
= nmi_buffers
;
2015 free_percpu(irq_buffers
);
2017 free_percpu(sirq_buffers
);
2019 free_percpu(buffers
);
2021 WARN(1, "Could not allocate percpu trace_printk buffer");
2025 static int buffers_allocated
;
2027 void trace_printk_init_buffers(void)
2029 if (buffers_allocated
)
2032 if (alloc_percpu_trace_buffer())
	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");
2051 /* Expand the buffers to set size */
2052 tracing_update_buffers();
2054 buffers_allocated
= 1;
2057 * trace_printk_init_buffers() can be called by modules.
2058 * If that happens, then we need to start cmdline recording
2059 * directly here. If the global_trace.buffer is already
2060 * allocated here, then this was called by module code.
2062 if (global_trace
.trace_buffer
.buffer
)
2063 tracing_start_cmdline_record();
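/*
 * Illustrative sketch (not part of the original source): trace_printk() is
 * the usual client of these per-context buffers.  Debug-only code uses it
 * like printk(), but the output lands in the ring buffer instead:
 */
#if 0	/* example only */
static void example_debug(int cpu, u64 delta)
{
	trace_printk("cpu %d saw a delta of %llu ns\n", cpu, delta);
}
#endif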
2066 void trace_printk_start_comm(void)
2068 /* Start tracing comms if trace printk is set */
2069 if (!buffers_allocated
)
2071 tracing_start_cmdline_record();
2074 static void trace_printk_start_stop_comm(int enabled
)
2076 if (!buffers_allocated
)
2080 tracing_start_cmdline_record();
2082 tracing_stop_cmdline_record();
2086 * trace_vbprintk - write binary msg to tracing buffer
2089 int trace_vbprintk(unsigned long ip
, const char *fmt
, va_list args
)
2091 struct ftrace_event_call
*call
= &event_bprint
;
2092 struct ring_buffer_event
*event
;
2093 struct ring_buffer
*buffer
;
2094 struct trace_array
*tr
= &global_trace
;
2095 struct bprint_entry
*entry
;
2096 unsigned long flags
;
2098 int len
= 0, size
, pc
;
2100 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
2103 /* Don't pollute graph traces with trace_vprintk internals */
2104 pause_graph_tracing();
2106 pc
= preempt_count();
2107 preempt_disable_notrace();
2109 tbuffer
= get_trace_buf();
2115 len
= vbin_printf((u32
*)tbuffer
, TRACE_BUF_SIZE
/sizeof(int), fmt
, args
);
2117 if (len
> TRACE_BUF_SIZE
/sizeof(int) || len
< 0)
2120 local_save_flags(flags
);
2121 size
= sizeof(*entry
) + sizeof(u32
) * len
;
2122 buffer
= tr
->trace_buffer
.buffer
;
2123 event
= trace_buffer_lock_reserve(buffer
, TRACE_BPRINT
, size
,
2127 entry
= ring_buffer_event_data(event
);
2131 memcpy(entry
->buf
, tbuffer
, sizeof(u32
) * len
);
2132 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2133 __buffer_unlock_commit(buffer
, event
);
2134 ftrace_trace_stack(buffer
, flags
, 6, pc
);
2138 preempt_enable_notrace();
2139 unpause_graph_tracing();
2143 EXPORT_SYMBOL_GPL(trace_vbprintk
);
2146 __trace_array_vprintk(struct ring_buffer
*buffer
,
2147 unsigned long ip
, const char *fmt
, va_list args
)
2149 struct ftrace_event_call
*call
= &event_print
;
2150 struct ring_buffer_event
*event
;
2151 int len
= 0, size
, pc
;
2152 struct print_entry
*entry
;
2153 unsigned long flags
;
2156 if (tracing_disabled
|| tracing_selftest_running
)
2159 /* Don't pollute graph traces with trace_vprintk internals */
2160 pause_graph_tracing();
2162 pc
= preempt_count();
2163 preempt_disable_notrace();
2166 tbuffer
= get_trace_buf();
2172 len
= vscnprintf(tbuffer
, TRACE_BUF_SIZE
, fmt
, args
);
2174 local_save_flags(flags
);
2175 size
= sizeof(*entry
) + len
+ 1;
2176 event
= trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
2180 entry
= ring_buffer_event_data(event
);
2183 memcpy(&entry
->buf
, tbuffer
, len
+ 1);
2184 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2185 __buffer_unlock_commit(buffer
, event
);
2186 ftrace_trace_stack(buffer
, flags
, 6, pc
);
2189 preempt_enable_notrace();
2190 unpause_graph_tracing();
2195 int trace_array_vprintk(struct trace_array
*tr
,
2196 unsigned long ip
, const char *fmt
, va_list args
)
2198 return __trace_array_vprintk(tr
->trace_buffer
.buffer
, ip
, fmt
, args
);
2201 int trace_array_printk(struct trace_array
*tr
,
2202 unsigned long ip
, const char *fmt
, ...)
2207 if (!(trace_flags
& TRACE_ITER_PRINTK
))
2211 ret
= trace_array_vprintk(tr
, ip
, fmt
, ap
);
2216 int trace_array_printk_buf(struct ring_buffer
*buffer
,
2217 unsigned long ip
, const char *fmt
, ...)
2222 if (!(trace_flags
& TRACE_ITER_PRINTK
))
2226 ret
= __trace_array_vprintk(buffer
, ip
, fmt
, ap
);
2231 int trace_vprintk(unsigned long ip
, const char *fmt
, va_list args
)
2233 return trace_array_vprintk(&global_trace
, ip
, fmt
, args
);
2235 EXPORT_SYMBOL_GPL(trace_vprintk
);
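/*
 * Illustrative sketch (not part of the original source): trace_vprintk()
 * feeds the global buffer, while trace_array_printk() targets a specific
 * instance.  A hypothetical wrapper emitting into the global trace:
 */
#if 0	/* example only */
static __printf(1, 2) int example_printk(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vprintk(_THIS_IP_, fmt, ap);
	va_end(ap);

	return ret;
}
#endif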
2237 static void trace_iterator_increment(struct trace_iterator
*iter
)
2239 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
2243 ring_buffer_read(buf_iter
, NULL
);
2246 static struct trace_entry
*
2247 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
2248 unsigned long *lost_events
)
2250 struct ring_buffer_event
*event
;
2251 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
2254 event
= ring_buffer_iter_peek(buf_iter
, ts
);
2256 event
= ring_buffer_peek(iter
->trace_buffer
->buffer
, cpu
, ts
,
2260 iter
->ent_size
= ring_buffer_event_length(event
);
2261 return ring_buffer_event_data(event
);
2267 static struct trace_entry
*
2268 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
2269 unsigned long *missing_events
, u64
*ent_ts
)
2271 struct ring_buffer
*buffer
= iter
->trace_buffer
->buffer
;
2272 struct trace_entry
*ent
, *next
= NULL
;
2273 unsigned long lost_events
= 0, next_lost
= 0;
2274 int cpu_file
= iter
->cpu_file
;
2275 u64 next_ts
= 0, ts
;
	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all CPUs; peek directly.
	 */
2284 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
2285 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
2287 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
2289 *ent_cpu
= cpu_file
;
2294 for_each_tracing_cpu(cpu
) {
2296 if (ring_buffer_empty_cpu(buffer
, cpu
))
2299 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
2302 * Pick the entry with the smallest timestamp:
2304 if (ent
&& (!next
|| ts
< next_ts
)) {
2308 next_lost
= lost_events
;
2309 next_size
= iter
->ent_size
;
2313 iter
->ent_size
= next_size
;
2316 *ent_cpu
= next_cpu
;
2322 *missing_events
= next_lost
;
2327 /* Find the next real entry, without updating the iterator itself */
2328 struct trace_entry
*trace_find_next_entry(struct trace_iterator
*iter
,
2329 int *ent_cpu
, u64
*ent_ts
)
2331 return __find_next_entry(iter
, ent_cpu
, NULL
, ent_ts
);
2334 /* Find the next real entry, and increment the iterator to the next entry */
2335 void *trace_find_next_entry_inc(struct trace_iterator
*iter
)
2337 iter
->ent
= __find_next_entry(iter
, &iter
->cpu
,
2338 &iter
->lost_events
, &iter
->ts
);
2341 trace_iterator_increment(iter
);
2343 return iter
->ent
? iter
: NULL
;
2346 static void trace_consume(struct trace_iterator
*iter
)
2348 ring_buffer_consume(iter
->trace_buffer
->buffer
, iter
->cpu
, &iter
->ts
,
2349 &iter
->lost_events
);
2352 static void *s_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
2354 struct trace_iterator
*iter
= m
->private;
2358 WARN_ON_ONCE(iter
->leftover
);
2362 /* can't go backwards */
2367 ent
= trace_find_next_entry_inc(iter
);
2371 while (ent
&& iter
->idx
< i
)
2372 ent
= trace_find_next_entry_inc(iter
);
2379 void tracing_iter_reset(struct trace_iterator
*iter
, int cpu
)
2381 struct ring_buffer_event
*event
;
2382 struct ring_buffer_iter
*buf_iter
;
2383 unsigned long entries
= 0;
2386 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= 0;
2388 buf_iter
= trace_buffer_iter(iter
, cpu
);
2392 ring_buffer_iter_reset(buf_iter
);
2395 * We could have the case with the max latency tracers
2396 * that a reset never took place on a cpu. This is evident
2397 * by the timestamp being before the start of the buffer.
2399 while ((event
= ring_buffer_iter_peek(buf_iter
, &ts
))) {
2400 if (ts
>= iter
->trace_buffer
->time_start
)
2403 ring_buffer_read(buf_iter
, NULL
);
2406 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= entries
;
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        struct trace_array *tr = iter->tr;
        int cpu_file = iter->cpu_file;
        void *p = NULL;
        loff_t l = 0;
        int cpu;

        /*
         * copy the tracer to avoid using a global lock all around.
         * iter->trace is a copy of current_trace, the pointer to the
         * name may be used instead of a strcmp(), as iter->trace->name
         * will point to the same string as current_trace->name.
         */
        mutex_lock(&trace_types_lock);
        if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
                *iter->trace = *tr->current_trace;
        mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (iter->snapshot && iter->trace->use_max_tr)
                return ERR_PTR(-EBUSY);
#endif

        if (!iter->snapshot)
                atomic_inc(&trace_record_cmdline_disabled);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;

                if (cpu_file == RING_BUFFER_ALL_CPUS) {
                        for_each_tracing_cpu(cpu)
                                tracing_iter_reset(iter, cpu);
                } else
                        tracing_iter_reset(iter, cpu_file);

                iter->leftover = 0;
                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                /*
                 * If we overflowed the seq_file before, then we want
                 * to just reuse the trace_seq buffer again.
                 */
                if (iter->leftover)
                        p = iter;
                else {
                        l = *pos - 1;
                        p = s_next(m, p, &l);
                }
        }

        trace_event_read_lock();
        trace_access_lock(cpu_file);
        return p;
}
static void s_stop(struct seq_file *m, void *p)
{
        struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (iter->snapshot && iter->trace->use_max_tr)
                return;
#endif

        if (!iter->snapshot)
                atomic_dec(&trace_record_cmdline_disabled);

        trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
}
static void
get_total_entries(struct trace_buffer *buf,
                  unsigned long *total, unsigned long *entries)
{
        unsigned long count;
        int cpu;

        *total = 0;
        *entries = 0;

        for_each_tracing_cpu(cpu) {
                count = ring_buffer_entries_cpu(buf->buffer, cpu);
                /*
                 * If this buffer has skipped entries, then we hold all
                 * entries for the trace and we need to ignore the
                 * ones before the time stamp.
                 */
                if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
                        count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
                        /* total is the same as the entries */
                        *total += count;
                } else
                        *total += count +
                                ring_buffer_overrun_cpu(buf->buffer, cpu);
                *entries += count;
        }
}
2518 static void print_lat_help_header(struct seq_file
*m
)
2520 seq_puts(m
, "# _------=> CPU# \n"
2521 "# / _-----=> irqs-off \n"
2522 "# | / _----=> need-resched \n"
2523 "# || / _---=> hardirq/softirq \n"
2524 "# ||| / _--=> preempt-depth \n"
2526 "# cmd pid ||||| time | caller \n"
2527 "# \\ / ||||| \\ | / \n");
2530 static void print_event_info(struct trace_buffer
*buf
, struct seq_file
*m
)
2532 unsigned long total
;
2533 unsigned long entries
;
2535 get_total_entries(buf
, &total
, &entries
);
2536 seq_printf(m
, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2537 entries
, total
, num_online_cpus());
2541 static void print_func_help_header(struct trace_buffer
*buf
, struct seq_file
*m
)
2543 print_event_info(buf
, m
);
2544 seq_puts(m
, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2548 static void print_func_help_header_irq(struct trace_buffer
*buf
, struct seq_file
*m
)
2550 print_event_info(buf
, m
);
2551 seq_puts(m
, "# _-----=> irqs-off\n"
2552 "# / _----=> need-resched\n"
2553 "# | / _---=> hardirq/softirq\n"
2554 "# || / _--=> preempt-depth\n"
2556 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2557 "# | | | |||| | |\n");
2561 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
2563 unsigned long sym_flags
= (trace_flags
& TRACE_ITER_SYM_MASK
);
2564 struct trace_buffer
*buf
= iter
->trace_buffer
;
2565 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
2566 struct tracer
*type
= iter
->trace
;
2567 unsigned long entries
;
2568 unsigned long total
;
2569 const char *name
= "preemption";
2573 get_total_entries(buf
, &total
, &entries
);
2575 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
2577 seq_puts(m
, "# -----------------------------------"
2578 "---------------------------------\n");
2579 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2580 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2581 nsecs_to_usecs(data
->saved_latency
),
2585 #if defined(CONFIG_PREEMPT_NONE)
2587 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2589 #elif defined(CONFIG_PREEMPT)
2594 /* These are reserved for later use */
2597 seq_printf(m
, " #P:%d)\n", num_online_cpus());
2601 seq_puts(m
, "# -----------------\n");
2602 seq_printf(m
, "# | task: %.16s-%d "
2603 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2604 data
->comm
, data
->pid
,
2605 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
2606 data
->policy
, data
->rt_priority
);
2607 seq_puts(m
, "# -----------------\n");
2609 if (data
->critical_start
) {
2610 seq_puts(m
, "# => started at: ");
2611 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
2612 trace_print_seq(m
, &iter
->seq
);
2613 seq_puts(m
, "\n# => ended at: ");
2614 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
2615 trace_print_seq(m
, &iter
->seq
);
2616 seq_puts(m
, "\n#\n");
2622 static void test_cpu_buff_start(struct trace_iterator
*iter
)
2624 struct trace_seq
*s
= &iter
->seq
;
2626 if (!(trace_flags
& TRACE_ITER_ANNOTATE
))
2629 if (!(iter
->iter_flags
& TRACE_FILE_ANNOTATE
))
2632 if (cpumask_test_cpu(iter
->cpu
, iter
->started
))
2635 if (per_cpu_ptr(iter
->trace_buffer
->data
, iter
->cpu
)->skipped_entries
)
2638 cpumask_set_cpu(iter
->cpu
, iter
->started
);
2640 /* Don't print started cpu buffer for the first entry of the trace */
2642 trace_seq_printf(s
, "##### CPU %u buffer started ####\n",
2646 static enum print_line_t
print_trace_fmt(struct trace_iterator
*iter
)
2648 struct trace_seq
*s
= &iter
->seq
;
2649 unsigned long sym_flags
= (trace_flags
& TRACE_ITER_SYM_MASK
);
2650 struct trace_entry
*entry
;
2651 struct trace_event
*event
;
2655 test_cpu_buff_start(iter
);
2657 event
= ftrace_find_event(entry
->type
);
2659 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2660 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
2661 trace_print_lat_context(iter
);
2663 trace_print_context(iter
);
2666 if (trace_seq_has_overflowed(s
))
2667 return TRACE_TYPE_PARTIAL_LINE
;
2670 return event
->funcs
->trace(iter
, sym_flags
, event
);
2672 trace_seq_printf(s
, "Unknown type %d\n", entry
->type
);
2674 return trace_handle_return(s
);
2677 static enum print_line_t
print_raw_fmt(struct trace_iterator
*iter
)
2679 struct trace_seq
*s
= &iter
->seq
;
2680 struct trace_entry
*entry
;
2681 struct trace_event
*event
;
2685 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
)
2686 trace_seq_printf(s
, "%d %d %llu ",
2687 entry
->pid
, iter
->cpu
, iter
->ts
);
2689 if (trace_seq_has_overflowed(s
))
2690 return TRACE_TYPE_PARTIAL_LINE
;
2692 event
= ftrace_find_event(entry
->type
);
2694 return event
->funcs
->raw(iter
, 0, event
);
2696 trace_seq_printf(s
, "%d ?\n", entry
->type
);
2698 return trace_handle_return(s
);
2701 static enum print_line_t
print_hex_fmt(struct trace_iterator
*iter
)
2703 struct trace_seq
*s
= &iter
->seq
;
2704 unsigned char newline
= '\n';
2705 struct trace_entry
*entry
;
2706 struct trace_event
*event
;
2710 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2711 SEQ_PUT_HEX_FIELD(s
, entry
->pid
);
2712 SEQ_PUT_HEX_FIELD(s
, iter
->cpu
);
2713 SEQ_PUT_HEX_FIELD(s
, iter
->ts
);
2714 if (trace_seq_has_overflowed(s
))
2715 return TRACE_TYPE_PARTIAL_LINE
;
2718 event
= ftrace_find_event(entry
->type
);
2720 enum print_line_t ret
= event
->funcs
->hex(iter
, 0, event
);
2721 if (ret
!= TRACE_TYPE_HANDLED
)
2725 SEQ_PUT_FIELD(s
, newline
);
2727 return trace_handle_return(s
);
2730 static enum print_line_t
print_bin_fmt(struct trace_iterator
*iter
)
2732 struct trace_seq
*s
= &iter
->seq
;
2733 struct trace_entry
*entry
;
2734 struct trace_event
*event
;
2738 if (trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
2739 SEQ_PUT_FIELD(s
, entry
->pid
);
2740 SEQ_PUT_FIELD(s
, iter
->cpu
);
2741 SEQ_PUT_FIELD(s
, iter
->ts
);
2742 if (trace_seq_has_overflowed(s
))
2743 return TRACE_TYPE_PARTIAL_LINE
;
2746 event
= ftrace_find_event(entry
->type
);
2747 return event
? event
->funcs
->binary(iter
, 0, event
) :
2751 int trace_empty(struct trace_iterator
*iter
)
2753 struct ring_buffer_iter
*buf_iter
;
2756 /* If we are looking at one CPU buffer, only check that one */
2757 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
2758 cpu
= iter
->cpu_file
;
2759 buf_iter
= trace_buffer_iter(iter
, cpu
);
2761 if (!ring_buffer_iter_empty(buf_iter
))
2764 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
2770 for_each_tracing_cpu(cpu
) {
2771 buf_iter
= trace_buffer_iter(iter
, cpu
);
2773 if (!ring_buffer_iter_empty(buf_iter
))
2776 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
2784 /* Called with trace_event_read_lock() held. */
2785 enum print_line_t
print_trace_line(struct trace_iterator
*iter
)
2787 enum print_line_t ret
;
2789 if (iter
->lost_events
) {
2790 trace_seq_printf(&iter
->seq
, "CPU:%d [LOST %lu EVENTS]\n",
2791 iter
->cpu
, iter
->lost_events
);
2792 if (trace_seq_has_overflowed(&iter
->seq
))
2793 return TRACE_TYPE_PARTIAL_LINE
;
2796 if (iter
->trace
&& iter
->trace
->print_line
) {
2797 ret
= iter
->trace
->print_line(iter
);
2798 if (ret
!= TRACE_TYPE_UNHANDLED
)
2802 if (iter
->ent
->type
== TRACE_BPUTS
&&
2803 trace_flags
& TRACE_ITER_PRINTK
&&
2804 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2805 return trace_print_bputs_msg_only(iter
);
2807 if (iter
->ent
->type
== TRACE_BPRINT
&&
2808 trace_flags
& TRACE_ITER_PRINTK
&&
2809 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2810 return trace_print_bprintk_msg_only(iter
);
2812 if (iter
->ent
->type
== TRACE_PRINT
&&
2813 trace_flags
& TRACE_ITER_PRINTK
&&
2814 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
2815 return trace_print_printk_msg_only(iter
);
2817 if (trace_flags
& TRACE_ITER_BIN
)
2818 return print_bin_fmt(iter
);
2820 if (trace_flags
& TRACE_ITER_HEX
)
2821 return print_hex_fmt(iter
);
2823 if (trace_flags
& TRACE_ITER_RAW
)
2824 return print_raw_fmt(iter
);
2826 return print_trace_fmt(iter
);
2829 void trace_latency_header(struct seq_file
*m
)
2831 struct trace_iterator
*iter
= m
->private;
2833 /* print nothing if the buffers are empty */
2834 if (trace_empty(iter
))
2837 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
2838 print_trace_header(m
, iter
);
2840 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2841 print_lat_help_header(m
);
2844 void trace_default_header(struct seq_file
*m
)
2846 struct trace_iterator
*iter
= m
->private;
2848 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
2851 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
2852 /* print nothing if the buffers are empty */
2853 if (trace_empty(iter
))
2855 print_trace_header(m
, iter
);
2856 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
2857 print_lat_help_header(m
);
2859 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
2860 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
2861 print_func_help_header_irq(iter
->trace_buffer
, m
);
2863 print_func_help_header(iter
->trace_buffer
, m
);
static void test_ftrace_alive(struct seq_file *m)
{
        if (!ftrace_is_dead())
                return;
        seq_puts(m, "# WARNING: FUNCTION TRACING IS DISABLED\n"
                    "#          MAY BE MISSING FUNCTION EVENTS\n");
}
2876 #ifdef CONFIG_TRACER_MAX_TRACE
2877 static void show_snapshot_main_help(struct seq_file
*m
)
2879 seq_puts(m
, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2880 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2881 "# Takes a snapshot of the main buffer.\n"
2882 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2883 "# (Doesn't have to be '2' works with any number that\n"
2884 "# is not a '0' or '1')\n");
2887 static void show_snapshot_percpu_help(struct seq_file
*m
)
2889 seq_puts(m
, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2890 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2891 seq_puts(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2892 "# Takes a snapshot of the main buffer for this cpu.\n");
2894 seq_puts(m
, "# echo 1 > snapshot : Not supported with this kernel.\n"
2895 "# Must use main snapshot file to allocate.\n");
        seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
                    "#                      (Doesn't have to be '2'; works with any number that\n"
                    "#                       is not a '0' or '1')\n");
2902 static void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
)
2904 if (iter
->tr
->allocated_snapshot
)
2905 seq_puts(m
, "#\n# * Snapshot is allocated *\n#\n");
2907 seq_puts(m
, "#\n# * Snapshot is freed *\n#\n");
2909 seq_puts(m
, "# Snapshot commands:\n");
2910 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
2911 show_snapshot_main_help(m
);
2913 show_snapshot_percpu_help(m
);
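/*
 * Illustrative aside (not part of this file): a minimal user-space sketch of
 * the snapshot workflow the help text above describes.  The tracefs path
 * below is an assumption; on many systems the files live under
 * /sys/kernel/debug/tracing.
 */
#if 0   /* example only, never compiled */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* Allocate the snapshot buffer and snapshot the main buffer. */
        write_str("/sys/kernel/debug/tracing/snapshot", "1");
        /* ... later, clear the snapshot buffer without freeing it. */
        write_str("/sys/kernel/debug/tracing/snapshot", "2");
        /* Finally, free the snapshot buffer. */
        write_str("/sys/kernel/debug/tracing/snapshot", "0");
        return 0;
}
#endif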
2916 /* Should never be called */
2917 static inline void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
) { }
2920 static int s_show(struct seq_file
*m
, void *v
)
2922 struct trace_iterator
*iter
= v
;
2925 if (iter
->ent
== NULL
) {
2927 seq_printf(m
, "# tracer: %s\n", iter
->trace
->name
);
2929 test_ftrace_alive(m
);
2931 if (iter
->snapshot
&& trace_empty(iter
))
2932 print_snapshot_help(m
, iter
);
2933 else if (iter
->trace
&& iter
->trace
->print_header
)
2934 iter
->trace
->print_header(m
);
2936 trace_default_header(m
);
2938 } else if (iter
->leftover
) {
2940 * If we filled the seq_file buffer earlier, we
2941 * want to just show it now.
2943 ret
= trace_print_seq(m
, &iter
->seq
);
2945 /* ret should this time be zero, but you never know */
2946 iter
->leftover
= ret
;
2949 print_trace_line(iter
);
2950 ret
= trace_print_seq(m
, &iter
->seq
);
2952 * If we overflow the seq_file buffer, then it will
2953 * ask us for this data again at start up.
2955 * ret is 0 if seq_file write succeeded.
2958 iter
->leftover
= ret
;
2965 * Should be used after trace_array_get(), trace_types_lock
2966 * ensures that i_cdev was already initialized.
2968 static inline int tracing_get_cpu(struct inode
*inode
)
2970 if (inode
->i_cdev
) /* See trace_create_cpu_file() */
2971 return (long)inode
->i_cdev
- 1;
2972 return RING_BUFFER_ALL_CPUS
;
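/*
 * Illustrative aside (not part of this file): tracing_get_cpu() above relies
 * on trace_create_cpu_file() having stashed "cpu + 1" in inode->i_cdev, so a
 * NULL i_cdev still means "all CPUs".  A standalone sketch of that
 * encode/decode pair, using a plain cookie in place of the real inode field:
 */
#if 0   /* example only, never compiled */
#define EXAMPLE_ALL_CPUS        -1      /* stands in for RING_BUFFER_ALL_CPUS */

static void *encode_cpu(long cpu)
{
        /* Store cpu + 1 so that cpu 0 is distinguishable from "not set". */
        return (void *)(cpu + 1);
}

static long decode_cpu(void *cookie)
{
        if (cookie)
                return (long)cookie - 1;
        return EXAMPLE_ALL_CPUS;
}
#endif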
2975 static const struct seq_operations tracer_seq_ops
= {
2982 static struct trace_iterator
*
2983 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
2985 struct trace_array
*tr
= inode
->i_private
;
2986 struct trace_iterator
*iter
;
2989 if (tracing_disabled
)
2990 return ERR_PTR(-ENODEV
);
2992 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
2994 return ERR_PTR(-ENOMEM
);
2996 iter
->buffer_iter
= kzalloc(sizeof(*iter
->buffer_iter
) * num_possible_cpus(),
2998 if (!iter
->buffer_iter
)
3002 * We make a copy of the current tracer to avoid concurrent
3003 * changes on it while we are reading.
3005 mutex_lock(&trace_types_lock
);
3006 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
3010 *iter
->trace
= *tr
->current_trace
;
3012 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
3017 #ifdef CONFIG_TRACER_MAX_TRACE
3018 /* Currently only the top directory has a snapshot */
3019 if (tr
->current_trace
->print_max
|| snapshot
)
3020 iter
->trace_buffer
= &tr
->max_buffer
;
3023 iter
->trace_buffer
= &tr
->trace_buffer
;
3024 iter
->snapshot
= snapshot
;
3026 iter
->cpu_file
= tracing_get_cpu(inode
);
3027 mutex_init(&iter
->mutex
);
3029 /* Notify the tracer early; before we stop tracing. */
3030 if (iter
->trace
&& iter
->trace
->open
)
3031 iter
->trace
->open(iter
);
3033 /* Annotate start of buffers if we had overruns */
3034 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
3035 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
3037 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3038 if (trace_clocks
[tr
->clock_id
].in_ns
)
3039 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
3041 /* stop the trace while dumping if we are not opening "snapshot" */
3042 if (!iter
->snapshot
)
3043 tracing_stop_tr(tr
);
3045 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
3046 for_each_tracing_cpu(cpu
) {
3047 iter
->buffer_iter
[cpu
] =
3048 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3050 ring_buffer_read_prepare_sync();
3051 for_each_tracing_cpu(cpu
) {
3052 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3053 tracing_iter_reset(iter
, cpu
);
3056 cpu
= iter
->cpu_file
;
3057 iter
->buffer_iter
[cpu
] =
3058 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3059 ring_buffer_read_prepare_sync();
3060 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3061 tracing_iter_reset(iter
, cpu
);
3064 mutex_unlock(&trace_types_lock
);
3069 mutex_unlock(&trace_types_lock
);
3071 kfree(iter
->buffer_iter
);
3073 seq_release_private(inode
, file
);
3074 return ERR_PTR(-ENOMEM
);
3077 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
3079 if (tracing_disabled
)
3082 filp
->private_data
= inode
->i_private
;
3086 bool tracing_is_disabled(void)
3088 return (tracing_disabled
) ? true: false;
3092 * Open and update trace_array ref count.
3093 * Must have the current trace_array passed to it.
3095 static int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
3097 struct trace_array
*tr
= inode
->i_private
;
3099 if (tracing_disabled
)
3102 if (trace_array_get(tr
) < 0)
3105 filp
->private_data
= inode
->i_private
;
3110 static int tracing_release(struct inode
*inode
, struct file
*file
)
3112 struct trace_array
*tr
= inode
->i_private
;
3113 struct seq_file
*m
= file
->private_data
;
3114 struct trace_iterator
*iter
;
3117 if (!(file
->f_mode
& FMODE_READ
)) {
3118 trace_array_put(tr
);
3122 /* Writes do not use seq_file */
3124 mutex_lock(&trace_types_lock
);
3126 for_each_tracing_cpu(cpu
) {
3127 if (iter
->buffer_iter
[cpu
])
3128 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
3131 if (iter
->trace
&& iter
->trace
->close
)
3132 iter
->trace
->close(iter
);
3134 if (!iter
->snapshot
)
3135 /* reenable tracing if it was previously enabled */
3136 tracing_start_tr(tr
);
3138 __trace_array_put(tr
);
3140 mutex_unlock(&trace_types_lock
);
3142 mutex_destroy(&iter
->mutex
);
3143 free_cpumask_var(iter
->started
);
3145 kfree(iter
->buffer_iter
);
3146 seq_release_private(inode
, file
);
3151 static int tracing_release_generic_tr(struct inode
*inode
, struct file
*file
)
3153 struct trace_array
*tr
= inode
->i_private
;
3155 trace_array_put(tr
);
3159 static int tracing_single_release_tr(struct inode
*inode
, struct file
*file
)
3161 struct trace_array
*tr
= inode
->i_private
;
3163 trace_array_put(tr
);
3165 return single_release(inode
, file
);
3168 static int tracing_open(struct inode
*inode
, struct file
*file
)
3170 struct trace_array
*tr
= inode
->i_private
;
3171 struct trace_iterator
*iter
;
3174 if (trace_array_get(tr
) < 0)
3177 /* If this file was open for write, then erase contents */
3178 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
3179 int cpu
= tracing_get_cpu(inode
);
3181 if (cpu
== RING_BUFFER_ALL_CPUS
)
3182 tracing_reset_online_cpus(&tr
->trace_buffer
);
3184 tracing_reset(&tr
->trace_buffer
, cpu
);
3187 if (file
->f_mode
& FMODE_READ
) {
3188 iter
= __tracing_open(inode
, file
, false);
3190 ret
= PTR_ERR(iter
);
3191 else if (trace_flags
& TRACE_ITER_LATENCY_FMT
)
3192 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
3196 trace_array_put(tr
);
3202 * Some tracers are not suitable for instance buffers.
3203 * A tracer is always available for the global array (toplevel)
3204 * or if it explicitly states that it is.
3207 trace_ok_for_array(struct tracer
*t
, struct trace_array
*tr
)
3209 return (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) || t
->allow_instances
;
3212 /* Find the next tracer that this trace array may use */
3213 static struct tracer
*
3214 get_tracer_for_array(struct trace_array
*tr
, struct tracer
*t
)
3216 while (t
&& !trace_ok_for_array(t
, tr
))
3223 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3225 struct trace_array
*tr
= m
->private;
3226 struct tracer
*t
= v
;
3231 t
= get_tracer_for_array(tr
, t
->next
);
3236 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
3238 struct trace_array
*tr
= m
->private;
3242 mutex_lock(&trace_types_lock
);
3244 t
= get_tracer_for_array(tr
, trace_types
);
3245 for (; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
3251 static void t_stop(struct seq_file
*m
, void *p
)
3253 mutex_unlock(&trace_types_lock
);
3256 static int t_show(struct seq_file
*m
, void *v
)
3258 struct tracer
*t
= v
;
3263 seq_puts(m
, t
->name
);
3272 static const struct seq_operations show_traces_seq_ops
= {
3279 static int show_traces_open(struct inode
*inode
, struct file
*file
)
3281 struct trace_array
*tr
= inode
->i_private
;
3285 if (tracing_disabled
)
3288 ret
= seq_open(file
, &show_traces_seq_ops
);
3292 m
= file
->private_data
;
3299 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
3300 size_t count
, loff_t
*ppos
)
3305 loff_t
tracing_lseek(struct file
*file
, loff_t offset
, int whence
)
3309 if (file
->f_mode
& FMODE_READ
)
3310 ret
= seq_lseek(file
, offset
, whence
);
3312 file
->f_pos
= ret
= 0;
3317 static const struct file_operations tracing_fops
= {
3318 .open
= tracing_open
,
3320 .write
= tracing_write_stub
,
3321 .llseek
= tracing_lseek
,
3322 .release
= tracing_release
,
3325 static const struct file_operations show_traces_fops
= {
3326 .open
= show_traces_open
,
3328 .release
= seq_release
,
3329 .llseek
= seq_lseek
,
3333 * The tracer itself will not take this lock, but still we want
3334 * to provide a consistent cpumask to user-space:
3336 static DEFINE_MUTEX(tracing_cpumask_update_lock
);
3339 * Temporary storage for the character representation of the
3340 * CPU bitmask (and one more byte for the newline):
3342 static char mask_str
[NR_CPUS
+ 1];
3345 tracing_cpumask_read(struct file
*filp
, char __user
*ubuf
,
3346 size_t count
, loff_t
*ppos
)
3348 struct trace_array
*tr
= file_inode(filp
)->i_private
;
3351 mutex_lock(&tracing_cpumask_update_lock
);
3353 len
= cpumask_scnprintf(mask_str
, count
, tr
->tracing_cpumask
);
3354 if (count
- len
< 2) {
3358 len
+= sprintf(mask_str
+ len
, "\n");
3359 count
= simple_read_from_buffer(ubuf
, count
, ppos
, mask_str
, NR_CPUS
+1);
3362 mutex_unlock(&tracing_cpumask_update_lock
);
3368 tracing_cpumask_write(struct file
*filp
, const char __user
*ubuf
,
3369 size_t count
, loff_t
*ppos
)
3371 struct trace_array
*tr
= file_inode(filp
)->i_private
;
3372 cpumask_var_t tracing_cpumask_new
;
3375 if (!alloc_cpumask_var(&tracing_cpumask_new
, GFP_KERNEL
))
3378 err
= cpumask_parse_user(ubuf
, count
, tracing_cpumask_new
);
3382 mutex_lock(&tracing_cpumask_update_lock
);
3384 local_irq_disable();
3385 arch_spin_lock(&tr
->max_lock
);
3386 for_each_tracing_cpu(cpu
) {
3388 * Increase/decrease the disabled counter if we are
3389 * about to flip a bit in the cpumask:
3391 if (cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
3392 !cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
3393 atomic_inc(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
3394 ring_buffer_record_disable_cpu(tr
->trace_buffer
.buffer
, cpu
);
3396 if (!cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
3397 cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
3398 atomic_dec(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
3399 ring_buffer_record_enable_cpu(tr
->trace_buffer
.buffer
, cpu
);
3402 arch_spin_unlock(&tr
->max_lock
);
3405 cpumask_copy(tr
->tracing_cpumask
, tracing_cpumask_new
);
3407 mutex_unlock(&tracing_cpumask_update_lock
);
3408 free_cpumask_var(tracing_cpumask_new
);
3413 free_cpumask_var(tracing_cpumask_new
);
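/*
 * Illustrative aside (not part of this file): tracing_cpumask_write() above
 * runs when user space writes a hex mask to the tracing_cpumask file.  A
 * minimal user-space sketch, assuming the debugfs tracing mount point:
 */
#if 0   /* example only, never compiled */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/tracing/tracing_cpumask", "w");

        if (!f)
                return 1;
        /* Restrict tracing to CPUs 0 and 1: bits 0 and 1 set. */
        fputs("3", f);
        return fclose(f);
}
#endif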
3418 static const struct file_operations tracing_cpumask_fops
= {
3419 .open
= tracing_open_generic_tr
,
3420 .read
= tracing_cpumask_read
,
3421 .write
= tracing_cpumask_write
,
3422 .release
= tracing_release_generic_tr
,
3423 .llseek
= generic_file_llseek
,
3426 static int tracing_trace_options_show(struct seq_file
*m
, void *v
)
3428 struct tracer_opt
*trace_opts
;
3429 struct trace_array
*tr
= m
->private;
3433 mutex_lock(&trace_types_lock
);
3434 tracer_flags
= tr
->current_trace
->flags
->val
;
3435 trace_opts
= tr
->current_trace
->flags
->opts
;
3437 for (i
= 0; trace_options
[i
]; i
++) {
3438 if (trace_flags
& (1 << i
))
3439 seq_printf(m
, "%s\n", trace_options
[i
]);
3441 seq_printf(m
, "no%s\n", trace_options
[i
]);
3444 for (i
= 0; trace_opts
[i
].name
; i
++) {
3445 if (tracer_flags
& trace_opts
[i
].bit
)
3446 seq_printf(m
, "%s\n", trace_opts
[i
].name
);
3448 seq_printf(m
, "no%s\n", trace_opts
[i
].name
);
3450 mutex_unlock(&trace_types_lock
);
3455 static int __set_tracer_option(struct trace_array
*tr
,
3456 struct tracer_flags
*tracer_flags
,
3457 struct tracer_opt
*opts
, int neg
)
3459 struct tracer
*trace
= tr
->current_trace
;
3462 ret
= trace
->set_flag(tr
, tracer_flags
->val
, opts
->bit
, !neg
);
3467 tracer_flags
->val
&= ~opts
->bit
;
3469 tracer_flags
->val
|= opts
->bit
;
3473 /* Try to assign a tracer specific option */
3474 static int set_tracer_option(struct trace_array
*tr
, char *cmp
, int neg
)
3476 struct tracer
*trace
= tr
->current_trace
;
3477 struct tracer_flags
*tracer_flags
= trace
->flags
;
3478 struct tracer_opt
*opts
= NULL
;
3481 for (i
= 0; tracer_flags
->opts
[i
].name
; i
++) {
3482 opts
= &tracer_flags
->opts
[i
];
3484 if (strcmp(cmp
, opts
->name
) == 0)
3485 return __set_tracer_option(tr
, trace
->flags
, opts
, neg
);
3491 /* Some tracers require overwrite to stay enabled */
3492 int trace_keep_overwrite(struct tracer
*tracer
, u32 mask
, int set
)
3494 if (tracer
->enabled
&& (mask
& TRACE_ITER_OVERWRITE
) && !set
)
3500 int set_tracer_flag(struct trace_array
*tr
, unsigned int mask
, int enabled
)
3502 /* do nothing if flag is already set */
3503 if (!!(trace_flags
& mask
) == !!enabled
)
3506 /* Give the tracer a chance to approve the change */
3507 if (tr
->current_trace
->flag_changed
)
3508 if (tr
->current_trace
->flag_changed(tr
, mask
, !!enabled
))
3512 trace_flags
|= mask
;
3514 trace_flags
&= ~mask
;
3516 if (mask
== TRACE_ITER_RECORD_CMD
)
3517 trace_event_enable_cmd_record(enabled
);
3519 if (mask
== TRACE_ITER_OVERWRITE
) {
3520 ring_buffer_change_overwrite(tr
->trace_buffer
.buffer
, enabled
);
3521 #ifdef CONFIG_TRACER_MAX_TRACE
3522 ring_buffer_change_overwrite(tr
->max_buffer
.buffer
, enabled
);
3526 if (mask
== TRACE_ITER_PRINTK
)
3527 trace_printk_start_stop_comm(enabled
);
3532 static int trace_set_options(struct trace_array
*tr
, char *option
)
3539 cmp
= strstrip(option
);
3541 if (strncmp(cmp
, "no", 2) == 0) {
3546 mutex_lock(&trace_types_lock
);
3548 for (i
= 0; trace_options
[i
]; i
++) {
3549 if (strcmp(cmp
, trace_options
[i
]) == 0) {
3550 ret
= set_tracer_flag(tr
, 1 << i
, !neg
);
3555 /* If no option could be set, test the specific tracer options */
3556 if (!trace_options
[i
])
3557 ret
= set_tracer_option(tr
, cmp
, neg
);
3559 mutex_unlock(&trace_types_lock
);
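/*
 * Illustrative aside (not part of this file): trace_set_options() above
 * accepts either "<flag>" or "no<flag>".  The standalone helper below models
 * just that parse step with a hypothetical option table; the real code walks
 * trace_options[] first and then the tracer-specific options.
 */
#if 0   /* example only, never compiled */
#include <string.h>

/* Returns 1 and fills *neg/*name when the string matches a known option. */
static int parse_option(const char *cmp, const char *const *options,
                        int *neg, const char **name)
{
        int i;

        *neg = 0;
        if (strncmp(cmp, "no", 2) == 0) {
                *neg = 1;
                cmp += 2;
        }

        for (i = 0; options[i]; i++) {
                if (strcmp(cmp, options[i]) == 0) {
                        *name = options[i];
                        return 1;
                }
        }
        return 0;
}
#endif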
3565 tracing_trace_options_write(struct file
*filp
, const char __user
*ubuf
,
3566 size_t cnt
, loff_t
*ppos
)
3568 struct seq_file
*m
= filp
->private_data
;
3569 struct trace_array
*tr
= m
->private;
3573 if (cnt
>= sizeof(buf
))
3576 if (copy_from_user(&buf
, ubuf
, cnt
))
3581 ret
= trace_set_options(tr
, buf
);
3590 static int tracing_trace_options_open(struct inode
*inode
, struct file
*file
)
3592 struct trace_array
*tr
= inode
->i_private
;
3595 if (tracing_disabled
)
3598 if (trace_array_get(tr
) < 0)
3601 ret
= single_open(file
, tracing_trace_options_show
, inode
->i_private
);
3603 trace_array_put(tr
);
3608 static const struct file_operations tracing_iter_fops
= {
3609 .open
= tracing_trace_options_open
,
3611 .llseek
= seq_lseek
,
3612 .release
= tracing_single_release_tr
,
3613 .write
= tracing_trace_options_write
,
3616 static const char readme_msg
[] =
3617 "tracing mini-HOWTO:\n\n"
3618 "# echo 0 > tracing_on : quick way to disable tracing\n"
3619 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3620 " Important files:\n"
3621 " trace\t\t\t- The static contents of the buffer\n"
3622 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3623 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3624 " current_tracer\t- function and latency tracers\n"
3625 " available_tracers\t- list of configured tracers for current_tracer\n"
3626 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3627 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3628 " trace_clock\t\t-change the clock used to order events\n"
3629 " local: Per cpu clock but may not be synced across CPUs\n"
3630 " global: Synced across CPUs but slows tracing down.\n"
3631 " counter: Not a clock, but just an increment\n"
3632 " uptime: Jiffy counter from time of boot\n"
3633 " perf: Same clock that perf events use\n"
3634 #ifdef CONFIG_X86_64
3635 " x86-tsc: TSC cycle counter\n"
3637 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3638 " tracing_cpumask\t- Limit which CPUs to trace\n"
3639 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3640 "\t\t\t Remove sub-buffer with rmdir\n"
3641 " trace_options\t\t- Set format or modify how tracing happens\n"
3642 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3643 "\t\t\t option name\n"
3644 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3645 #ifdef CONFIG_DYNAMIC_FTRACE
3646 "\n available_filter_functions - list of functions that can be filtered on\n"
3647 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3648 "\t\t\t functions\n"
3649 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3650 "\t modules: Can select a group via module\n"
3651 "\t Format: :mod:<module-name>\n"
3652 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3653 "\t triggers: a command to perform when function is hit\n"
3654 "\t Format: <function>:<trigger>[:count]\n"
3655 "\t trigger: traceon, traceoff\n"
3656 "\t\t enable_event:<system>:<event>\n"
3657 "\t\t disable_event:<system>:<event>\n"
3658 #ifdef CONFIG_STACKTRACE
3661 #ifdef CONFIG_TRACER_SNAPSHOT
3666 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3667 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3668 "\t The first one will disable tracing every time do_fault is hit\n"
3669 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3670 "\t The first time do trap is hit and it disables tracing, the\n"
3671 "\t counter will decrement to 2. If tracing is already disabled,\n"
3672 "\t the counter will not decrement. It only decrements when the\n"
3673 "\t trigger did work\n"
3674 "\t To remove trigger without count:\n"
3675 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3676 "\t To remove trigger with a count:\n"
3677 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3678 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3679 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3680 "\t modules: Can select a group via module command :mod:\n"
3681 "\t Does not accept triggers\n"
3682 #endif /* CONFIG_DYNAMIC_FTRACE */
3683 #ifdef CONFIG_FUNCTION_TRACER
3684 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3687 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3688 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3689 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3690 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3692 #ifdef CONFIG_TRACER_SNAPSHOT
3693 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3694 "\t\t\t snapshot buffer. Read the contents for more\n"
3695 "\t\t\t information\n"
3697 #ifdef CONFIG_STACK_TRACER
3698 " stack_trace\t\t- Shows the max stack trace when active\n"
3699 " stack_max_size\t- Shows current max stack size that was traced\n"
3700 "\t\t\t Write into this file to reset the max size (trigger a\n"
3701 "\t\t\t new trace)\n"
3702 #ifdef CONFIG_DYNAMIC_FTRACE
3703 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3706 #endif /* CONFIG_STACK_TRACER */
3707 " events/\t\t- Directory containing all trace event subsystems:\n"
3708 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3709 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3710 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3712 " filter\t\t- If set, only events passing filter are traced\n"
3713 " events/<system>/<event>/\t- Directory containing control files for\n"
3715 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3716 " filter\t\t- If set, only events passing filter are traced\n"
3717 " trigger\t\t- If set, a command to perform when event is hit\n"
3718 "\t Format: <trigger>[:count][if <filter>]\n"
3719 "\t trigger: traceon, traceoff\n"
3720 "\t enable_event:<system>:<event>\n"
3721 "\t disable_event:<system>:<event>\n"
3722 #ifdef CONFIG_STACKTRACE
3725 #ifdef CONFIG_TRACER_SNAPSHOT
3728 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3729 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3730 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3731 "\t events/block/block_unplug/trigger\n"
3732 "\t The first disables tracing every time block_unplug is hit.\n"
3733 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3734 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3735 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3736 "\t Like function triggers, the counter is only decremented if it\n"
3737 "\t enabled or disabled tracing.\n"
3738 "\t To remove a trigger without a count:\n"
3739 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3740 "\t To remove a trigger with a count:\n"
3741 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3742 "\t Filters can be ignored when removing a trigger.\n"
3746 tracing_readme_read(struct file
*filp
, char __user
*ubuf
,
3747 size_t cnt
, loff_t
*ppos
)
3749 return simple_read_from_buffer(ubuf
, cnt
, ppos
,
3750 readme_msg
, strlen(readme_msg
));
3753 static const struct file_operations tracing_readme_fops
= {
3754 .open
= tracing_open_generic
,
3755 .read
= tracing_readme_read
,
3756 .llseek
= generic_file_llseek
,
3759 static void *saved_cmdlines_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3761 unsigned int *ptr
= v
;
3763 if (*pos
|| m
->count
)
3768 for (; ptr
< &savedcmd
->map_cmdline_to_pid
[savedcmd
->cmdline_num
];
3770 if (*ptr
== -1 || *ptr
== NO_CMDLINE_MAP
)
3779 static void *saved_cmdlines_start(struct seq_file
*m
, loff_t
*pos
)
3785 arch_spin_lock(&trace_cmdline_lock
);
3787 v
= &savedcmd
->map_cmdline_to_pid
[0];
3789 v
= saved_cmdlines_next(m
, v
, &l
);
3797 static void saved_cmdlines_stop(struct seq_file
*m
, void *v
)
3799 arch_spin_unlock(&trace_cmdline_lock
);
3803 static int saved_cmdlines_show(struct seq_file
*m
, void *v
)
3805 char buf
[TASK_COMM_LEN
];
3806 unsigned int *pid
= v
;
3808 __trace_find_cmdline(*pid
, buf
);
3809 seq_printf(m
, "%d %s\n", *pid
, buf
);
3813 static const struct seq_operations tracing_saved_cmdlines_seq_ops
= {
3814 .start
= saved_cmdlines_start
,
3815 .next
= saved_cmdlines_next
,
3816 .stop
= saved_cmdlines_stop
,
3817 .show
= saved_cmdlines_show
,
3820 static int tracing_saved_cmdlines_open(struct inode
*inode
, struct file
*filp
)
3822 if (tracing_disabled
)
3825 return seq_open(filp
, &tracing_saved_cmdlines_seq_ops
);
3828 static const struct file_operations tracing_saved_cmdlines_fops
= {
3829 .open
= tracing_saved_cmdlines_open
,
3831 .llseek
= seq_lseek
,
3832 .release
= seq_release
,
3836 tracing_saved_cmdlines_size_read(struct file
*filp
, char __user
*ubuf
,
3837 size_t cnt
, loff_t
*ppos
)
3842 arch_spin_lock(&trace_cmdline_lock
);
3843 r
= scnprintf(buf
, sizeof(buf
), "%u\n", savedcmd
->cmdline_num
);
3844 arch_spin_unlock(&trace_cmdline_lock
);
3846 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
3849 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer
*s
)
3851 kfree(s
->saved_cmdlines
);
3852 kfree(s
->map_cmdline_to_pid
);
3856 static int tracing_resize_saved_cmdlines(unsigned int val
)
3858 struct saved_cmdlines_buffer
*s
, *savedcmd_temp
;
3860 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
3864 if (allocate_cmdlines_buffer(val
, s
) < 0) {
3869 arch_spin_lock(&trace_cmdline_lock
);
3870 savedcmd_temp
= savedcmd
;
3872 arch_spin_unlock(&trace_cmdline_lock
);
3873 free_saved_cmdlines_buffer(savedcmd_temp
);
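/*
 * Illustrative aside (not part of this file): tracing_resize_saved_cmdlines()
 * above allocates a whole new buffer, swaps the global pointer while holding
 * trace_cmdline_lock, and only then frees the old buffer, so readers never
 * see a half-resized structure.  A generic user-space sketch of that pattern
 * with hypothetical names:
 */
#if 0   /* example only, never compiled */
#include <pthread.h>
#include <stdlib.h>

struct table {
        unsigned int nr;
        int *slots;
};

static struct table *current_table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int resize_table(unsigned int nr)
{
        struct table *new, *old;

        new = malloc(sizeof(*new));             /* build the replacement first */
        if (!new)
                return -1;
        new->slots = calloc(nr, sizeof(int));
        if (!new->slots) {
                free(new);
                return -1;
        }
        new->nr = nr;

        pthread_mutex_lock(&table_lock);        /* publish it atomically */
        old = current_table;
        current_table = new;
        pthread_mutex_unlock(&table_lock);

        if (old) {                              /* free the old copy last */
                free(old->slots);
                free(old);
        }
        return 0;
}
#endif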
3879 tracing_saved_cmdlines_size_write(struct file
*filp
, const char __user
*ubuf
,
3880 size_t cnt
, loff_t
*ppos
)
3885 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
3889 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3890 if (!val
|| val
> PID_MAX_DEFAULT
)
3893 ret
= tracing_resize_saved_cmdlines((unsigned int)val
);
3902 static const struct file_operations tracing_saved_cmdlines_size_fops
= {
3903 .open
= tracing_open_generic
,
3904 .read
= tracing_saved_cmdlines_size_read
,
3905 .write
= tracing_saved_cmdlines_size_write
,
3909 tracing_set_trace_read(struct file
*filp
, char __user
*ubuf
,
3910 size_t cnt
, loff_t
*ppos
)
3912 struct trace_array
*tr
= filp
->private_data
;
3913 char buf
[MAX_TRACER_SIZE
+2];
3916 mutex_lock(&trace_types_lock
);
3917 r
= sprintf(buf
, "%s\n", tr
->current_trace
->name
);
3918 mutex_unlock(&trace_types_lock
);
3920 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
3923 int tracer_init(struct tracer
*t
, struct trace_array
*tr
)
3925 tracing_reset_online_cpus(&tr
->trace_buffer
);
3929 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
)
3933 for_each_tracing_cpu(cpu
)
3934 per_cpu_ptr(buf
->data
, cpu
)->entries
= val
;
3937 #ifdef CONFIG_TRACER_MAX_TRACE
3938 /* resize @tr's buffer to the size of @size_tr's entries */
3939 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
3940 struct trace_buffer
*size_buf
, int cpu_id
)
3944 if (cpu_id
== RING_BUFFER_ALL_CPUS
) {
3945 for_each_tracing_cpu(cpu
) {
3946 ret
= ring_buffer_resize(trace_buf
->buffer
,
3947 per_cpu_ptr(size_buf
->data
, cpu
)->entries
, cpu
);
3950 per_cpu_ptr(trace_buf
->data
, cpu
)->entries
=
3951 per_cpu_ptr(size_buf
->data
, cpu
)->entries
;
3954 ret
= ring_buffer_resize(trace_buf
->buffer
,
3955 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
, cpu_id
);
3957 per_cpu_ptr(trace_buf
->data
, cpu_id
)->entries
=
3958 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
;
3963 #endif /* CONFIG_TRACER_MAX_TRACE */
3965 static int __tracing_resize_ring_buffer(struct trace_array
*tr
,
3966 unsigned long size
, int cpu
)
3971 * If kernel or user changes the size of the ring buffer
3972 * we use the size that was given, and we can forget about
3973 * expanding it later.
3975 ring_buffer_expanded
= true;
3977 /* May be called before buffers are initialized */
3978 if (!tr
->trace_buffer
.buffer
)
3981 ret
= ring_buffer_resize(tr
->trace_buffer
.buffer
, size
, cpu
);
3985 #ifdef CONFIG_TRACER_MAX_TRACE
3986 if (!(tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) ||
3987 !tr
->current_trace
->use_max_tr
)
3990 ret
= ring_buffer_resize(tr
->max_buffer
.buffer
, size
, cpu
);
3992 int r
= resize_buffer_duplicate_size(&tr
->trace_buffer
,
3993 &tr
->trace_buffer
, cpu
);
                        /*
                         * AARGH! We are left with a different
                         * sized max buffer!
                         * The max buffer is our "snapshot" buffer.
                         * When a tracer needs a snapshot (one of the
                         * latency tracers), it swaps the max buffer
                         * with the saved snapshot. We succeeded in
                         * updating the size of the main buffer, but failed
                         * to update the size of the max buffer. And when we
                         * tried to reset the main buffer to the original
                         * size, we failed there too. This is very unlikely
                         * to happen, but if it does, warn and kill all
                         * tracing.
                         */
                        WARN_ON(1);
                        tracing_disabled = 1;
== RING_BUFFER_ALL_CPUS
)
4016 set_buffer_entries(&tr
->max_buffer
, size
);
4018 per_cpu_ptr(tr
->max_buffer
.data
, cpu
)->entries
= size
;
4021 #endif /* CONFIG_TRACER_MAX_TRACE */
4023 if (cpu
== RING_BUFFER_ALL_CPUS
)
4024 set_buffer_entries(&tr
->trace_buffer
, size
);
4026 per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
= size
;
4031 static ssize_t
tracing_resize_ring_buffer(struct trace_array
*tr
,
4032 unsigned long size
, int cpu_id
)
4036 mutex_lock(&trace_types_lock
);
4038 if (cpu_id
!= RING_BUFFER_ALL_CPUS
) {
4039 /* make sure, this cpu is enabled in the mask */
4040 if (!cpumask_test_cpu(cpu_id
, tracing_buffer_mask
)) {
4046 ret
= __tracing_resize_ring_buffer(tr
, size
, cpu_id
);
4051 mutex_unlock(&trace_types_lock
);
/**
 * tracing_update_buffers - used by the tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers start out at a minimum size. Once a
 * user starts to use the tracing facility, the buffers need to grow to
 * their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
        int ret = 0;

        mutex_lock(&trace_types_lock);
        if (!ring_buffer_expanded)
                ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
                                                RING_BUFFER_ALL_CPUS);
        mutex_unlock(&trace_types_lock);

        return ret;
}
4080 struct trace_option_dentry
;
4082 static struct trace_option_dentry
*
4083 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
);
4086 destroy_trace_option_files(struct trace_option_dentry
*topts
);
4089 * Used to clear out the tracer before deletion of an instance.
4090 * Must have trace_types_lock held.
4092 static void tracing_set_nop(struct trace_array
*tr
)
4094 if (tr
->current_trace
== &nop_trace
)
4097 tr
->current_trace
->enabled
--;
4099 if (tr
->current_trace
->reset
)
4100 tr
->current_trace
->reset(tr
);
4102 tr
->current_trace
= &nop_trace
;
4105 static int tracing_set_tracer(struct trace_array
*tr
, const char *buf
)
4107 static struct trace_option_dentry
*topts
;
4109 #ifdef CONFIG_TRACER_MAX_TRACE
4114 mutex_lock(&trace_types_lock
);
4116 if (!ring_buffer_expanded
) {
4117 ret
= __tracing_resize_ring_buffer(tr
, trace_buf_size
,
4118 RING_BUFFER_ALL_CPUS
);
4124 for (t
= trace_types
; t
; t
= t
->next
) {
4125 if (strcmp(t
->name
, buf
) == 0)
4132 if (t
== tr
->current_trace
)
4135 /* Some tracers are only allowed for the top level buffer */
4136 if (!trace_ok_for_array(t
, tr
)) {
4141 trace_branch_disable();
4143 tr
->current_trace
->enabled
--;
4145 if (tr
->current_trace
->reset
)
4146 tr
->current_trace
->reset(tr
);
4148 /* Current trace needs to be nop_trace before synchronize_sched */
4149 tr
->current_trace
= &nop_trace
;
4151 #ifdef CONFIG_TRACER_MAX_TRACE
4152 had_max_tr
= tr
->allocated_snapshot
;
4154 if (had_max_tr
&& !t
->use_max_tr
) {
                /*
                 * We need to make sure that update_max_tr() sees that
                 * current_trace changed to nop_trace to keep it from
                 * swapping the buffers after we resize it.
                 * update_max_tr() is called with interrupts disabled,
                 * so a synchronize_sched() is sufficient.
                 */
                synchronize_sched();
4166 /* Currently, only the top instance has options */
4167 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) {
4168 destroy_trace_option_files(topts
);
4169 topts
= create_trace_option_files(tr
, t
);
4172 #ifdef CONFIG_TRACER_MAX_TRACE
4173 if (t
->use_max_tr
&& !had_max_tr
) {
4174 ret
= alloc_snapshot(tr
);
4181 ret
= tracer_init(t
, tr
);
4186 tr
->current_trace
= t
;
4187 tr
->current_trace
->enabled
++;
4188 trace_branch_enable(tr
);
4190 mutex_unlock(&trace_types_lock
);
4196 tracing_set_trace_write(struct file
*filp
, const char __user
*ubuf
,
4197 size_t cnt
, loff_t
*ppos
)
4199 struct trace_array
*tr
= filp
->private_data
;
4200 char buf
[MAX_TRACER_SIZE
+1];
4207 if (cnt
> MAX_TRACER_SIZE
)
4208 cnt
= MAX_TRACER_SIZE
;
4210 if (copy_from_user(&buf
, ubuf
, cnt
))
4215 /* strip ending whitespace. */
4216 for (i
= cnt
- 1; i
> 0 && isspace(buf
[i
]); i
--)
4219 err
= tracing_set_tracer(tr
, buf
);
4229 tracing_nsecs_read(unsigned long *ptr
, char __user
*ubuf
,
4230 size_t cnt
, loff_t
*ppos
)
4235 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
4236 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
4237 if (r
> sizeof(buf
))
4239 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4243 tracing_nsecs_write(unsigned long *ptr
, const char __user
*ubuf
,
4244 size_t cnt
, loff_t
*ppos
)
4249 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4259 tracing_thresh_read(struct file
*filp
, char __user
*ubuf
,
4260 size_t cnt
, loff_t
*ppos
)
4262 return tracing_nsecs_read(&tracing_thresh
, ubuf
, cnt
, ppos
);
4266 tracing_thresh_write(struct file
*filp
, const char __user
*ubuf
,
4267 size_t cnt
, loff_t
*ppos
)
4269 struct trace_array
*tr
= filp
->private_data
;
4272 mutex_lock(&trace_types_lock
);
4273 ret
= tracing_nsecs_write(&tracing_thresh
, ubuf
, cnt
, ppos
);
4277 if (tr
->current_trace
->update_thresh
) {
4278 ret
= tr
->current_trace
->update_thresh(tr
);
4285 mutex_unlock(&trace_types_lock
);
4291 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
4292 size_t cnt
, loff_t
*ppos
)
4294 return tracing_nsecs_read(filp
->private_data
, ubuf
, cnt
, ppos
);
4298 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
4299 size_t cnt
, loff_t
*ppos
)
4301 return tracing_nsecs_write(filp
->private_data
, ubuf
, cnt
, ppos
);
4304 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
4306 struct trace_array
*tr
= inode
->i_private
;
4307 struct trace_iterator
*iter
;
4310 if (tracing_disabled
)
4313 if (trace_array_get(tr
) < 0)
4316 mutex_lock(&trace_types_lock
);
4318 /* create a buffer to store the information to pass to userspace */
4319 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
4322 __trace_array_put(tr
);
4327 * We make a copy of the current tracer to avoid concurrent
4328 * changes on it while we are reading.
4330 iter
->trace
= kmalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
4335 *iter
->trace
= *tr
->current_trace
;
4337 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
4342 /* trace pipe does not show start of buffer */
4343 cpumask_setall(iter
->started
);
4345 if (trace_flags
& TRACE_ITER_LATENCY_FMT
)
4346 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4348 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4349 if (trace_clocks
[tr
->clock_id
].in_ns
)
4350 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
4353 iter
->trace_buffer
= &tr
->trace_buffer
;
4354 iter
->cpu_file
= tracing_get_cpu(inode
);
4355 mutex_init(&iter
->mutex
);
4356 filp
->private_data
= iter
;
4358 if (iter
->trace
->pipe_open
)
4359 iter
->trace
->pipe_open(iter
);
4361 nonseekable_open(inode
, filp
);
4363 mutex_unlock(&trace_types_lock
);
4369 __trace_array_put(tr
);
4370 mutex_unlock(&trace_types_lock
);
4374 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
4376 struct trace_iterator
*iter
= file
->private_data
;
4377 struct trace_array
*tr
= inode
->i_private
;
4379 mutex_lock(&trace_types_lock
);
4381 if (iter
->trace
->pipe_close
)
4382 iter
->trace
->pipe_close(iter
);
4384 mutex_unlock(&trace_types_lock
);
4386 free_cpumask_var(iter
->started
);
4387 mutex_destroy(&iter
->mutex
);
4391 trace_array_put(tr
);
4397 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
4399 /* Iterators are static, they should be filled or empty */
4400 if (trace_buffer_iter(iter
, iter
->cpu_file
))
4401 return POLLIN
| POLLRDNORM
;
4403 if (trace_flags
& TRACE_ITER_BLOCK
)
4405 * Always select as readable when in blocking mode
4407 return POLLIN
| POLLRDNORM
;
4409 return ring_buffer_poll_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
4414 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
4416 struct trace_iterator
*iter
= filp
->private_data
;
4418 return trace_poll(iter
, filp
, poll_table
);
4421 /* Must be called with trace_types_lock mutex held. */
4422 static int tracing_wait_pipe(struct file
*filp
)
4424 struct trace_iterator
*iter
= filp
->private_data
;
4427 while (trace_empty(iter
)) {
4429 if ((filp
->f_flags
& O_NONBLOCK
)) {
4434 * We block until we read something and tracing is disabled.
4435 * We still block if tracing is disabled, but we have never
4436 * read anything. This allows a user to cat this file, and
4437 * then enable tracing. But after we have read something,
4438 * we give an EOF when tracing is again disabled.
4440 * iter->pos will be 0 if we haven't read anything.
4442 if (!tracing_is_on() && iter
->pos
)
4445 mutex_unlock(&iter
->mutex
);
4447 ret
= wait_on_pipe(iter
);
4449 mutex_lock(&iter
->mutex
);
4454 if (signal_pending(current
))
4465 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
4466 size_t cnt
, loff_t
*ppos
)
4468 struct trace_iterator
*iter
= filp
->private_data
;
4469 struct trace_array
*tr
= iter
->tr
;
4472 /* return any leftover data */
4473 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4477 trace_seq_init(&iter
->seq
);
4479 /* copy the tracer to avoid using a global lock all around */
4480 mutex_lock(&trace_types_lock
);
4481 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4482 *iter
->trace
= *tr
->current_trace
;
4483 mutex_unlock(&trace_types_lock
);
        /*
         * Avoid more than one consumer on a single file descriptor.
         * This is just a matter of trace coherency; the ring buffer itself
         * is protected.
         */
4490 mutex_lock(&iter
->mutex
);
4491 if (iter
->trace
->read
) {
4492 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
4498 sret
= tracing_wait_pipe(filp
);
4502 /* stop when tracing is finished */
4503 if (trace_empty(iter
)) {
4508 if (cnt
>= PAGE_SIZE
)
4509 cnt
= PAGE_SIZE
- 1;
4511 /* reset all but tr, trace, and overruns */
4512 memset(&iter
->seq
, 0,
4513 sizeof(struct trace_iterator
) -
4514 offsetof(struct trace_iterator
, seq
));
4515 cpumask_clear(iter
->started
);
4518 trace_event_read_lock();
4519 trace_access_lock(iter
->cpu_file
);
4520 while (trace_find_next_entry_inc(iter
) != NULL
) {
4521 enum print_line_t ret
;
4522 int len
= iter
->seq
.len
;
4524 ret
= print_trace_line(iter
);
4525 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4526 /* don't print partial lines */
4527 iter
->seq
.len
= len
;
4530 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4531 trace_consume(iter
);
4533 if (iter
->seq
.len
>= cnt
)
4537 * Setting the full flag means we reached the trace_seq buffer
4538 * size and we should leave by partial output condition above.
4539 * One of the trace_seq_* functions is not used properly.
4541 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
4544 trace_access_unlock(iter
->cpu_file
);
4545 trace_event_read_unlock();
4547 /* Now copy what we have to the user */
4548 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
4549 if (iter
->seq
.readpos
>= iter
->seq
.len
)
4550 trace_seq_init(&iter
->seq
);
4553 * If there was nothing to send to user, in spite of consuming trace
4554 * entries, go back to wait for more entries.
4560 mutex_unlock(&iter
->mutex
);
4565 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
4568 __free_page(spd
->pages
[idx
]);
4571 static const struct pipe_buf_operations tracing_pipe_buf_ops
= {
4573 .confirm
= generic_pipe_buf_confirm
,
4574 .release
= generic_pipe_buf_release
,
4575 .steal
= generic_pipe_buf_steal
,
4576 .get
= generic_pipe_buf_get
,
4580 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
4585 /* Seq buffer is page-sized, exactly what we need. */
4587 count
= iter
->seq
.len
;
4588 ret
= print_trace_line(iter
);
4589 count
= iter
->seq
.len
- count
;
4592 iter
->seq
.len
-= count
;
4595 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
4596 iter
->seq
.len
-= count
;
4600 if (ret
!= TRACE_TYPE_NO_CONSUME
)
4601 trace_consume(iter
);
4603 if (!trace_find_next_entry_inc(iter
)) {
4613 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
4615 struct pipe_inode_info
*pipe
,
4619 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
4620 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
4621 struct trace_iterator
*iter
= filp
->private_data
;
4622 struct splice_pipe_desc spd
= {
4624 .partial
= partial_def
,
4625 .nr_pages
= 0, /* This gets updated below. */
4626 .nr_pages_max
= PIPE_DEF_BUFFERS
,
4628 .ops
= &tracing_pipe_buf_ops
,
4629 .spd_release
= tracing_spd_release_pipe
,
4631 struct trace_array
*tr
= iter
->tr
;
4636 if (splice_grow_spd(pipe
, &spd
))
4639 /* copy the tracer to avoid using a global lock all around */
4640 mutex_lock(&trace_types_lock
);
4641 if (unlikely(iter
->trace
->name
!= tr
->current_trace
->name
))
4642 *iter
->trace
= *tr
->current_trace
;
4643 mutex_unlock(&trace_types_lock
);
4645 mutex_lock(&iter
->mutex
);
4647 if (iter
->trace
->splice_read
) {
4648 ret
= iter
->trace
->splice_read(iter
, filp
,
4649 ppos
, pipe
, len
, flags
);
4654 ret
= tracing_wait_pipe(filp
);
4658 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
4663 trace_event_read_lock();
4664 trace_access_lock(iter
->cpu_file
);
4666 /* Fill as many pages as possible. */
4667 for (i
= 0, rem
= len
; i
< spd
.nr_pages_max
&& rem
; i
++) {
4668 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
4672 rem
= tracing_fill_pipe_page(rem
, iter
);
4674 /* Copy the data into the page, so we can start over. */
4675 ret
= trace_seq_to_buffer(&iter
->seq
,
4676 page_address(spd
.pages
[i
]),
4679 __free_page(spd
.pages
[i
]);
4682 spd
.partial
[i
].offset
= 0;
4683 spd
.partial
[i
].len
= iter
->seq
.len
;
4685 trace_seq_init(&iter
->seq
);
4688 trace_access_unlock(iter
->cpu_file
);
4689 trace_event_read_unlock();
4690 mutex_unlock(&iter
->mutex
);
4694 ret
= splice_to_pipe(pipe
, &spd
);
4696 splice_shrink_spd(&spd
);
4700 mutex_unlock(&iter
->mutex
);
4705 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
4706 size_t cnt
, loff_t
*ppos
)
4708 struct inode
*inode
= file_inode(filp
);
4709 struct trace_array
*tr
= inode
->i_private
;
4710 int cpu
= tracing_get_cpu(inode
);
4715 mutex_lock(&trace_types_lock
);
4717 if (cpu
== RING_BUFFER_ALL_CPUS
) {
4718 int cpu
, buf_size_same
;
4723 /* check if all cpu sizes are same */
4724 for_each_tracing_cpu(cpu
) {
4725 /* fill in the size from first enabled cpu */
4727 size
= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
;
4728 if (size
!= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
) {
4734 if (buf_size_same
) {
4735 if (!ring_buffer_expanded
)
4736 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
4738 trace_buf_size
>> 10);
4740 r
= sprintf(buf
, "%lu\n", size
>> 10);
4742 r
= sprintf(buf
, "X\n");
4744 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10);
4746 mutex_unlock(&trace_types_lock
);
4748 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4753 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
4754 size_t cnt
, loff_t
*ppos
)
4756 struct inode
*inode
= file_inode(filp
);
4757 struct trace_array
*tr
= inode
->i_private
;
4761 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
4765 /* must have at least 1 entry */
4769 /* value is in KB */
4771 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
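/*
 * Illustrative aside (not part of this file): the value written to
 * buffer_size_kb (or to a per-cpu buffer_size_kb file) is interpreted in
 * kilobytes and handed to tracing_resize_ring_buffer() above.  A minimal
 * user-space sketch, assuming the debugfs tracing mount point:
 */
#if 0   /* example only, never compiled */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/tracing/buffer_size_kb", "w");

        if (!f)
                return 1;
        fputs("4096", f);       /* 4 MB per CPU */
        return fclose(f);
}
#endif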
4781 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
4782 size_t cnt
, loff_t
*ppos
)
4784 struct trace_array
*tr
= filp
->private_data
;
4787 unsigned long size
= 0, expanded_size
= 0;
4789 mutex_lock(&trace_types_lock
);
4790 for_each_tracing_cpu(cpu
) {
4791 size
+= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10;
4792 if (!ring_buffer_expanded
)
4793 expanded_size
+= trace_buf_size
>> 10;
4795 if (ring_buffer_expanded
)
4796 r
= sprintf(buf
, "%lu\n", size
);
4798 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
4799 mutex_unlock(&trace_types_lock
);
4801 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
4805 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
4806 size_t cnt
, loff_t
*ppos
)
 * There is no need to read what the user has written; this function only
 * exists so that an "echo" into this file does not return an error.
4819 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
4821 struct trace_array
*tr
= inode
->i_private
;
4823 /* disable tracing ? */
4824 if (trace_flags
& TRACE_ITER_STOP_ON_FREE
)
4825 tracer_tracing_off(tr
);
4826 /* resize the ring buffer to 0 */
4827 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
4829 trace_array_put(tr
);
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory, which
	 * most likely it already is, because the task just referenced it.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic()/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;
	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
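/*
 * Usage sketch (illustrative only, not part of this file): userspace
 * annotates the trace by writing free-form text to trace_marker, which
 * lands in the ring buffer via tracing_mark_write() above. The debugfs
 * path and message are assumptions used for the example.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	if (fd >= 0) {
 *		const char *msg = "frame start";
 *		write(fd, msg, strlen(msg));
 *		close(fd);
 *	}
 */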
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
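/*
 * Usage sketch (illustrative only): the clock is selected by writing one
 * of the names listed in trace_clock; "global" is one of the standard
 * clock names, and the debugfs path is an assumption.
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "global", 6);	(timestamps comparable across CPUs)
 *		close(fd);
 *	}
 */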
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
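/*
 * Usage sketch (illustrative only): the value written to the snapshot file
 * selects the action handled above -- 0 frees the snapshot buffer, 1
 * allocates it if needed and swaps the live buffer into it, and any other
 * value clears the snapshot contents. The debugfs path is an assumption.
 *
 *	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	(take a snapshot of the live buffer)
 *		close(fd);
 *	}
 */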
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}
static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			ret = wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (ret) {
				size = ret;
				goto out_unlock;
			}
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (ret)
			goto out;
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
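/*
 * Usage sketch (illustrative only, not part of this file): userspace can
 * move raw ring-buffer pages without copying by splicing a per-cpu
 * trace_pipe_raw file into a pipe and then into an output file. The
 * paths, CPU number and transfer size below are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int trace_fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			    O_RDONLY);
 *	int out_fd = open("cpu0.raw", O_WRONLY | O_CREAT, 0644);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	ssize_t n = splice(trace_fd, NULL, pfd[1], NULL, 4096, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
 */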
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf + r, (size - 1) - r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob + 1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}
= {
5832 .func
= ftrace_trace_snapshot_callback
,
5835 static __init
int register_snapshot_cmd(void)
5837 return register_ftrace_command(&ftrace_snapshot_cmd
);
5840 static inline __init
int register_snapshot_cmd(void) { return 0; }
5841 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#endif
}
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}
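/*
 * Usage sketch (illustrative only): each core option created above is a
 * boolean file under options/; writing 0 or 1 flips the corresponding
 * trace_flags bit. The option name used here is just an example.
 *
 *	int fd = open("/sys/kernel/debug/tracing/options/sym-offset", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */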
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
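/*
 * Usage sketch (illustrative only): tracing_on is the runtime switch
 * served by rb_simple_read()/rb_simple_write() above; writing 0 stops new
 * events from being recorded without freeing anything. Path is an
 * assumption.
 *
 *	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0", 1);	(freeze the buffer around a region of interest)
 *		close(fd);
 *	}
 */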
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (ret) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
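/*
 * Usage sketch (illustrative only, not part of this file): a new trace
 * instance with its own ring buffer is created by making a directory
 * under instances/, and torn down by removing it. The instance name and
 * paths are assumptions.
 *
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	mkdir("/sys/kernel/debug/tracing/instances/net-debug", 0755);
 *	(use instances/net-debug/trace, tracing_on, events/ ... as usual)
 *	rmdir("/sys/kernel/debug/tracing/instances/net-debug");
 */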
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call	= trace_die_handler,
	.priority	= 200
};
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
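/*
 * Usage sketch (illustrative only): ftrace_dump() is normally driven by
 * the panic/die notifiers above when ftrace_dump_on_oops is set, either
 * on the kernel command line or through the sysctl shown here; the path
 * is an assumption.
 *
 *	int fd = open("/proc/sys/kernel/ftrace_dump_on_oops", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	(dump all CPU buffers on an oops)
 *		close(fd);
 *	}
 */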
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	init_ftrace_syscalls();
	trace_event_init();
}
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);