/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
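
/*
 * A minimal usage sketch of the primitives above, assuming a hypothetical
 * per-cpu reader: whole-buffer readers pass RING_BUFFER_ALL_CPUS and take
 * the rwsem for write; per-cpu readers nest the cpu mutex under a read
 * hold of the rwsem. The consume step is elided here; the real callers
 * are the trace_pipe read and splice paths further below.
 */
static __maybe_unused void trace_access_example(int cpu)
{
	trace_access_lock(cpu);
	/* ... peek or consume events of @cpu here ... */
	trace_access_unlock(cpu);
}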
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
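
/*
 * Sketch of how a debugging hook might use the snapshot API above:
 * tracing_alloc_snapshot() is called once from sleepable (init) context,
 * and tracing_snapshot() is then safe to call from the hot path being
 * chased. The saw_bad_state condition is hypothetical.
 */
static __maybe_unused void snapshot_usage_example(bool saw_bad_state)
{
	/* earlier, from init: tracing_alloc_snapshot(); */
	if (saw_bad_state)
		tracing_snapshot();	/* swap live buffer with the spare */
}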
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	/* ... option name strings elided ... */
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	0 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1481 static int trace_save_cmdline(struct task_struct
*tsk
)
1485 if (!tsk
->pid
|| unlikely(tsk
->pid
> PID_MAX_DEFAULT
))
1489 * It's not the end of the world if we don't get
1490 * the lock, but we also don't want to spin
1491 * nor do we want to disable interrupts,
1492 * so if we miss here, then better luck next time.
1494 if (!arch_spin_trylock(&trace_cmdline_lock
))
1497 idx
= savedcmd
->map_pid_to_cmdline
[tsk
->pid
];
1498 if (idx
== NO_CMDLINE_MAP
) {
1499 idx
= (savedcmd
->cmdline_idx
+ 1) % savedcmd
->cmdline_num
;
1502 * Check whether the cmdline buffer at idx has a pid
1503 * mapped. We are going to overwrite that entry so we
1504 * need to clear the map_pid_to_cmdline. Otherwise we
1505 * would read the new comm for the old pid.
1507 pid
= savedcmd
->map_cmdline_to_pid
[idx
];
1508 if (pid
!= NO_CMDLINE_MAP
)
1509 savedcmd
->map_pid_to_cmdline
[pid
] = NO_CMDLINE_MAP
;
1511 savedcmd
->map_cmdline_to_pid
[idx
] = tsk
->pid
;
1512 savedcmd
->map_pid_to_cmdline
[tsk
->pid
] = idx
;
1514 savedcmd
->cmdline_idx
= idx
;
1517 set_cmdline(idx
, tsk
->comm
);
1519 arch_spin_unlock(&trace_cmdline_lock
);
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct ftrace_event_file *ftrace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip		= ip;
	entry->parent_ip	= parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip	= ip;
	entry->fmt	= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all the CPUs; peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2481 get_total_entries(struct trace_buffer
*buf
,
2482 unsigned long *total
, unsigned long *entries
)
2484 unsigned long count
;
2490 for_each_tracing_cpu(cpu
) {
2491 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
2493 * If this buffer has skipped entries, then we hold all
2494 * entries for the trace and we need to ignore the
2495 * ones before the time stamp.
2497 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
2498 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
2499 /* total is the same as the entries */
2503 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
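/*
 * Example (illustrative sketch): trace_create_cpu_file() stores cpu + 1
 * in i_cdev so that cpu 0 stays distinguishable from a NULL pointer,
 * which is why tracing_get_cpu() subtracts one above:
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 */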
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}
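/*
 * Example (illustrative): set_tracer_flag() is what ultimately runs when
 * user-space flips a flag through the options directory, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/tracing/options/overwrite
 *
 * which clears TRACE_ITER_OVERWRITE so the ring buffer stops overwriting
 * old events once it is full.
 */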
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}
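/*
 * Example (illustrative): trace_set_options() parses one option name per
 * write, with a "no" prefix to clear it:
 *
 *	echo sym-offset > trace_options
 *	echo nosym-offset > trace_options
 */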
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t- change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do_trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
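/*
 * Example (illustrative): growing the comm-to-pid cache beyond its
 * default number of entries so long-running traces resolve more comms:
 *
 *	echo 1024 > saved_cmdlines_size
 */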
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different size max
			 * buffers!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded to
			 * update the size of the main buffer, but failed
			 * to update the size of the max buffer. And when
			 * we tried to reset the main buffer to its
			 * original size, we failed there too. This is
			 * very unlikely to happen, but if it does, warn
			 * and kill all tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. Once
 * a user starts to use the tracing facility, the buffers need to
 * grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	static struct trace_option_dentry *topts;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	/* Currently, only the top instance has options */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		destroy_trace_option_files(topts);
		topts = create_trace_option_files(tr, t);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
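/*
 * Example (illustrative): selecting a tracer from user-space goes through
 * tracing_set_trace_write() above:
 *
 *	echo function > current_tracer
 *	echo nop > current_tracer	# back to no tracer
 */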
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}
/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;

		if (signal_pending(current))
			return -EINTR;
	}

	return 1;
}
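/*
 * Example (illustrative): this wait loop is why "cat trace_pipe" blocks
 * on an empty buffer instead of returning EOF. A non-blocking reader
 * gets -EAGAIN instead:
 *
 *	int fd = open("trace_pipe", O_RDONLY | O_NONBLOCK);
 *	read(fd, buf, sizeof(buf));	// -1 with errno == EAGAIN when empty
 */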
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}

		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
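/*
 * Example (illustrative): resizing from user-space; the top-level file
 * resizes every CPU, while the per_cpu files resize a single CPU:
 *
 *	echo 2048 > buffer_size_kb			# all CPUs, 2 MB each
 *	echo 4096 > per_cpu/cpu0/buffer_size_kb		# cpu 0 only
 */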
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it already is, since userspace just
	 * referenced it. But there's no guarantee that it is. By using
	 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we can
	 * get access to the pages directly. We then write the data
	 * directly into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
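
/*
 * Example (illustrative userspace sketch, not part of this file): a
 * marker is written with a plain write() on trace_marker; each write
 * becomes one TRACE_PRINT entry via tracing_mark_write() above:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "hello from userspace\n", 21);
 *		close(fd);
 *	}
 */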
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
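
/*
 * Example (illustrative; the clock list depends on kernel version and
 * architecture): tracing_clock_show() prints the available clocks with
 * the current one in brackets, and writing a name switches clocks and
 * resets the buffers as described above:
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */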
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
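
/*
 * Example (illustrative): the values handled above give the snapshot
 * file its documented semantics:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/snapshot	(allocate and swap)
 *	# cat /sys/kernel/debug/tracing/snapshot	(read the swapped-out buffer)
 *	# echo 2 > /sys/kernel/debug/tracing/snapshot	(clear it, keep it allocated)
 *	# echo 0 > /sys/kernel/debug/tracing/snapshot	(free the snapshot buffer)
 */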
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			ret = wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (ret) {
				size = ret;
				goto out_unlock;
			}
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (ret)
			goto out;
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
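
/*
 * Example (illustrative userspace sketch): trace_pipe_raw is meant to
 * be splice()d page by page into a pipe or file, avoiding an extra
 * copy of the ring buffer pages. Assuming a pipe fd pair pfd[2] and
 * the usual debugfs mount point:
 *
 *	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	ssize_t n = splice(fd, NULL, pfd[1], NULL, 4096, 0);
 *
 * Offsets and lengths must be page aligned, as enforced above.
 */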
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
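
/*
 * Example (illustrative; the numbers are of course workload dependent)
 * of the per-cpu stats output assembled above:
 *
 *	# cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:  1396.235423
 *	now ts:  1401.548843
 *	dropped events: 0
 *	read events: 129
 */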
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
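
/*
 * Example (illustrative): the "snapshot" function command registered
 * above hooks into set_ftrace_filter and takes an optional count:
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:5' > set_ftrace_filter
 *	# echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The first form snapshots on every call to schedule(), the second
 * only for the next five calls, and the '!' form removes the probe.
 */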
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
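
/*
 * The function above gives each cpu its own directory, e.g. for cpu 0
 * (paths illustrative, assuming debugfs at /sys/kernel/debug):
 *
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */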
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
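
/*
 * Example (illustrative): each core option is a 0/1 file under
 * options/, and the same flags can be toggled by name through the
 * trace_options file:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/options/print-parent
 *	# echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 */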
static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				    &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
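
/*
 * Example (illustrative): tracing_on is the usual way to gate tracing
 * around a region of interest without changing any other state:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	... set up the workload ...
 *	# echo 1 > /sys/kernel/debug/tracing/tracing_on
 */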
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (ret) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to remove the same dir at
	 * the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}
static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
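
/*
 * Example (illustrative): because mkdir/rmdir are hijacked above, a
 * plain mkdir creates a fully independent trace instance with its own
 * buffers and control files:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 */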
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
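
/*
 * Example (illustrative): besides the oops/panic notifiers above,
 * ftrace_dump() may be called directly from debugging code to spill
 * the buffers to the console:
 *
 *	if (WARN_ON(broken_invariant))
 *		ftrace_dump(DUMP_ALL);
 *
 * where broken_invariant stands in for whatever hypothetical condition
 * is being chased.
 */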
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);

	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default bootup tracer lives in an init section
	 * and will be freed. This function is called at late_initcall
	 * time; if the boot tracer was never registered, clear it out to
	 * prevent a later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);