tracing: Add build bug if we have more trace_flags than bits
kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
2cadf913 28#include <linux/kprobes.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
8bd75c77 43#include <linux/sched/rt.h>
86387f7e 44
bc0c38d1 45#include "trace.h"
f0868d1e 46#include "trace_output.h"
bc0c38d1 47
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer (such as trace_printk()) could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

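/*
 * Illustrative note (not part of the original source): the __setup()
 * handlers above parse kernel command line parameters, so a boot line like
 *
 *	ftrace=function ftrace_dump_on_oops=orig_cpu trace_clock=global
 *	alloc_snapshot tp_printk traceoff_on_warning
 *
 * selects a boot-up tracer, dumps only the oopsing CPU's buffer on an oops,
 * switches the trace clock, pre-allocates the snapshot buffer, mirrors
 * tracepoints to printk, and stops tracing at the first WARN*(). The name
 * "function" is assumed here to be an available tracer.
 */
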
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

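/*
 * Illustrative usage (a sketch, not taken from this file): code that hands
 * a trace_array out to user space typically brackets the access with the
 * reference helpers above:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;		// instance is going away
 *	...use tr...
 *	trace_array_put(tr);
 */
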
int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags =
	FUNCTION_DEFAULT_FLAGS | FUNCTION_GRAPH_DEFAULT_FLAGS |
	TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |
	TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS
	;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

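/*
 * Illustrative note: callers normally use the trace_puts() macro from
 * <linux/kernel.h> rather than calling these helpers directly; it resolves
 * to __trace_bputs() for compile-time constant strings and to __trace_puts()
 * otherwise, e.g.
 *
 *	trace_puts("hit the slow path\n");
 */
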
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer, we just resize it.
	 * The max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

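/*
 * Illustrative usage (a sketch; assumes CONFIG_TRACER_SNAPSHOT=y and a
 * hypothetical detected_bad_state() condition): a subsystem can freeze the
 * trace around a rare event and keep tracing afterwards:
 *
 *	tracing_snapshot_alloc();	// where sleeping is allowed
 *	...
 *	if (detected_bad_state())
 *		tracing_snapshot();	// safe outside of NMI context
 *
 * The captured data can then be read from the tracefs "snapshot" file.
 */
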
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

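/*
 * Illustrative usage (a sketch; do_something_suspicious() is hypothetical):
 * kernel code debugging a rare condition can bracket just the interesting
 * window, mirroring what writing 1/0 to the tracefs "tracing_on" file does:
 *
 *	tracing_on();
 *	do_something_suspicious();
 *	tracing_off();
 */
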
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

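/*
 * Illustrative expansion (assuming TRACE_FLAGS in trace.h contains entries
 * such as C(PRINT_PARENT, "print-parent") and C(SYM_OFFSET, "sym-offset")):
 * with "#define C(a, b) b" the initializer above becomes
 *
 *	static const char *trace_options[] = {
 *		"print-parent",
 *		"sym-offset",
 *		...
 *		NULL
 *	};
 *
 * while trace.h redefines C(a, b) differently to build the matching
 * TRACE_ITER_* bits from the very same list, which keeps the strings
 * and the bit positions in sync by construction.
 */
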
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};

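/*
 * Illustrative note: one of the clocks above is selected either with the
 * trace_clock= boot parameter handled earlier in this file, or at run time
 * through the tracefs "trace_clock" file, e.g.
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */
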
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

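/*
 * Illustrative caller pattern (a sketch, not from this file): a tracefs
 * ->write() handler typically drives the parser like
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, size))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		ret = process_token(parser.buffer);	// hypothetical helper
 *	trace_parser_put(&parser);
 *
 * where "size" is the maximum token length the handler accepts.
 */
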
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

1149
f4e781c0
SRRH
1150#ifdef CONFIG_FTRACE_STARTUP_TEST
1151static int run_tracer_selftest(struct tracer *type)
1152{
1153 struct trace_array *tr = &global_trace;
1154 struct tracer *saved_tracer = tr->current_trace;
1155 int ret;
0d5c6e1c 1156
f4e781c0
SRRH
1157 if (!type->selftest || tracing_selftest_disabled)
1158 return 0;
0d5c6e1c
SR
1159
1160 /*
f4e781c0
SRRH
1161 * Run a selftest on this tracer.
1162 * Here we reset the trace buffer, and set the current
1163 * tracer to be this tracer. The tracer can then run some
1164 * internal tracing to verify that everything is in order.
1165 * If we fail, we do not register this tracer.
0d5c6e1c 1166 */
f4e781c0 1167 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1168
f4e781c0
SRRH
1169 tr->current_trace = type;
1170
1171#ifdef CONFIG_TRACER_MAX_TRACE
1172 if (type->use_max_tr) {
1173 /* If we expanded the buffers, make sure the max is expanded too */
1174 if (ring_buffer_expanded)
1175 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1176 RING_BUFFER_ALL_CPUS);
1177 tr->allocated_snapshot = true;
1178 }
1179#endif
1180
1181 /* the test is responsible for initializing and enabling */
1182 pr_info("Testing tracer %s: ", type->name);
1183 ret = type->selftest(type, tr);
1184 /* the test is responsible for resetting too */
1185 tr->current_trace = saved_tracer;
1186 if (ret) {
1187 printk(KERN_CONT "FAILED!\n");
1188 /* Add the warning after printing 'FAILED' */
1189 WARN_ON(1);
1190 return -1;
1191 }
1192 /* Only reset on passing, to avoid touching corrupted buffers */
1193 tracing_reset_online_cpus(&tr->trace_buffer);
1194
1195#ifdef CONFIG_TRACER_MAX_TRACE
1196 if (type->use_max_tr) {
1197 tr->allocated_snapshot = false;
0d5c6e1c 1198
f4e781c0
SRRH
1199 /* Shrink the max buffer again */
1200 if (ring_buffer_expanded)
1201 ring_buffer_resize(tr->max_buffer.buffer, 1,
1202 RING_BUFFER_ALL_CPUS);
1203 }
1204#endif
1205
1206 printk(KERN_CONT "PASSED\n");
1207 return 0;
1208}
1209#else
1210static inline int run_tracer_selftest(struct tracer *type)
1211{
1212 return 0;
0d5c6e1c 1213}
f4e781c0 1214#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1215
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

1293
12883efb 1294void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1295{
12883efb 1296 struct ring_buffer *buffer = buf->buffer;
f633903a 1297
a5416411
HT
1298 if (!buffer)
1299 return;
1300
f633903a
SR
1301 ring_buffer_record_disable(buffer);
1302
1303 /* Make sure all commits have finished */
1304 synchronize_sched();
68179686 1305 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1306
1307 ring_buffer_record_enable(buffer);
1308}
1309
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

1330
09d8091c 1331/* Must have trace_types_lock held */
873c642f 1332void tracing_reset_all_online_cpus(void)
9456f0fa 1333{
873c642f
SRRH
1334 struct trace_array *tr;
1335
873c642f 1336 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1337 tracing_reset_online_cpus(&tr->trace_buffer);
1338#ifdef CONFIG_TRACER_MAX_TRACE
1339 tracing_reset_online_cpus(&tr->max_buffer);
1340#endif
873c642f 1341 }
9456f0fa
SR
1342}
1343
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

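/*
 * Illustrative write sequence (a sketch; TRACE_MYEVENT and the entry type
 * are hypothetical): writers reserve space, fill in the entry, then commit:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_MYEVENT,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	__buffer_unlock_commit(buffer, event);
 *
 * trace_function() below follows exactly this pattern, with an optional
 * filter check that may discard the event instead of committing it.
 */
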
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}

1874static inline void ftrace_trace_stack(struct ring_buffer *buffer,
1875 unsigned long flags,
1876 int skip, int pc, struct pt_regs *regs)
53614991
SR
1877{
1878 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1879 return;
1880
73dddbb5 1881 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
1882}
1883
c0a0d0d3
FW
1884void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1885 int pc)
38697053 1886{
12883efb 1887 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1888}
1889
03889384
SR
1890/**
1891 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1892 * @skip: Number of functions to skip (helper handlers)
03889384 1893 */
c142be8e 1894void trace_dump_stack(int skip)
03889384
SR
1895{
1896 unsigned long flags;
1897
1898 if (tracing_disabled || tracing_selftest_running)
e36c5458 1899 return;
03889384
SR
1900
1901 local_save_flags(flags);
1902
c142be8e
SRRH
1903 /*
 1904 * Skip 3 more, which seems to get us to the caller of
 1905 * this function.
1906 */
1907 skip += 3;
1908 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1909 flags, skip, preempt_count(), NULL);
03889384
SR
1910}
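A minimal usage sketch, assuming ordinary kernel code (the caller below is hypothetical): passing skip = 0 records the stack starting at the caller, since the helper above already compensates for its own internal frames.

/* Hedged example: dump the current kernel stack into the trace buffer. */
#include <linux/kernel.h>

static void my_debug_path(void)		/* hypothetical caller */
{
	trace_dump_stack(0);
}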
1911
91e86e56
SR
1912static DEFINE_PER_CPU(int, user_stack_count);
1913
e77405ad
SR
1914void
1915ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1916{
2425bcb9 1917 struct trace_event_call *call = &event_user_stack;
8d7c6a96 1918 struct ring_buffer_event *event;
02b67518
TE
1919 struct userstack_entry *entry;
1920 struct stack_trace trace;
02b67518
TE
1921
1922 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1923 return;
1924
b6345879
SR
1925 /*
 1926 * NMIs cannot handle page faults, even with fixups.
 1927 * Saving the user stack can (and often does) fault.
1928 */
1929 if (unlikely(in_nmi()))
1930 return;
02b67518 1931
91e86e56
SR
1932 /*
1933 * prevent recursion, since the user stack tracing may
1934 * trigger other kernel events.
1935 */
1936 preempt_disable();
1937 if (__this_cpu_read(user_stack_count))
1938 goto out;
1939
1940 __this_cpu_inc(user_stack_count);
1941
e77405ad 1942 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1943 sizeof(*entry), flags, pc);
02b67518 1944 if (!event)
1dbd1951 1945 goto out_drop_count;
02b67518 1946 entry = ring_buffer_event_data(event);
02b67518 1947
48659d31 1948 entry->tgid = current->tgid;
02b67518
TE
1949 memset(&entry->caller, 0, sizeof(entry->caller));
1950
1951 trace.nr_entries = 0;
1952 trace.max_entries = FTRACE_STACK_ENTRIES;
1953 trace.skip = 0;
1954 trace.entries = entry->caller;
1955
1956 save_stack_trace_user(&trace);
f306cc82 1957 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1958 __buffer_unlock_commit(buffer, event);
91e86e56 1959
1dbd1951 1960 out_drop_count:
91e86e56 1961 __this_cpu_dec(user_stack_count);
91e86e56
SR
1962 out:
1963 preempt_enable();
02b67518
TE
1964}
1965
4fd27358
HE
1966#ifdef UNUSED
1967static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1968{
7be42151 1969 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1970}
4fd27358 1971#endif /* UNUSED */
02b67518 1972
c0a0d0d3
FW
1973#endif /* CONFIG_STACKTRACE */
1974
07d777fe
SR
1975/* created for use with alloc_percpu */
1976struct trace_buffer_struct {
1977 char buffer[TRACE_BUF_SIZE];
1978};
1979
1980static struct trace_buffer_struct *trace_percpu_buffer;
1981static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1982static struct trace_buffer_struct *trace_percpu_irq_buffer;
1983static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1984
1985/*
1986 * The buffer used is dependent on the context. There is a per cpu
 1987 * buffer for normal context, softirq context, hard irq context and
 1988 * for NMI context. This allows for lockless recording.
1989 *
1990 * Note, if the buffers failed to be allocated, then this returns NULL
1991 */
1992static char *get_trace_buf(void)
1993{
1994 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1995
1996 /*
1997 * If we have allocated per cpu buffers, then we do not
1998 * need to do any locking.
1999 */
2000 if (in_nmi())
2001 percpu_buffer = trace_percpu_nmi_buffer;
2002 else if (in_irq())
2003 percpu_buffer = trace_percpu_irq_buffer;
2004 else if (in_softirq())
2005 percpu_buffer = trace_percpu_sirq_buffer;
2006 else
2007 percpu_buffer = trace_percpu_buffer;
2008
2009 if (!percpu_buffer)
2010 return NULL;
2011
d8a0349c 2012 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2013}
2014
2015static int alloc_percpu_trace_buffer(void)
2016{
2017 struct trace_buffer_struct *buffers;
2018 struct trace_buffer_struct *sirq_buffers;
2019 struct trace_buffer_struct *irq_buffers;
2020 struct trace_buffer_struct *nmi_buffers;
2021
2022 buffers = alloc_percpu(struct trace_buffer_struct);
2023 if (!buffers)
2024 goto err_warn;
2025
2026 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2027 if (!sirq_buffers)
2028 goto err_sirq;
2029
2030 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2031 if (!irq_buffers)
2032 goto err_irq;
2033
2034 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2035 if (!nmi_buffers)
2036 goto err_nmi;
2037
2038 trace_percpu_buffer = buffers;
2039 trace_percpu_sirq_buffer = sirq_buffers;
2040 trace_percpu_irq_buffer = irq_buffers;
2041 trace_percpu_nmi_buffer = nmi_buffers;
2042
2043 return 0;
2044
2045 err_nmi:
2046 free_percpu(irq_buffers);
2047 err_irq:
2048 free_percpu(sirq_buffers);
2049 err_sirq:
2050 free_percpu(buffers);
2051 err_warn:
2052 WARN(1, "Could not allocate percpu trace_printk buffer");
2053 return -ENOMEM;
2054}
2055
81698831
SR
2056static int buffers_allocated;
2057
07d777fe
SR
2058void trace_printk_init_buffers(void)
2059{
07d777fe
SR
2060 if (buffers_allocated)
2061 return;
2062
2063 if (alloc_percpu_trace_buffer())
2064 return;
2065
2184db46
SR
2066 /* trace_printk() is for debug use only. Don't use it in production. */
2067
69a1c994
BP
2068 pr_warning("\n");
2069 pr_warning("**********************************************************\n");
2184db46
SR
2070 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2071 pr_warning("** **\n");
2072 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2073 pr_warning("** **\n");
2074 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2075 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2076 pr_warning("** **\n");
2077 pr_warning("** If you see this message and you are not debugging **\n");
2078 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2079 pr_warning("** **\n");
2080 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2081 pr_warning("**********************************************************\n");
07d777fe 2082
b382ede6
SR
2083 /* Expand the buffers to set size */
2084 tracing_update_buffers();
2085
07d777fe 2086 buffers_allocated = 1;
81698831
SR
2087
2088 /*
2089 * trace_printk_init_buffers() can be called by modules.
2090 * If that happens, then we need to start cmdline recording
2091 * directly here. If the global_trace.buffer is already
2092 * allocated here, then this was called by module code.
2093 */
12883efb 2094 if (global_trace.trace_buffer.buffer)
81698831
SR
2095 tracing_start_cmdline_record();
2096}
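For context, a hedged sketch of the kind of caller that ends up here: any trace_printk() user (the module below is hypothetical) causes these buffers to be allocated and the banner above to be printed.

/* Hedged example: a debug-only trace_printk() user. */
#include <linux/kernel.h>
#include <linux/module.h>

static int __init my_debug_init(void)	/* hypothetical module init */
{
	/* First use allocates the per-context buffers and prints the banner. */
	trace_printk("my_debug loaded, %u CPUs online\n", num_online_cpus());
	return 0;
}
module_init(my_debug_init);
MODULE_LICENSE("GPL");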
2097
2098void trace_printk_start_comm(void)
2099{
2100 /* Start tracing comms if trace printk is set */
2101 if (!buffers_allocated)
2102 return;
2103 tracing_start_cmdline_record();
2104}
2105
2106static void trace_printk_start_stop_comm(int enabled)
2107{
2108 if (!buffers_allocated)
2109 return;
2110
2111 if (enabled)
2112 tracing_start_cmdline_record();
2113 else
2114 tracing_stop_cmdline_record();
07d777fe
SR
2115}
2116
769b0441 2117/**
48ead020 2118 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
 2119 * @ip: The address of the caller
 * @fmt: The format string to use
 * @args: Arguments for @fmt (stored in the buffer in binary form)
 2120 */
40ce74f1 2121int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2122{
2425bcb9 2123 struct trace_event_call *call = &event_bprint;
769b0441 2124 struct ring_buffer_event *event;
e77405ad 2125 struct ring_buffer *buffer;
769b0441 2126 struct trace_array *tr = &global_trace;
48ead020 2127 struct bprint_entry *entry;
769b0441 2128 unsigned long flags;
07d777fe
SR
2129 char *tbuffer;
2130 int len = 0, size, pc;
769b0441
FW
2131
2132 if (unlikely(tracing_selftest_running || tracing_disabled))
2133 return 0;
2134
2135 /* Don't pollute graph traces with trace_vprintk internals */
2136 pause_graph_tracing();
2137
2138 pc = preempt_count();
5168ae50 2139 preempt_disable_notrace();
769b0441 2140
07d777fe
SR
2141 tbuffer = get_trace_buf();
2142 if (!tbuffer) {
2143 len = 0;
769b0441 2144 goto out;
07d777fe 2145 }
769b0441 2146
07d777fe 2147 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2148
07d777fe
SR
2149 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2150 goto out;
769b0441 2151
07d777fe 2152 local_save_flags(flags);
769b0441 2153 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2154 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2155 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2156 flags, pc);
769b0441 2157 if (!event)
07d777fe 2158 goto out;
769b0441
FW
2159 entry = ring_buffer_event_data(event);
2160 entry->ip = ip;
769b0441
FW
2161 entry->fmt = fmt;
2162
07d777fe 2163 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2164 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2165 __buffer_unlock_commit(buffer, event);
73dddbb5 2166 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
d931369b 2167 }
769b0441 2168
769b0441 2169out:
5168ae50 2170 preempt_enable_notrace();
769b0441
FW
2171 unpause_graph_tracing();
2172
2173 return len;
2174}
48ead020
FW
2175EXPORT_SYMBOL_GPL(trace_vbprintk);
2176
12883efb
SRRH
2177static int
2178__trace_array_vprintk(struct ring_buffer *buffer,
2179 unsigned long ip, const char *fmt, va_list args)
48ead020 2180{
2425bcb9 2181 struct trace_event_call *call = &event_print;
48ead020 2182 struct ring_buffer_event *event;
07d777fe 2183 int len = 0, size, pc;
48ead020 2184 struct print_entry *entry;
07d777fe
SR
2185 unsigned long flags;
2186 char *tbuffer;
48ead020
FW
2187
2188 if (tracing_disabled || tracing_selftest_running)
2189 return 0;
2190
07d777fe
SR
2191 /* Don't pollute graph traces with trace_vprintk internals */
2192 pause_graph_tracing();
2193
48ead020
FW
2194 pc = preempt_count();
2195 preempt_disable_notrace();
48ead020 2196
07d777fe
SR
2197
2198 tbuffer = get_trace_buf();
2199 if (!tbuffer) {
2200 len = 0;
48ead020 2201 goto out;
07d777fe 2202 }
48ead020 2203
3558a5ac 2204 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2205
07d777fe 2206 local_save_flags(flags);
48ead020 2207 size = sizeof(*entry) + len + 1;
e77405ad 2208 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2209 flags, pc);
48ead020 2210 if (!event)
07d777fe 2211 goto out;
48ead020 2212 entry = ring_buffer_event_data(event);
c13d2f7c 2213 entry->ip = ip;
48ead020 2214
3558a5ac 2215 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2216 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2217 __buffer_unlock_commit(buffer, event);
73dddbb5 2218 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
d931369b 2219 }
48ead020
FW
2220 out:
2221 preempt_enable_notrace();
07d777fe 2222 unpause_graph_tracing();
48ead020
FW
2223
2224 return len;
2225}
659372d3 2226
12883efb
SRRH
2227int trace_array_vprintk(struct trace_array *tr,
2228 unsigned long ip, const char *fmt, va_list args)
2229{
2230 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2231}
2232
2233int trace_array_printk(struct trace_array *tr,
2234 unsigned long ip, const char *fmt, ...)
2235{
2236 int ret;
2237 va_list ap;
2238
2239 if (!(trace_flags & TRACE_ITER_PRINTK))
2240 return 0;
2241
2242 va_start(ap, fmt);
2243 ret = trace_array_vprintk(tr, ip, fmt, ap);
2244 va_end(ap);
2245 return ret;
2246}
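A hedged sketch of a caller, assuming code inside the tracing core where struct trace_array and trace_array_printk() are visible (the wrapper below is hypothetical); _THIS_IP_ supplies the callsite address stored in the entry.

/* Hedged example: write a formatted line into a specific instance buffer. */
static void my_note_event(struct trace_array *tr, int id)	/* hypothetical */
{
	trace_array_printk(tr, _THIS_IP_, "handled event %d\n", id);
}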
2247
2248int trace_array_printk_buf(struct ring_buffer *buffer,
2249 unsigned long ip, const char *fmt, ...)
2250{
2251 int ret;
2252 va_list ap;
2253
2254 if (!(trace_flags & TRACE_ITER_PRINTK))
2255 return 0;
2256
2257 va_start(ap, fmt);
2258 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2259 va_end(ap);
2260 return ret;
2261}
2262
659372d3
SR
2263int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2264{
a813a159 2265 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2266}
769b0441
FW
2267EXPORT_SYMBOL_GPL(trace_vprintk);
2268
e2ac8ef5 2269static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2270{
6d158a81
SR
2271 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2272
5a90f577 2273 iter->idx++;
6d158a81
SR
2274 if (buf_iter)
2275 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2276}
2277
e309b41d 2278static struct trace_entry *
bc21b478
SR
2279peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2280 unsigned long *lost_events)
dd0e545f 2281{
3928a8a2 2282 struct ring_buffer_event *event;
6d158a81 2283 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2284
d769041f
SR
2285 if (buf_iter)
2286 event = ring_buffer_iter_peek(buf_iter, ts);
2287 else
12883efb 2288 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2289 lost_events);
d769041f 2290
4a9bd3f1
SR
2291 if (event) {
2292 iter->ent_size = ring_buffer_event_length(event);
2293 return ring_buffer_event_data(event);
2294 }
2295 iter->ent_size = 0;
2296 return NULL;
dd0e545f 2297}
d769041f 2298
dd0e545f 2299static struct trace_entry *
bc21b478
SR
2300__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2301 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2302{
12883efb 2303 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2304 struct trace_entry *ent, *next = NULL;
aa27497c 2305 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2306 int cpu_file = iter->cpu_file;
3928a8a2 2307 u64 next_ts = 0, ts;
bc0c38d1 2308 int next_cpu = -1;
12b5da34 2309 int next_size = 0;
bc0c38d1
SR
2310 int cpu;
2311
b04cc6b1
FW
2312 /*
 2313 * If we are in a per_cpu trace file, don't bother iterating over
 2314 * all CPUs; just peek at that one directly.
2315 */
ae3b5093 2316 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2317 if (ring_buffer_empty_cpu(buffer, cpu_file))
2318 return NULL;
bc21b478 2319 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2320 if (ent_cpu)
2321 *ent_cpu = cpu_file;
2322
2323 return ent;
2324 }
2325
ab46428c 2326 for_each_tracing_cpu(cpu) {
dd0e545f 2327
3928a8a2
SR
2328 if (ring_buffer_empty_cpu(buffer, cpu))
2329 continue;
dd0e545f 2330
bc21b478 2331 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2332
cdd31cd2
IM
2333 /*
2334 * Pick the entry with the smallest timestamp:
2335 */
3928a8a2 2336 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2337 next = ent;
2338 next_cpu = cpu;
3928a8a2 2339 next_ts = ts;
bc21b478 2340 next_lost = lost_events;
12b5da34 2341 next_size = iter->ent_size;
bc0c38d1
SR
2342 }
2343 }
2344
12b5da34
SR
2345 iter->ent_size = next_size;
2346
bc0c38d1
SR
2347 if (ent_cpu)
2348 *ent_cpu = next_cpu;
2349
3928a8a2
SR
2350 if (ent_ts)
2351 *ent_ts = next_ts;
2352
bc21b478
SR
2353 if (missing_events)
2354 *missing_events = next_lost;
2355
bc0c38d1
SR
2356 return next;
2357}
2358
dd0e545f 2359/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2360struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2361 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2362{
bc21b478 2363 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2364}
2365
2366/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2367void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2368{
bc21b478
SR
2369 iter->ent = __find_next_entry(iter, &iter->cpu,
2370 &iter->lost_events, &iter->ts);
dd0e545f 2371
3928a8a2 2372 if (iter->ent)
e2ac8ef5 2373 trace_iterator_increment(iter);
dd0e545f 2374
3928a8a2 2375 return iter->ent ? iter : NULL;
b3806b43 2376}
bc0c38d1 2377
e309b41d 2378static void trace_consume(struct trace_iterator *iter)
b3806b43 2379{
12883efb 2380 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2381 &iter->lost_events);
bc0c38d1
SR
2382}
2383
e309b41d 2384static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2385{
2386 struct trace_iterator *iter = m->private;
bc0c38d1 2387 int i = (int)*pos;
4e3c3333 2388 void *ent;
bc0c38d1 2389
a63ce5b3
SR
2390 WARN_ON_ONCE(iter->leftover);
2391
bc0c38d1
SR
2392 (*pos)++;
2393
2394 /* can't go backwards */
2395 if (iter->idx > i)
2396 return NULL;
2397
2398 if (iter->idx < 0)
955b61e5 2399 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2400 else
2401 ent = iter;
2402
2403 while (ent && iter->idx < i)
955b61e5 2404 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2405
2406 iter->pos = *pos;
2407
bc0c38d1
SR
2408 return ent;
2409}
2410
955b61e5 2411void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2412{
2f26ebd5
SR
2413 struct ring_buffer_event *event;
2414 struct ring_buffer_iter *buf_iter;
2415 unsigned long entries = 0;
2416 u64 ts;
2417
12883efb 2418 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2419
6d158a81
SR
2420 buf_iter = trace_buffer_iter(iter, cpu);
2421 if (!buf_iter)
2f26ebd5
SR
2422 return;
2423
2f26ebd5
SR
2424 ring_buffer_iter_reset(buf_iter);
2425
2426 /*
 2427 * With the max latency tracers, it is possible that a reset
 2428 * never took place on a cpu. This is evident from the
 2429 * timestamp being before the start of the buffer.
2430 */
2431 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2432 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2433 break;
2434 entries++;
2435 ring_buffer_read(buf_iter, NULL);
2436 }
2437
12883efb 2438 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2439}
2440
d7350c3f 2441/*
d7350c3f
FW
 2442 * The current tracer is copied to avoid taking a global lock
 2443 * all around.
2444 */
bc0c38d1
SR
2445static void *s_start(struct seq_file *m, loff_t *pos)
2446{
2447 struct trace_iterator *iter = m->private;
2b6080f2 2448 struct trace_array *tr = iter->tr;
b04cc6b1 2449 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2450 void *p = NULL;
2451 loff_t l = 0;
3928a8a2 2452 int cpu;
bc0c38d1 2453
2fd196ec
HT
2454 /*
2455 * copy the tracer to avoid using a global lock all around.
2456 * iter->trace is a copy of current_trace, the pointer to the
2457 * name may be used instead of a strcmp(), as iter->trace->name
2458 * will point to the same string as current_trace->name.
2459 */
bc0c38d1 2460 mutex_lock(&trace_types_lock);
2b6080f2
SR
2461 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2462 *iter->trace = *tr->current_trace;
d7350c3f 2463 mutex_unlock(&trace_types_lock);
bc0c38d1 2464
12883efb 2465#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2466 if (iter->snapshot && iter->trace->use_max_tr)
2467 return ERR_PTR(-EBUSY);
12883efb 2468#endif
debdd57f
HT
2469
2470 if (!iter->snapshot)
2471 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2472
bc0c38d1
SR
2473 if (*pos != iter->pos) {
2474 iter->ent = NULL;
2475 iter->cpu = 0;
2476 iter->idx = -1;
2477
ae3b5093 2478 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2479 for_each_tracing_cpu(cpu)
2f26ebd5 2480 tracing_iter_reset(iter, cpu);
b04cc6b1 2481 } else
2f26ebd5 2482 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2483
ac91d854 2484 iter->leftover = 0;
bc0c38d1
SR
2485 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2486 ;
2487
2488 } else {
a63ce5b3
SR
2489 /*
2490 * If we overflowed the seq_file before, then we want
2491 * to just reuse the trace_seq buffer again.
2492 */
2493 if (iter->leftover)
2494 p = iter;
2495 else {
2496 l = *pos - 1;
2497 p = s_next(m, p, &l);
2498 }
bc0c38d1
SR
2499 }
2500
4f535968 2501 trace_event_read_lock();
7e53bd42 2502 trace_access_lock(cpu_file);
bc0c38d1
SR
2503 return p;
2504}
2505
2506static void s_stop(struct seq_file *m, void *p)
2507{
7e53bd42
LJ
2508 struct trace_iterator *iter = m->private;
2509
12883efb 2510#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2511 if (iter->snapshot && iter->trace->use_max_tr)
2512 return;
12883efb 2513#endif
debdd57f
HT
2514
2515 if (!iter->snapshot)
2516 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2517
7e53bd42 2518 trace_access_unlock(iter->cpu_file);
4f535968 2519 trace_event_read_unlock();
bc0c38d1
SR
2520}
2521
39eaf7ef 2522static void
12883efb
SRRH
2523get_total_entries(struct trace_buffer *buf,
2524 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2525{
2526 unsigned long count;
2527 int cpu;
2528
2529 *total = 0;
2530 *entries = 0;
2531
2532 for_each_tracing_cpu(cpu) {
12883efb 2533 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2534 /*
2535 * If this buffer has skipped entries, then we hold all
2536 * entries for the trace and we need to ignore the
2537 * ones before the time stamp.
2538 */
12883efb
SRRH
2539 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2540 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2541 /* total is the same as the entries */
2542 *total += count;
2543 } else
2544 *total += count +
12883efb 2545 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2546 *entries += count;
2547 }
2548}
2549
e309b41d 2550static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2551{
d79ac28f
RV
2552 seq_puts(m, "# _------=> CPU# \n"
2553 "# / _-----=> irqs-off \n"
2554 "# | / _----=> need-resched \n"
2555 "# || / _---=> hardirq/softirq \n"
2556 "# ||| / _--=> preempt-depth \n"
2557 "# |||| / delay \n"
2558 "# cmd pid ||||| time | caller \n"
2559 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2560}
2561
12883efb 2562static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2563{
39eaf7ef
SR
2564 unsigned long total;
2565 unsigned long entries;
2566
12883efb 2567 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2568 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2569 entries, total, num_online_cpus());
2570 seq_puts(m, "#\n");
2571}
2572
12883efb 2573static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2574{
12883efb 2575 print_event_info(buf, m);
d79ac28f
RV
2576 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2577 "# | | | | |\n");
bc0c38d1
SR
2578}
2579
12883efb 2580static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2581{
12883efb 2582 print_event_info(buf, m);
d79ac28f
RV
2583 seq_puts(m, "# _-----=> irqs-off\n"
2584 "# / _----=> need-resched\n"
2585 "# | / _---=> hardirq/softirq\n"
2586 "# || / _--=> preempt-depth\n"
2587 "# ||| / delay\n"
2588 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2589 "# | | | |||| | |\n");
77271ce4 2590}
bc0c38d1 2591
62b915f1 2592void
bc0c38d1
SR
2593print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2594{
2595 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2596 struct trace_buffer *buf = iter->trace_buffer;
2597 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2598 struct tracer *type = iter->trace;
39eaf7ef
SR
2599 unsigned long entries;
2600 unsigned long total;
bc0c38d1
SR
2601 const char *name = "preemption";
2602
d840f718 2603 name = type->name;
bc0c38d1 2604
12883efb 2605 get_total_entries(buf, &total, &entries);
bc0c38d1 2606
888b55dc 2607 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2608 name, UTS_RELEASE);
888b55dc 2609 seq_puts(m, "# -----------------------------------"
bc0c38d1 2610 "---------------------------------\n");
888b55dc 2611 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2612 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2613 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2614 entries,
4c11d7ae 2615 total,
12883efb 2616 buf->cpu,
bc0c38d1
SR
2617#if defined(CONFIG_PREEMPT_NONE)
2618 "server",
2619#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2620 "desktop",
b5c21b45 2621#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2622 "preempt",
2623#else
2624 "unknown",
2625#endif
2626 /* These are reserved for later use */
2627 0, 0, 0, 0);
2628#ifdef CONFIG_SMP
2629 seq_printf(m, " #P:%d)\n", num_online_cpus());
2630#else
2631 seq_puts(m, ")\n");
2632#endif
888b55dc
KM
2633 seq_puts(m, "# -----------------\n");
2634 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2635 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2636 data->comm, data->pid,
2637 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2638 data->policy, data->rt_priority);
888b55dc 2639 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2640
2641 if (data->critical_start) {
888b55dc 2642 seq_puts(m, "# => started at: ");
214023c3
SR
2643 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2644 trace_print_seq(m, &iter->seq);
888b55dc 2645 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2646 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2647 trace_print_seq(m, &iter->seq);
8248ac05 2648 seq_puts(m, "\n#\n");
bc0c38d1
SR
2649 }
2650
888b55dc 2651 seq_puts(m, "#\n");
bc0c38d1
SR
2652}
2653
a309720c
SR
2654static void test_cpu_buff_start(struct trace_iterator *iter)
2655{
2656 struct trace_seq *s = &iter->seq;
2657
12ef7d44
SR
2658 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2659 return;
2660
2661 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2662 return;
2663
4462344e 2664 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2665 return;
2666
12883efb 2667 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2668 return;
2669
4462344e 2670 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2671
2672 /* Don't print started cpu buffer for the first entry of the trace */
2673 if (iter->idx > 1)
2674 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2675 iter->cpu);
a309720c
SR
2676}
2677
2c4f035f 2678static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2679{
214023c3 2680 struct trace_seq *s = &iter->seq;
bc0c38d1 2681 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2682 struct trace_entry *entry;
f633cef0 2683 struct trace_event *event;
bc0c38d1 2684
4e3c3333 2685 entry = iter->ent;
dd0e545f 2686
a309720c
SR
2687 test_cpu_buff_start(iter);
2688
c4a8e8be 2689 event = ftrace_find_event(entry->type);
bc0c38d1 2690
c4a8e8be 2691 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2692 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2693 trace_print_lat_context(iter);
2694 else
2695 trace_print_context(iter);
c4a8e8be 2696 }
bc0c38d1 2697
19a7fe20
SRRH
2698 if (trace_seq_has_overflowed(s))
2699 return TRACE_TYPE_PARTIAL_LINE;
2700
268ccda0 2701 if (event)
a9a57763 2702 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2703
19a7fe20 2704 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2705
19a7fe20 2706 return trace_handle_return(s);
bc0c38d1
SR
2707}
2708
2c4f035f 2709static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2710{
2711 struct trace_seq *s = &iter->seq;
2712 struct trace_entry *entry;
f633cef0 2713 struct trace_event *event;
f9896bf3
IM
2714
2715 entry = iter->ent;
dd0e545f 2716
19a7fe20
SRRH
2717 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2718 trace_seq_printf(s, "%d %d %llu ",
2719 entry->pid, iter->cpu, iter->ts);
2720
2721 if (trace_seq_has_overflowed(s))
2722 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2723
f633cef0 2724 event = ftrace_find_event(entry->type);
268ccda0 2725 if (event)
a9a57763 2726 return event->funcs->raw(iter, 0, event);
d9793bd8 2727
19a7fe20 2728 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2729
19a7fe20 2730 return trace_handle_return(s);
f9896bf3
IM
2731}
2732
2c4f035f 2733static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2734{
2735 struct trace_seq *s = &iter->seq;
2736 unsigned char newline = '\n';
2737 struct trace_entry *entry;
f633cef0 2738 struct trace_event *event;
5e3ca0ec
IM
2739
2740 entry = iter->ent;
dd0e545f 2741
c4a8e8be 2742 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2743 SEQ_PUT_HEX_FIELD(s, entry->pid);
2744 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2745 SEQ_PUT_HEX_FIELD(s, iter->ts);
2746 if (trace_seq_has_overflowed(s))
2747 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2748 }
5e3ca0ec 2749
f633cef0 2750 event = ftrace_find_event(entry->type);
268ccda0 2751 if (event) {
a9a57763 2752 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2753 if (ret != TRACE_TYPE_HANDLED)
2754 return ret;
2755 }
7104f300 2756
19a7fe20 2757 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2758
19a7fe20 2759 return trace_handle_return(s);
5e3ca0ec
IM
2760}
2761
2c4f035f 2762static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2763{
2764 struct trace_seq *s = &iter->seq;
2765 struct trace_entry *entry;
f633cef0 2766 struct trace_event *event;
cb0f12aa
IM
2767
2768 entry = iter->ent;
dd0e545f 2769
c4a8e8be 2770 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2771 SEQ_PUT_FIELD(s, entry->pid);
2772 SEQ_PUT_FIELD(s, iter->cpu);
2773 SEQ_PUT_FIELD(s, iter->ts);
2774 if (trace_seq_has_overflowed(s))
2775 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2776 }
cb0f12aa 2777
f633cef0 2778 event = ftrace_find_event(entry->type);
a9a57763
SR
2779 return event ? event->funcs->binary(iter, 0, event) :
2780 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2781}
2782
62b915f1 2783int trace_empty(struct trace_iterator *iter)
bc0c38d1 2784{
6d158a81 2785 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2786 int cpu;
2787
9aba60fe 2788 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2789 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2790 cpu = iter->cpu_file;
6d158a81
SR
2791 buf_iter = trace_buffer_iter(iter, cpu);
2792 if (buf_iter) {
2793 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2794 return 0;
2795 } else {
12883efb 2796 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2797 return 0;
2798 }
2799 return 1;
2800 }
2801
ab46428c 2802 for_each_tracing_cpu(cpu) {
6d158a81
SR
2803 buf_iter = trace_buffer_iter(iter, cpu);
2804 if (buf_iter) {
2805 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2806 return 0;
2807 } else {
12883efb 2808 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2809 return 0;
2810 }
bc0c38d1 2811 }
d769041f 2812
797d3712 2813 return 1;
bc0c38d1
SR
2814}
2815
4f535968 2816/* Called with trace_event_read_lock() held. */
955b61e5 2817enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2818{
2c4f035f
FW
2819 enum print_line_t ret;
2820
19a7fe20
SRRH
2821 if (iter->lost_events) {
2822 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2823 iter->cpu, iter->lost_events);
2824 if (trace_seq_has_overflowed(&iter->seq))
2825 return TRACE_TYPE_PARTIAL_LINE;
2826 }
bc21b478 2827
2c4f035f
FW
2828 if (iter->trace && iter->trace->print_line) {
2829 ret = iter->trace->print_line(iter);
2830 if (ret != TRACE_TYPE_UNHANDLED)
2831 return ret;
2832 }
72829bc3 2833
09ae7234
SRRH
2834 if (iter->ent->type == TRACE_BPUTS &&
2835 trace_flags & TRACE_ITER_PRINTK &&
2836 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2837 return trace_print_bputs_msg_only(iter);
2838
48ead020
FW
2839 if (iter->ent->type == TRACE_BPRINT &&
2840 trace_flags & TRACE_ITER_PRINTK &&
2841 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2842 return trace_print_bprintk_msg_only(iter);
48ead020 2843
66896a85
FW
2844 if (iter->ent->type == TRACE_PRINT &&
2845 trace_flags & TRACE_ITER_PRINTK &&
2846 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2847 return trace_print_printk_msg_only(iter);
66896a85 2848
cb0f12aa
IM
2849 if (trace_flags & TRACE_ITER_BIN)
2850 return print_bin_fmt(iter);
2851
5e3ca0ec
IM
2852 if (trace_flags & TRACE_ITER_HEX)
2853 return print_hex_fmt(iter);
2854
f9896bf3
IM
2855 if (trace_flags & TRACE_ITER_RAW)
2856 return print_raw_fmt(iter);
2857
f9896bf3
IM
2858 return print_trace_fmt(iter);
2859}
2860
7e9a49ef
JO
2861void trace_latency_header(struct seq_file *m)
2862{
2863 struct trace_iterator *iter = m->private;
2864
2865 /* print nothing if the buffers are empty */
2866 if (trace_empty(iter))
2867 return;
2868
2869 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2870 print_trace_header(m, iter);
2871
2872 if (!(trace_flags & TRACE_ITER_VERBOSE))
2873 print_lat_help_header(m);
2874}
2875
62b915f1
JO
2876void trace_default_header(struct seq_file *m)
2877{
2878 struct trace_iterator *iter = m->private;
2879
f56e7f8e
JO
2880 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2881 return;
2882
62b915f1
JO
2883 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2884 /* print nothing if the buffers are empty */
2885 if (trace_empty(iter))
2886 return;
2887 print_trace_header(m, iter);
2888 if (!(trace_flags & TRACE_ITER_VERBOSE))
2889 print_lat_help_header(m);
2890 } else {
77271ce4
SR
2891 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2892 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2893 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2894 else
12883efb 2895 print_func_help_header(iter->trace_buffer, m);
77271ce4 2896 }
62b915f1
JO
2897 }
2898}
2899
e0a413f6
SR
2900static void test_ftrace_alive(struct seq_file *m)
2901{
2902 if (!ftrace_is_dead())
2903 return;
d79ac28f
RV
2904 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2905 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2906}
2907
d8741e2e 2908#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2909static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2910{
d79ac28f
RV
2911 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2912 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2913 "# Takes a snapshot of the main buffer.\n"
2914 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2915 "# (Doesn't have to be '2' works with any number that\n"
2916 "# is not a '0' or '1')\n");
d8741e2e 2917}
f1affcaa
SRRH
2918
2919static void show_snapshot_percpu_help(struct seq_file *m)
2920{
fa6f0cc7 2921 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2922#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2923 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2924 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2925#else
d79ac28f
RV
2926 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2927 "# Must use main snapshot file to allocate.\n");
f1affcaa 2928#endif
d79ac28f
RV
2929 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2930 "# (Doesn't have to be '2' works with any number that\n"
2931 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2932}
2933
d8741e2e
SRRH
2934static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2935{
45ad21ca 2936 if (iter->tr->allocated_snapshot)
fa6f0cc7 2937 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2938 else
fa6f0cc7 2939 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2940
fa6f0cc7 2941 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2942 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2943 show_snapshot_main_help(m);
2944 else
2945 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2946}
2947#else
2948/* Should never be called */
2949static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2950#endif
2951
bc0c38d1
SR
2952static int s_show(struct seq_file *m, void *v)
2953{
2954 struct trace_iterator *iter = v;
a63ce5b3 2955 int ret;
bc0c38d1
SR
2956
2957 if (iter->ent == NULL) {
2958 if (iter->tr) {
2959 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2960 seq_puts(m, "#\n");
e0a413f6 2961 test_ftrace_alive(m);
bc0c38d1 2962 }
d8741e2e
SRRH
2963 if (iter->snapshot && trace_empty(iter))
2964 print_snapshot_help(m, iter);
2965 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2966 iter->trace->print_header(m);
62b915f1
JO
2967 else
2968 trace_default_header(m);
2969
a63ce5b3
SR
2970 } else if (iter->leftover) {
2971 /*
2972 * If we filled the seq_file buffer earlier, we
2973 * want to just show it now.
2974 */
2975 ret = trace_print_seq(m, &iter->seq);
2976
2977 /* ret should this time be zero, but you never know */
2978 iter->leftover = ret;
2979
bc0c38d1 2980 } else {
f9896bf3 2981 print_trace_line(iter);
a63ce5b3
SR
2982 ret = trace_print_seq(m, &iter->seq);
2983 /*
2984 * If we overflow the seq_file buffer, then it will
2985 * ask us for this data again at start up.
2986 * Use that instead.
2987 * ret is 0 if seq_file write succeeded.
2988 * -1 otherwise.
2989 */
2990 iter->leftover = ret;
bc0c38d1
SR
2991 }
2992
2993 return 0;
2994}
2995
649e9c70
ON
2996/*
2997 * Should be used after trace_array_get(), trace_types_lock
2998 * ensures that i_cdev was already initialized.
2999 */
3000static inline int tracing_get_cpu(struct inode *inode)
3001{
3002 if (inode->i_cdev) /* See trace_create_cpu_file() */
3003 return (long)inode->i_cdev - 1;
3004 return RING_BUFFER_ALL_CPUS;
3005}
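For reference, a hedged sketch of the encoding this helper relies on: the per-cpu file creation side stores cpu + 1 in i_cdev, so a NULL i_cdev (the instance-level files) decodes to RING_BUFFER_ALL_CPUS. The helper below is illustrative only, not the file's actual creation routine.

/* Hedged sketch: the inverse of tracing_get_cpu() (illustrative only). */
static void my_tag_inode_with_cpu(struct inode *inode, long cpu)
{
	/* cpu 0 is stored as 1, cpu 1 as 2, ...; NULL still means all CPUs */
	inode->i_cdev = (void *)(cpu + 1);
}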
3006
88e9d34c 3007static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3008 .start = s_start,
3009 .next = s_next,
3010 .stop = s_stop,
3011 .show = s_show,
bc0c38d1
SR
3012};
3013
e309b41d 3014static struct trace_iterator *
6484c71c 3015__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3016{
6484c71c 3017 struct trace_array *tr = inode->i_private;
bc0c38d1 3018 struct trace_iterator *iter;
50e18b94 3019 int cpu;
bc0c38d1 3020
85a2f9b4
SR
3021 if (tracing_disabled)
3022 return ERR_PTR(-ENODEV);
60a11774 3023
50e18b94 3024 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3025 if (!iter)
3026 return ERR_PTR(-ENOMEM);
bc0c38d1 3027
72917235 3028 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3029 GFP_KERNEL);
93574fcc
DC
3030 if (!iter->buffer_iter)
3031 goto release;
3032
d7350c3f
FW
3033 /*
3034 * We make a copy of the current tracer to avoid concurrent
3035 * changes on it while we are reading.
3036 */
bc0c38d1 3037 mutex_lock(&trace_types_lock);
d7350c3f 3038 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3039 if (!iter->trace)
d7350c3f 3040 goto fail;
85a2f9b4 3041
2b6080f2 3042 *iter->trace = *tr->current_trace;
d7350c3f 3043
79f55997 3044 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3045 goto fail;
3046
12883efb
SRRH
3047 iter->tr = tr;
3048
3049#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3050 /* Currently only the top directory has a snapshot */
3051 if (tr->current_trace->print_max || snapshot)
12883efb 3052 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3053 else
12883efb
SRRH
3054#endif
3055 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3056 iter->snapshot = snapshot;
bc0c38d1 3057 iter->pos = -1;
6484c71c 3058 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3059 mutex_init(&iter->mutex);
bc0c38d1 3060
8bba1bf5
MM
3061 /* Notify the tracer early; before we stop tracing. */
3062 if (iter->trace && iter->trace->open)
a93751ca 3063 iter->trace->open(iter);
8bba1bf5 3064
12ef7d44 3065 /* Annotate start of buffers if we had overruns */
12883efb 3066 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3067 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3068
8be0709f 3069 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3070 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3071 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3072
debdd57f
HT
3073 /* stop the trace while dumping if we are not opening "snapshot" */
3074 if (!iter->snapshot)
2b6080f2 3075 tracing_stop_tr(tr);
2f26ebd5 3076
ae3b5093 3077 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3078 for_each_tracing_cpu(cpu) {
b04cc6b1 3079 iter->buffer_iter[cpu] =
12883efb 3080 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3081 }
3082 ring_buffer_read_prepare_sync();
3083 for_each_tracing_cpu(cpu) {
3084 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3085 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3086 }
3087 } else {
3088 cpu = iter->cpu_file;
3928a8a2 3089 iter->buffer_iter[cpu] =
12883efb 3090 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3091 ring_buffer_read_prepare_sync();
3092 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3093 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3094 }
3095
bc0c38d1
SR
3096 mutex_unlock(&trace_types_lock);
3097
bc0c38d1 3098 return iter;
3928a8a2 3099
d7350c3f 3100 fail:
3928a8a2 3101 mutex_unlock(&trace_types_lock);
d7350c3f 3102 kfree(iter->trace);
6d158a81 3103 kfree(iter->buffer_iter);
93574fcc 3104release:
50e18b94
JO
3105 seq_release_private(inode, file);
3106 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3107}
3108
3109int tracing_open_generic(struct inode *inode, struct file *filp)
3110{
60a11774
SR
3111 if (tracing_disabled)
3112 return -ENODEV;
3113
bc0c38d1
SR
3114 filp->private_data = inode->i_private;
3115 return 0;
3116}
3117
2e86421d
GB
3118bool tracing_is_disabled(void)
3119{
 3120 return (tracing_disabled) ? true : false;
3121}
3122
7b85af63
SRRH
3123/*
3124 * Open and update trace_array ref count.
3125 * Must have the current trace_array passed to it.
3126 */
dcc30223 3127static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3128{
3129 struct trace_array *tr = inode->i_private;
3130
3131 if (tracing_disabled)
3132 return -ENODEV;
3133
3134 if (trace_array_get(tr) < 0)
3135 return -ENODEV;
3136
3137 filp->private_data = inode->i_private;
3138
3139 return 0;
7b85af63
SRRH
3140}
3141
4fd27358 3142static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3143{
6484c71c 3144 struct trace_array *tr = inode->i_private;
907f2784 3145 struct seq_file *m = file->private_data;
4acd4d00 3146 struct trace_iterator *iter;
3928a8a2 3147 int cpu;
bc0c38d1 3148
ff451961 3149 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3150 trace_array_put(tr);
4acd4d00 3151 return 0;
ff451961 3152 }
4acd4d00 3153
6484c71c 3154 /* Writes do not use seq_file */
4acd4d00 3155 iter = m->private;
bc0c38d1 3156 mutex_lock(&trace_types_lock);
a695cb58 3157
3928a8a2
SR
3158 for_each_tracing_cpu(cpu) {
3159 if (iter->buffer_iter[cpu])
3160 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3161 }
3162
bc0c38d1
SR
3163 if (iter->trace && iter->trace->close)
3164 iter->trace->close(iter);
3165
debdd57f
HT
3166 if (!iter->snapshot)
3167 /* reenable tracing if it was previously enabled */
2b6080f2 3168 tracing_start_tr(tr);
f77d09a3
AL
3169
3170 __trace_array_put(tr);
3171
bc0c38d1
SR
3172 mutex_unlock(&trace_types_lock);
3173
d7350c3f 3174 mutex_destroy(&iter->mutex);
b0dfa978 3175 free_cpumask_var(iter->started);
d7350c3f 3176 kfree(iter->trace);
6d158a81 3177 kfree(iter->buffer_iter);
50e18b94 3178 seq_release_private(inode, file);
ff451961 3179
bc0c38d1
SR
3180 return 0;
3181}
3182
7b85af63
SRRH
3183static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3184{
3185 struct trace_array *tr = inode->i_private;
3186
3187 trace_array_put(tr);
bc0c38d1
SR
3188 return 0;
3189}
3190
7b85af63
SRRH
3191static int tracing_single_release_tr(struct inode *inode, struct file *file)
3192{
3193 struct trace_array *tr = inode->i_private;
3194
3195 trace_array_put(tr);
3196
3197 return single_release(inode, file);
3198}
3199
bc0c38d1
SR
3200static int tracing_open(struct inode *inode, struct file *file)
3201{
6484c71c 3202 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3203 struct trace_iterator *iter;
3204 int ret = 0;
bc0c38d1 3205
ff451961
SRRH
3206 if (trace_array_get(tr) < 0)
3207 return -ENODEV;
3208
4acd4d00 3209 /* If this file was open for write, then erase contents */
6484c71c
ON
3210 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3211 int cpu = tracing_get_cpu(inode);
3212
3213 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3214 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3215 else
6484c71c 3216 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3217 }
bc0c38d1 3218
4acd4d00 3219 if (file->f_mode & FMODE_READ) {
6484c71c 3220 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3221 if (IS_ERR(iter))
3222 ret = PTR_ERR(iter);
3223 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3224 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3225 }
ff451961
SRRH
3226
3227 if (ret < 0)
3228 trace_array_put(tr);
3229
bc0c38d1
SR
3230 return ret;
3231}
3232
607e2ea1
SRRH
3233/*
3234 * Some tracers are not suitable for instance buffers.
3235 * A tracer is always available for the global array (toplevel)
3236 * or if it explicitly states that it is.
3237 */
3238static bool
3239trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3240{
3241 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3242}
3243
3244/* Find the next tracer that this trace array may use */
3245static struct tracer *
3246get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3247{
3248 while (t && !trace_ok_for_array(t, tr))
3249 t = t->next;
3250
3251 return t;
3252}
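A hedged sketch of what this check keys off: a tracer that wants to be selectable inside instance buffers sets .allow_instances in its struct tracer (the tracer below is hypothetical).

/* Hedged example: a tracer opting in to instance buffers. */
static struct tracer my_instance_friendly_tracer __read_mostly = {
	.name			= "my_tracer",		/* hypothetical */
	.allow_instances	= true,
};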
3253
e309b41d 3254static void *
bc0c38d1
SR
3255t_next(struct seq_file *m, void *v, loff_t *pos)
3256{
607e2ea1 3257 struct trace_array *tr = m->private;
f129e965 3258 struct tracer *t = v;
bc0c38d1
SR
3259
3260 (*pos)++;
3261
3262 if (t)
607e2ea1 3263 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3264
bc0c38d1
SR
3265 return t;
3266}
3267
3268static void *t_start(struct seq_file *m, loff_t *pos)
3269{
607e2ea1 3270 struct trace_array *tr = m->private;
f129e965 3271 struct tracer *t;
bc0c38d1
SR
3272 loff_t l = 0;
3273
3274 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3275
3276 t = get_tracer_for_array(tr, trace_types);
3277 for (; t && l < *pos; t = t_next(m, t, &l))
3278 ;
bc0c38d1
SR
3279
3280 return t;
3281}
3282
3283static void t_stop(struct seq_file *m, void *p)
3284{
3285 mutex_unlock(&trace_types_lock);
3286}
3287
3288static int t_show(struct seq_file *m, void *v)
3289{
3290 struct tracer *t = v;
3291
3292 if (!t)
3293 return 0;
3294
fa6f0cc7 3295 seq_puts(m, t->name);
bc0c38d1
SR
3296 if (t->next)
3297 seq_putc(m, ' ');
3298 else
3299 seq_putc(m, '\n');
3300
3301 return 0;
3302}
3303
88e9d34c 3304static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3305 .start = t_start,
3306 .next = t_next,
3307 .stop = t_stop,
3308 .show = t_show,
bc0c38d1
SR
3309};
3310
3311static int show_traces_open(struct inode *inode, struct file *file)
3312{
607e2ea1
SRRH
3313 struct trace_array *tr = inode->i_private;
3314 struct seq_file *m;
3315 int ret;
3316
60a11774
SR
3317 if (tracing_disabled)
3318 return -ENODEV;
3319
607e2ea1
SRRH
3320 ret = seq_open(file, &show_traces_seq_ops);
3321 if (ret)
3322 return ret;
3323
3324 m = file->private_data;
3325 m->private = tr;
3326
3327 return 0;
bc0c38d1
SR
3328}
3329
4acd4d00
SR
3330static ssize_t
3331tracing_write_stub(struct file *filp, const char __user *ubuf,
3332 size_t count, loff_t *ppos)
3333{
3334 return count;
3335}
3336
098c879e 3337loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3338{
098c879e
SRRH
3339 int ret;
3340
364829b1 3341 if (file->f_mode & FMODE_READ)
098c879e 3342 ret = seq_lseek(file, offset, whence);
364829b1 3343 else
098c879e
SRRH
3344 file->f_pos = ret = 0;
3345
3346 return ret;
364829b1
SP
3347}
3348
5e2336a0 3349static const struct file_operations tracing_fops = {
4bf39a94
IM
3350 .open = tracing_open,
3351 .read = seq_read,
4acd4d00 3352 .write = tracing_write_stub,
098c879e 3353 .llseek = tracing_lseek,
4bf39a94 3354 .release = tracing_release,
bc0c38d1
SR
3355};
3356
5e2336a0 3357static const struct file_operations show_traces_fops = {
c7078de1
IM
3358 .open = show_traces_open,
3359 .read = seq_read,
3360 .release = seq_release,
b444786f 3361 .llseek = seq_lseek,
c7078de1
IM
3362};
3363
36dfe925
IM
3364/*
3365 * The tracer itself will not take this lock, but still we want
3366 * to provide a consistent cpumask to user-space:
3367 */
3368static DEFINE_MUTEX(tracing_cpumask_update_lock);
3369
3370/*
3371 * Temporary storage for the character representation of the
3372 * CPU bitmask (and one more byte for the newline):
3373 */
3374static char mask_str[NR_CPUS + 1];
3375
c7078de1
IM
3376static ssize_t
3377tracing_cpumask_read(struct file *filp, char __user *ubuf,
3378 size_t count, loff_t *ppos)
3379{
ccfe9e42 3380 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3381 int len;
c7078de1
IM
3382
3383 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3384
1a40243b
TH
3385 len = snprintf(mask_str, count, "%*pb\n",
3386 cpumask_pr_args(tr->tracing_cpumask));
3387 if (len >= count) {
36dfe925
IM
3388 count = -EINVAL;
3389 goto out_err;
3390 }
36dfe925
IM
3391 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3392
3393out_err:
c7078de1
IM
3394 mutex_unlock(&tracing_cpumask_update_lock);
3395
3396 return count;
3397}
3398
3399static ssize_t
3400tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3401 size_t count, loff_t *ppos)
3402{
ccfe9e42 3403 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3404 cpumask_var_t tracing_cpumask_new;
2b6080f2 3405 int err, cpu;
9e01c1b7
RR
3406
3407 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3408 return -ENOMEM;
c7078de1 3409
9e01c1b7 3410 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3411 if (err)
36dfe925
IM
3412 goto err_unlock;
3413
215368e8
LZ
3414 mutex_lock(&tracing_cpumask_update_lock);
3415
a5e25883 3416 local_irq_disable();
0b9b12c1 3417 arch_spin_lock(&tr->max_lock);
ab46428c 3418 for_each_tracing_cpu(cpu) {
36dfe925
IM
3419 /*
3420 * Increase/decrease the disabled counter if we are
3421 * about to flip a bit in the cpumask:
3422 */
ccfe9e42 3423 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3424 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3425 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3426 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3427 }
ccfe9e42 3428 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3429 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3430 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3431 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3432 }
3433 }
0b9b12c1 3434 arch_spin_unlock(&tr->max_lock);
a5e25883 3435 local_irq_enable();
36dfe925 3436
ccfe9e42 3437 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3438
3439 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3440 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3441
3442 return count;
36dfe925
IM
3443
3444err_unlock:
215368e8 3445 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3446
3447 return err;
c7078de1
IM
3448}
3449
5e2336a0 3450static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3451 .open = tracing_open_generic_tr,
c7078de1
IM
3452 .read = tracing_cpumask_read,
3453 .write = tracing_cpumask_write,
ccfe9e42 3454 .release = tracing_release_generic_tr,
b444786f 3455 .llseek = generic_file_llseek,
bc0c38d1
SR
3456};
3457
fdb372ed 3458static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3459{
d8e83d26 3460 struct tracer_opt *trace_opts;
2b6080f2 3461 struct trace_array *tr = m->private;
d8e83d26 3462 u32 tracer_flags;
d8e83d26 3463 int i;
adf9f195 3464
d8e83d26 3465 mutex_lock(&trace_types_lock);
2b6080f2
SR
3466 tracer_flags = tr->current_trace->flags->val;
3467 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3468
bc0c38d1
SR
3469 for (i = 0; trace_options[i]; i++) {
3470 if (trace_flags & (1 << i))
fdb372ed 3471 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3472 else
fdb372ed 3473 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3474 }
3475
adf9f195
FW
3476 for (i = 0; trace_opts[i].name; i++) {
3477 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3478 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3479 else
fdb372ed 3480 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3481 }
d8e83d26 3482 mutex_unlock(&trace_types_lock);
adf9f195 3483
fdb372ed 3484 return 0;
bc0c38d1 3485}
bc0c38d1 3486
8c1a49ae 3487static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3488 struct tracer_flags *tracer_flags,
3489 struct tracer_opt *opts, int neg)
3490{
8c1a49ae 3491 struct tracer *trace = tr->current_trace;
8d18eaaf 3492 int ret;
bc0c38d1 3493
8c1a49ae 3494 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3495 if (ret)
3496 return ret;
3497
3498 if (neg)
3499 tracer_flags->val &= ~opts->bit;
3500 else
3501 tracer_flags->val |= opts->bit;
3502 return 0;
bc0c38d1
SR
3503}
3504
adf9f195 3505/* Try to assign a tracer specific option */
8c1a49ae 3506static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3507{
8c1a49ae 3508 struct tracer *trace = tr->current_trace;
7770841e 3509 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3510 struct tracer_opt *opts = NULL;
8d18eaaf 3511 int i;
adf9f195 3512
7770841e
Z
3513 for (i = 0; tracer_flags->opts[i].name; i++) {
3514 opts = &tracer_flags->opts[i];
adf9f195 3515
8d18eaaf 3516 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3517 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3518 }
adf9f195 3519
8d18eaaf 3520 return -EINVAL;
adf9f195
FW
3521}
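For orientation, a hedged sketch of the data this parser walks: a tracer declares its private options as a NULL-terminated tracer_opt array wrapped in a tracer_flags struct (the names and bits below are hypothetical).

/* Hedged example: the flag tables consumed by set_tracer_option() above. */
#define MY_OPT_VERBOSE	0x1			/* hypothetical option bit */

static struct tracer_opt my_tracer_opts[] = {
	{ .name = "verbose", .bit = MY_OPT_VERBOSE },
	{ }	/* terminator: opts[i].name == NULL ends the loop above */
};

static struct tracer_flags my_tracer_flags = {
	.val	= 0,
	.opts	= my_tracer_opts,
};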
3522
613f04a0
SRRH
3523/* Some tracers require overwrite to stay enabled */
3524int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3525{
3526 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3527 return -1;
3528
3529 return 0;
3530}
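A hedged sketch of how a tracer might reuse this helper: the flag_changed callback invoked from set_tracer_flag() below takes a trace_array, while this helper takes the tracer itself, so a thin wrapper of the following shape is the natural arrangement (the wrapper and tracer names are hypothetical).

/* Hedged sketch: wiring trace_keep_overwrite() into a flag_changed callback. */
static int my_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	/* Refuse to clear TRACE_ITER_OVERWRITE while this tracer is enabled. */
	return trace_keep_overwrite(tr->current_trace, mask, set);
}

static struct tracer my_latency_tracer __read_mostly = {
	.name		= "my_latency",		/* hypothetical */
	.flag_changed	= my_flag_changed,
};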
3531
2b6080f2 3532int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3533{
3534 /* do nothing if flag is already set */
3535 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3536 return 0;
3537
3538 /* Give the tracer a chance to approve the change */
2b6080f2 3539 if (tr->current_trace->flag_changed)
bf6065b5 3540 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3541 return -EINVAL;
af4617bd
SR
3542
3543 if (enabled)
3544 trace_flags |= mask;
3545 else
3546 trace_flags &= ~mask;
e870e9a1
LZ
3547
3548 if (mask == TRACE_ITER_RECORD_CMD)
3549 trace_event_enable_cmd_record(enabled);
750912fa 3550
80902822 3551 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3552 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3553#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3554 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3555#endif
3556 }
81698831
SR
3557
3558 if (mask == TRACE_ITER_PRINTK)
3559 trace_printk_start_stop_comm(enabled);
613f04a0
SRRH
3560
3561 return 0;
af4617bd
SR
3562}
3563
2b6080f2 3564static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3565{
8d18eaaf 3566 char *cmp;
bc0c38d1 3567 int neg = 0;
613f04a0 3568 int ret = -ENODEV;
bc0c38d1
SR
3569 int i;
3570
7bcfaf54 3571 cmp = strstrip(option);
bc0c38d1 3572
8d18eaaf 3573 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3574 neg = 1;
3575 cmp += 2;
3576 }
3577
69d34da2
SRRH
3578 mutex_lock(&trace_types_lock);
3579
bc0c38d1 3580 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3581 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3582 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3583 break;
3584 }
3585 }
adf9f195
FW
3586
3587 /* If no option could be set, test the specific tracer options */
69d34da2 3588 if (!trace_options[i])
8c1a49ae 3589 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3590
3591 mutex_unlock(&trace_types_lock);
bc0c38d1 3592
7bcfaf54
SR
3593 return ret;
3594}
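/*
 * Illustrative usage, not a quote from this file (paths assume the usual
 * tracefs/debugfs mount): trace_set_options() runs when an option name is
 * written to the trace_options file; the "no" prefix handled above clears
 * the flag instead of setting it, e.g.
 *
 *	echo print-parent   > /sys/kernel/debug/tracing/trace_options
 *	echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 *
 * Names that match no core flag fall through to the current tracer's own
 * options via set_tracer_option().
 */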
3595
3596static ssize_t
3597tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3598 size_t cnt, loff_t *ppos)
3599{
2b6080f2
SR
3600 struct seq_file *m = filp->private_data;
3601 struct trace_array *tr = m->private;
7bcfaf54 3602 char buf[64];
613f04a0 3603 int ret;
7bcfaf54
SR
3604
3605 if (cnt >= sizeof(buf))
3606 return -EINVAL;
3607
3608 if (copy_from_user(&buf, ubuf, cnt))
3609 return -EFAULT;
3610
a8dd2176
SR
3611 buf[cnt] = 0;
3612
2b6080f2 3613 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3614 if (ret < 0)
3615 return ret;
7bcfaf54 3616
cf8517cf 3617 *ppos += cnt;
bc0c38d1
SR
3618
3619 return cnt;
3620}
3621
fdb372ed
LZ
3622static int tracing_trace_options_open(struct inode *inode, struct file *file)
3623{
7b85af63 3624 struct trace_array *tr = inode->i_private;
f77d09a3 3625 int ret;
7b85af63 3626
fdb372ed
LZ
3627 if (tracing_disabled)
3628 return -ENODEV;
2b6080f2 3629
7b85af63
SRRH
3630 if (trace_array_get(tr) < 0)
3631 return -ENODEV;
3632
f77d09a3
AL
3633 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3634 if (ret < 0)
3635 trace_array_put(tr);
3636
3637 return ret;
fdb372ed
LZ
3638}
3639
5e2336a0 3640static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3641 .open = tracing_trace_options_open,
3642 .read = seq_read,
3643 .llseek = seq_lseek,
7b85af63 3644 .release = tracing_single_release_tr,
ee6bce52 3645 .write = tracing_trace_options_write,
bc0c38d1
SR
3646};
3647
7bd2f24c
IM
3648static const char readme_msg[] =
3649 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3650 "# echo 0 > tracing_on : quick way to disable tracing\n"
3651 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3652 " Important files:\n"
3653 " trace\t\t\t- The static contents of the buffer\n"
3654 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3655 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3656 " current_tracer\t- function and latency tracers\n"
3657 " available_tracers\t- list of configured tracers for current_tracer\n"
3658 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3659 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3660 " trace_clock\t\t-change the clock used to order events\n"
3661 " local: Per cpu clock but may not be synced across CPUs\n"
3662 " global: Synced across CPUs but slows tracing down.\n"
3663 " counter: Not a clock, but just an increment\n"
3664 " uptime: Jiffy counter from time of boot\n"
3665 " perf: Same clock that perf events use\n"
3666#ifdef CONFIG_X86_64
3667 " x86-tsc: TSC cycle counter\n"
3668#endif
3669 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3670 " tracing_cpumask\t- Limit which CPUs to trace\n"
3671 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3672 "\t\t\t Remove sub-buffer with rmdir\n"
3673 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3674 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3675 "\t\t\t option name\n"
939c7a4f 3676 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3677#ifdef CONFIG_DYNAMIC_FTRACE
3678 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3679 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3680 "\t\t\t functions\n"
3681 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3682 "\t modules: Can select a group via module\n"
3683 "\t Format: :mod:<module-name>\n"
3684 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3685 "\t triggers: a command to perform when function is hit\n"
3686 "\t Format: <function>:<trigger>[:count]\n"
3687 "\t trigger: traceon, traceoff\n"
3688 "\t\t enable_event:<system>:<event>\n"
3689 "\t\t disable_event:<system>:<event>\n"
22f45649 3690#ifdef CONFIG_STACKTRACE
71485c45 3691 "\t\t stacktrace\n"
22f45649
SRRH
3692#endif
3693#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3694 "\t\t snapshot\n"
22f45649 3695#endif
17a280ea
SRRH
3696 "\t\t dump\n"
3697 "\t\t cpudump\n"
71485c45
SRRH
3698 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3699 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3700 "\t The first one will disable tracing every time do_fault is hit\n"
3701 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3702 "\t The first time do trap is hit and it disables tracing, the\n"
3703 "\t counter will decrement to 2. If tracing is already disabled,\n"
3704 "\t the counter will not decrement. It only decrements when the\n"
3705 "\t trigger did work\n"
3706 "\t To remove trigger without count:\n"
3707 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3708 "\t To remove trigger with a count:\n"
3709 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3710 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3711 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3712 "\t modules: Can select a group via module command :mod:\n"
3713 "\t Does not accept triggers\n"
22f45649
SRRH
3714#endif /* CONFIG_DYNAMIC_FTRACE */
3715#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3716 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3717 "\t\t (function)\n"
22f45649
SRRH
3718#endif
3719#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3720 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3721 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3722 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3723#endif
3724#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3725 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3726 "\t\t\t snapshot buffer. Read the contents for more\n"
3727 "\t\t\t information\n"
22f45649 3728#endif
991821c8 3729#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3730 " stack_trace\t\t- Shows the max stack trace when active\n"
3731 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3732 "\t\t\t Write into this file to reset the max size (trigger a\n"
3733 "\t\t\t new trace)\n"
22f45649 3734#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3735 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3736 "\t\t\t traces\n"
22f45649 3737#endif
991821c8 3738#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3739 " events/\t\t- Directory containing all trace event subsystems:\n"
3740 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3741 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3742 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3743 "\t\t\t events\n"
26f25564 3744 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3745 " events/<system>/<event>/\t- Directory containing control files for\n"
3746 "\t\t\t <event>:\n"
26f25564
TZ
3747 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3748 " filter\t\t- If set, only events passing filter are traced\n"
3749 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3750 "\t Format: <trigger>[:count][if <filter>]\n"
3751 "\t trigger: traceon, traceoff\n"
3752 "\t enable_event:<system>:<event>\n"
3753 "\t disable_event:<system>:<event>\n"
26f25564 3754#ifdef CONFIG_STACKTRACE
71485c45 3755 "\t\t stacktrace\n"
26f25564
TZ
3756#endif
3757#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3758 "\t\t snapshot\n"
26f25564 3759#endif
71485c45
SRRH
3760 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3761 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3762 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3763 "\t events/block/block_unplug/trigger\n"
3764 "\t The first disables tracing every time block_unplug is hit.\n"
3765 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3766 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3767 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3768 "\t Like function triggers, the counter is only decremented if it\n"
3769 "\t enabled or disabled tracing.\n"
3770 "\t To remove a trigger without a count:\n"
3771 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3772 "\t To remove a trigger with a count:\n"
3773 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3774 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3775;
3776
3777static ssize_t
3778tracing_readme_read(struct file *filp, char __user *ubuf,
3779 size_t cnt, loff_t *ppos)
3780{
3781 return simple_read_from_buffer(ubuf, cnt, ppos,
3782 readme_msg, strlen(readme_msg));
3783}
3784
5e2336a0 3785static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3786 .open = tracing_open_generic,
3787 .read = tracing_readme_read,
b444786f 3788 .llseek = generic_file_llseek,
7bd2f24c
IM
3789};
3790
42584c81
YY
3791static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3792{
3793 unsigned int *ptr = v;
69abe6a5 3794
42584c81
YY
3795 if (*pos || m->count)
3796 ptr++;
69abe6a5 3797
42584c81 3798 (*pos)++;
69abe6a5 3799
939c7a4f
YY
3800 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3801 ptr++) {
42584c81
YY
3802 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3803 continue;
69abe6a5 3804
42584c81
YY
3805 return ptr;
3806 }
69abe6a5 3807
42584c81
YY
3808 return NULL;
3809}
3810
3811static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3812{
3813 void *v;
3814 loff_t l = 0;
69abe6a5 3815
4c27e756
SRRH
3816 preempt_disable();
3817 arch_spin_lock(&trace_cmdline_lock);
3818
939c7a4f 3819 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3820 while (l <= *pos) {
3821 v = saved_cmdlines_next(m, v, &l);
3822 if (!v)
3823 return NULL;
69abe6a5
AP
3824 }
3825
42584c81
YY
3826 return v;
3827}
3828
3829static void saved_cmdlines_stop(struct seq_file *m, void *v)
3830{
4c27e756
SRRH
3831 arch_spin_unlock(&trace_cmdline_lock);
3832 preempt_enable();
42584c81 3833}
69abe6a5 3834
42584c81
YY
3835static int saved_cmdlines_show(struct seq_file *m, void *v)
3836{
3837 char buf[TASK_COMM_LEN];
3838 unsigned int *pid = v;
69abe6a5 3839
4c27e756 3840 __trace_find_cmdline(*pid, buf);
42584c81
YY
3841 seq_printf(m, "%d %s\n", *pid, buf);
3842 return 0;
3843}
3844
3845static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3846 .start = saved_cmdlines_start,
3847 .next = saved_cmdlines_next,
3848 .stop = saved_cmdlines_stop,
3849 .show = saved_cmdlines_show,
3850};
3851
3852static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3853{
3854 if (tracing_disabled)
3855 return -ENODEV;
3856
3857 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3858}
3859
3860static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3861 .open = tracing_saved_cmdlines_open,
3862 .read = seq_read,
3863 .llseek = seq_lseek,
3864 .release = seq_release,
69abe6a5
AP
3865};
3866
939c7a4f
YY
3867static ssize_t
3868tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3869 size_t cnt, loff_t *ppos)
3870{
3871 char buf[64];
3872 int r;
3873
3874 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3875 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3876 arch_spin_unlock(&trace_cmdline_lock);
3877
3878 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3879}
3880
3881static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3882{
3883 kfree(s->saved_cmdlines);
3884 kfree(s->map_cmdline_to_pid);
3885 kfree(s);
3886}
3887
3888static int tracing_resize_saved_cmdlines(unsigned int val)
3889{
3890 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3891
a6af8fbf 3892 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3893 if (!s)
3894 return -ENOMEM;
3895
3896 if (allocate_cmdlines_buffer(val, s) < 0) {
3897 kfree(s);
3898 return -ENOMEM;
3899 }
3900
3901 arch_spin_lock(&trace_cmdline_lock);
3902 savedcmd_temp = savedcmd;
3903 savedcmd = s;
3904 arch_spin_unlock(&trace_cmdline_lock);
3905 free_saved_cmdlines_buffer(savedcmd_temp);
3906
3907 return 0;
3908}
3909
3910static ssize_t
3911tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3912 size_t cnt, loff_t *ppos)
3913{
3914 unsigned long val;
3915 int ret;
3916
3917 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3918 if (ret)
3919 return ret;
3920
3921 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3922 if (!val || val > PID_MAX_DEFAULT)
3923 return -EINVAL;
3924
3925 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3926 if (ret < 0)
3927 return ret;
3928
3929 *ppos += cnt;
3930
3931 return cnt;
3932}
3933
3934static const struct file_operations tracing_saved_cmdlines_size_fops = {
3935 .open = tracing_open_generic,
3936 .read = tracing_saved_cmdlines_size_read,
3937 .write = tracing_saved_cmdlines_size_write,
3938};
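/*
 * Illustrative usage sketch (paths assume the usual tracefs mount): the
 * saved_cmdlines_size file backed by these handlers resizes the comm/pid
 * cache, and saved_cmdlines shows its current contents:
 *
 *	echo 4096 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *	cat /sys/kernel/debug/tracing/saved_cmdlines
 */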
3939
9828413d
SRRH
3940#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3941static union trace_enum_map_item *
3942update_enum_map(union trace_enum_map_item *ptr)
3943{
3944 if (!ptr->map.enum_string) {
3945 if (ptr->tail.next) {
3946 ptr = ptr->tail.next;
3947 /* Set ptr to the next real item (skip head) */
3948 ptr++;
3949 } else
3950 return NULL;
3951 }
3952 return ptr;
3953}
3954
3955static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3956{
3957 union trace_enum_map_item *ptr = v;
3958
3959 /*
3960 * Paranoid! If ptr points to end, we don't want to increment past it.
3961 * This really should never happen.
3962 */
3963 ptr = update_enum_map(ptr);
3964 if (WARN_ON_ONCE(!ptr))
3965 return NULL;
3966
3967 ptr++;
3968
3969 (*pos)++;
3970
3971 ptr = update_enum_map(ptr);
3972
3973 return ptr;
3974}
3975
3976static void *enum_map_start(struct seq_file *m, loff_t *pos)
3977{
3978 union trace_enum_map_item *v;
3979 loff_t l = 0;
3980
3981 mutex_lock(&trace_enum_mutex);
3982
3983 v = trace_enum_maps;
3984 if (v)
3985 v++;
3986
3987 while (v && l < *pos) {
3988 v = enum_map_next(m, v, &l);
3989 }
3990
3991 return v;
3992}
3993
3994static void enum_map_stop(struct seq_file *m, void *v)
3995{
3996 mutex_unlock(&trace_enum_mutex);
3997}
3998
3999static int enum_map_show(struct seq_file *m, void *v)
4000{
4001 union trace_enum_map_item *ptr = v;
4002
4003 seq_printf(m, "%s %ld (%s)\n",
4004 ptr->map.enum_string, ptr->map.enum_value,
4005 ptr->map.system);
4006
4007 return 0;
4008}
4009
4010static const struct seq_operations tracing_enum_map_seq_ops = {
4011 .start = enum_map_start,
4012 .next = enum_map_next,
4013 .stop = enum_map_stop,
4014 .show = enum_map_show,
4015};
4016
4017static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4018{
4019 if (tracing_disabled)
4020 return -ENODEV;
4021
4022 return seq_open(filp, &tracing_enum_map_seq_ops);
4023}
4024
4025static const struct file_operations tracing_enum_map_fops = {
4026 .open = tracing_enum_map_open,
4027 .read = seq_read,
4028 .llseek = seq_lseek,
4029 .release = seq_release,
4030};
4031
4032static inline union trace_enum_map_item *
4033trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4034{
4035 /* Return tail of array given the head */
4036 return ptr + ptr->head.length + 1;
4037}
4038
4039static void
4040trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4041 int len)
4042{
4043 struct trace_enum_map **stop;
4044 struct trace_enum_map **map;
4045 union trace_enum_map_item *map_array;
4046 union trace_enum_map_item *ptr;
4047
4048 stop = start + len;
4049
4050 /*
4051 * The trace_enum_maps contains the map plus a head and tail item,
4052 * where the head holds the module and length of array, and the
4053 * tail holds a pointer to the next list.
4054 */
4055 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4056 if (!map_array) {
4057 pr_warning("Unable to allocate trace enum mapping\n");
4058 return;
4059 }
4060
4061 mutex_lock(&trace_enum_mutex);
4062
4063 if (!trace_enum_maps)
4064 trace_enum_maps = map_array;
4065 else {
4066 ptr = trace_enum_maps;
4067 for (;;) {
4068 ptr = trace_enum_jmp_to_tail(ptr);
4069 if (!ptr->tail.next)
4070 break;
4071 ptr = ptr->tail.next;
4072
4073 }
4074 ptr->tail.next = map_array;
4075 }
4076 map_array->head.mod = mod;
4077 map_array->head.length = len;
4078 map_array++;
4079
4080 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4081 map_array->map = **map;
4082 map_array++;
4083 }
4084 memset(map_array, 0, sizeof(*map_array));
4085
4086 mutex_unlock(&trace_enum_mutex);
4087}
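/*
 * Layout sketch of the array built above (illustrative): for a module
 * that exports three enum maps, map_array holds
 *
 *	[ head: mod, length = 3 ][ map 0 ][ map 1 ][ map 2 ][ tail: next ]
 *
 * trace_enum_jmp_to_tail() skips "length + 1" entries to reach the tail
 * item, whose ->tail.next chains in the next module's array.
 */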
4088
4089static void trace_create_enum_file(struct dentry *d_tracer)
4090{
4091 trace_create_file("enum_map", 0444, d_tracer,
4092 NULL, &tracing_enum_map_fops);
4093}
4094
4095#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4096static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4097static inline void trace_insert_enum_map_file(struct module *mod,
4098 struct trace_enum_map **start, int len) { }
4099#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4100
4101static void trace_insert_enum_map(struct module *mod,
4102 struct trace_enum_map **start, int len)
0c564a53
SRRH
4103{
4104 struct trace_enum_map **map;
0c564a53
SRRH
4105
4106 if (len <= 0)
4107 return;
4108
4109 map = start;
4110
4111 trace_event_enum_update(map, len);
9828413d
SRRH
4112
4113 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4114}
4115
bc0c38d1
SR
4116static ssize_t
4117tracing_set_trace_read(struct file *filp, char __user *ubuf,
4118 size_t cnt, loff_t *ppos)
4119{
2b6080f2 4120 struct trace_array *tr = filp->private_data;
ee6c2c1b 4121 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4122 int r;
4123
4124 mutex_lock(&trace_types_lock);
2b6080f2 4125 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4126 mutex_unlock(&trace_types_lock);
4127
4bf39a94 4128 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4129}
4130
b6f11df2
ACM
4131int tracer_init(struct tracer *t, struct trace_array *tr)
4132{
12883efb 4133 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4134 return t->init(tr);
4135}
4136
12883efb 4137static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4138{
4139 int cpu;
737223fb 4140
438ced17 4141 for_each_tracing_cpu(cpu)
12883efb 4142 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4143}
4144
12883efb 4145#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4146/* resize @trace_buf's buffer to the size of @size_buf's entries */
12883efb
SRRH
4147static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4148 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4149{
4150 int cpu, ret = 0;
4151
4152 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4153 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4154 ret = ring_buffer_resize(trace_buf->buffer,
4155 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4156 if (ret < 0)
4157 break;
12883efb
SRRH
4158 per_cpu_ptr(trace_buf->data, cpu)->entries =
4159 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4160 }
4161 } else {
12883efb
SRRH
4162 ret = ring_buffer_resize(trace_buf->buffer,
4163 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4164 if (ret == 0)
12883efb
SRRH
4165 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4166 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4167 }
4168
4169 return ret;
4170}
12883efb 4171#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4172
2b6080f2
SR
4173static int __tracing_resize_ring_buffer(struct trace_array *tr,
4174 unsigned long size, int cpu)
73c5162a
SR
4175{
4176 int ret;
4177
4178 /*
4179 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4180 * we use the size that was given, and we can forget about
4181 * expanding it later.
73c5162a 4182 */
55034cd6 4183 ring_buffer_expanded = true;
73c5162a 4184
b382ede6 4185 /* May be called before buffers are initialized */
12883efb 4186 if (!tr->trace_buffer.buffer)
b382ede6
SR
4187 return 0;
4188
12883efb 4189 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4190 if (ret < 0)
4191 return ret;
4192
12883efb 4193#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4194 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4195 !tr->current_trace->use_max_tr)
ef710e10
KM
4196 goto out;
4197
12883efb 4198 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4199 if (ret < 0) {
12883efb
SRRH
4200 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4201 &tr->trace_buffer, cpu);
73c5162a 4202 if (r < 0) {
a123c52b
SR
4203 /*
4204 * AARGH! We are left with different
4205 * size max buffer!!!!
4206 * The max buffer is our "snapshot" buffer.
4207 * When a tracer needs a snapshot (one of the
4208 * latency tracers), it swaps the max buffer
4209 * with the saved snapshot. We succeeded in
4210 * updating the size of the main buffer, but failed to
4211 * update the size of the max buffer. But when we tried
4212 * to reset the main buffer to the original size, we
4213 * failed there too. This is very unlikely to
4214 * happen, but if it does, warn and kill all
4215 * tracing.
4216 */
73c5162a
SR
4217 WARN_ON(1);
4218 tracing_disabled = 1;
4219 }
4220 return ret;
4221 }
4222
438ced17 4223 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4224 set_buffer_entries(&tr->max_buffer, size);
438ced17 4225 else
12883efb 4226 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4227
ef710e10 4228 out:
12883efb
SRRH
4229#endif /* CONFIG_TRACER_MAX_TRACE */
4230
438ced17 4231 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4232 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4233 else
12883efb 4234 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4235
4236 return ret;
4237}
4238
2b6080f2
SR
4239static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4240 unsigned long size, int cpu_id)
4f271a2a 4241{
83f40318 4242 int ret = size;
4f271a2a
VN
4243
4244 mutex_lock(&trace_types_lock);
4245
438ced17
VN
4246 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4247 /* make sure, this cpu is enabled in the mask */
4248 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4249 ret = -EINVAL;
4250 goto out;
4251 }
4252 }
4f271a2a 4253
2b6080f2 4254 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4255 if (ret < 0)
4256 ret = -ENOMEM;
4257
438ced17 4258out:
4f271a2a
VN
4259 mutex_unlock(&trace_types_lock);
4260
4261 return ret;
4262}
4263
ef710e10 4264
1852fcce
SR
4265/**
4266 * tracing_update_buffers - used by tracing facility to expand ring buffers
4267 *
4268 * To save memory when tracing is never used on a system that has it
4269 * configured in, the ring buffers are set to a minimum size. But once
4270 * a user starts to use the tracing facility, the buffers need to grow
4271 * to their default size.
4272 *
4273 * This function is to be called when a tracer is about to be used.
4274 */
4275int tracing_update_buffers(void)
4276{
4277 int ret = 0;
4278
1027fcb2 4279 mutex_lock(&trace_types_lock);
1852fcce 4280 if (!ring_buffer_expanded)
2b6080f2 4281 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4282 RING_BUFFER_ALL_CPUS);
1027fcb2 4283 mutex_unlock(&trace_types_lock);
1852fcce
SR
4284
4285 return ret;
4286}
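/*
 * Sketch of the typical call pattern (illustrative, not a quote of any
 * particular caller): code about to start generating trace data calls
 * this first so the ring buffers are grown from their boot-time minimum:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... buffers are now expanded; safe to enable the tracer/event ...
 */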
4287
577b785f
SR
4288struct trace_option_dentry;
4289
4290static struct trace_option_dentry *
2b6080f2 4291create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4292
6b450d25
SRRH
4293/*
4294 * Used to clear out the tracer before deletion of an instance.
4295 * Must have trace_types_lock held.
4296 */
4297static void tracing_set_nop(struct trace_array *tr)
4298{
4299 if (tr->current_trace == &nop_trace)
4300 return;
4301
50512ab5 4302 tr->current_trace->enabled--;
6b450d25
SRRH
4303
4304 if (tr->current_trace->reset)
4305 tr->current_trace->reset(tr);
4306
4307 tr->current_trace = &nop_trace;
4308}
4309
41d9c0be 4310static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4311{
09d23a1d
SRRH
4312 /* Only enable if the directory has been created already. */
4313 if (!tr->dir)
4314 return;
4315
4316 /* Currently, only the top instance has options */
4317 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4318 return;
4319
41d9c0be
SRRH
4320 /* Ignore if they were already created */
4321 if (t->topts)
4322 return;
4323
4324 t->topts = create_trace_option_files(tr, t);
09d23a1d
SRRH
4325}
4326
4327static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4328{
bc0c38d1 4329 struct tracer *t;
12883efb 4330#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4331 bool had_max_tr;
12883efb 4332#endif
d9e54076 4333 int ret = 0;
bc0c38d1 4334
1027fcb2
SR
4335 mutex_lock(&trace_types_lock);
4336
73c5162a 4337 if (!ring_buffer_expanded) {
2b6080f2 4338 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4339 RING_BUFFER_ALL_CPUS);
73c5162a 4340 if (ret < 0)
59f586db 4341 goto out;
73c5162a
SR
4342 ret = 0;
4343 }
4344
bc0c38d1
SR
4345 for (t = trace_types; t; t = t->next) {
4346 if (strcmp(t->name, buf) == 0)
4347 break;
4348 }
c2931e05
FW
4349 if (!t) {
4350 ret = -EINVAL;
4351 goto out;
4352 }
2b6080f2 4353 if (t == tr->current_trace)
bc0c38d1
SR
4354 goto out;
4355
607e2ea1
SRRH
4356 /* Some tracers are only allowed for the top level buffer */
4357 if (!trace_ok_for_array(t, tr)) {
4358 ret = -EINVAL;
4359 goto out;
4360 }
4361
cf6ab6d9
SRRH
4362 /* If trace pipe files are being read, we can't change the tracer */
4363 if (tr->current_trace->ref) {
4364 ret = -EBUSY;
4365 goto out;
4366 }
4367
9f029e83 4368 trace_branch_disable();
613f04a0 4369
50512ab5 4370 tr->current_trace->enabled--;
613f04a0 4371
2b6080f2
SR
4372 if (tr->current_trace->reset)
4373 tr->current_trace->reset(tr);
34600f0e 4374
12883efb 4375 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4376 tr->current_trace = &nop_trace;
34600f0e 4377
45ad21ca
SRRH
4378#ifdef CONFIG_TRACER_MAX_TRACE
4379 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4380
4381 if (had_max_tr && !t->use_max_tr) {
4382 /*
4383 * We need to make sure that the update_max_tr sees that
4384 * current_trace changed to nop_trace to keep it from
4385 * swapping the buffers after we resize it.
4386 * The update_max_tr is called from interrupts disabled
4387 * so a synchronized_sched() is sufficient.
4388 */
4389 synchronize_sched();
3209cff4 4390 free_snapshot(tr);
ef710e10 4391 }
12883efb 4392#endif
12883efb
SRRH
4393
4394#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4395 if (t->use_max_tr && !had_max_tr) {
3209cff4 4396 ret = alloc_snapshot(tr);
d60da506
HT
4397 if (ret < 0)
4398 goto out;
ef710e10 4399 }
12883efb 4400#endif
577b785f 4401
1c80025a 4402 if (t->init) {
b6f11df2 4403 ret = tracer_init(t, tr);
1c80025a
FW
4404 if (ret)
4405 goto out;
4406 }
bc0c38d1 4407
2b6080f2 4408 tr->current_trace = t;
50512ab5 4409 tr->current_trace->enabled++;
9f029e83 4410 trace_branch_enable(tr);
bc0c38d1
SR
4411 out:
4412 mutex_unlock(&trace_types_lock);
4413
d9e54076
PZ
4414 return ret;
4415}
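/*
 * Illustrative usage (assumes the tracer is listed in available_tracers):
 * tracing_set_tracer() is reached when a tracer name is written to the
 * current_tracer file, e.g.
 *
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	echo nop            > /sys/kernel/debug/tracing/current_tracer
 */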
4416
4417static ssize_t
4418tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4419 size_t cnt, loff_t *ppos)
4420{
607e2ea1 4421 struct trace_array *tr = filp->private_data;
ee6c2c1b 4422 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4423 int i;
4424 size_t ret;
e6e7a65a
FW
4425 int err;
4426
4427 ret = cnt;
d9e54076 4428
ee6c2c1b
LZ
4429 if (cnt > MAX_TRACER_SIZE)
4430 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4431
4432 if (copy_from_user(&buf, ubuf, cnt))
4433 return -EFAULT;
4434
4435 buf[cnt] = 0;
4436
4437 /* strip ending whitespace. */
4438 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4439 buf[i] = 0;
4440
607e2ea1 4441 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4442 if (err)
4443 return err;
d9e54076 4444
cf8517cf 4445 *ppos += ret;
bc0c38d1 4446
c2931e05 4447 return ret;
bc0c38d1
SR
4448}
4449
4450static ssize_t
6508fa76
SF
4451tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4452 size_t cnt, loff_t *ppos)
bc0c38d1 4453{
bc0c38d1
SR
4454 char buf[64];
4455 int r;
4456
cffae437 4457 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4458 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4459 if (r > sizeof(buf))
4460 r = sizeof(buf);
4bf39a94 4461 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4462}
4463
4464static ssize_t
6508fa76
SF
4465tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4466 size_t cnt, loff_t *ppos)
bc0c38d1 4467{
5e39841c 4468 unsigned long val;
c6caeeb1 4469 int ret;
bc0c38d1 4470
22fe9b54
PH
4471 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4472 if (ret)
c6caeeb1 4473 return ret;
bc0c38d1
SR
4474
4475 *ptr = val * 1000;
4476
4477 return cnt;
4478}
4479
6508fa76
SF
4480static ssize_t
4481tracing_thresh_read(struct file *filp, char __user *ubuf,
4482 size_t cnt, loff_t *ppos)
4483{
4484 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4485}
4486
4487static ssize_t
4488tracing_thresh_write(struct file *filp, const char __user *ubuf,
4489 size_t cnt, loff_t *ppos)
4490{
4491 struct trace_array *tr = filp->private_data;
4492 int ret;
4493
4494 mutex_lock(&trace_types_lock);
4495 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4496 if (ret < 0)
4497 goto out;
4498
4499 if (tr->current_trace->update_thresh) {
4500 ret = tr->current_trace->update_thresh(tr);
4501 if (ret < 0)
4502 goto out;
4503 }
4504
4505 ret = cnt;
4506out:
4507 mutex_unlock(&trace_types_lock);
4508
4509 return ret;
4510}
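/*
 * Illustrative usage sketch: tracing_thresh is exposed in microseconds
 * (the nsecs helpers above multiply/divide by 1000), so
 *
 *	echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *
 * asks latency tracers such as irqsoff to record only latencies that
 * exceed 100 usecs.
 */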
4511
4512static ssize_t
4513tracing_max_lat_read(struct file *filp, char __user *ubuf,
4514 size_t cnt, loff_t *ppos)
4515{
4516 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4517}
4518
4519static ssize_t
4520tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4521 size_t cnt, loff_t *ppos)
4522{
4523 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4524}
4525
b3806b43
SR
4526static int tracing_open_pipe(struct inode *inode, struct file *filp)
4527{
15544209 4528 struct trace_array *tr = inode->i_private;
b3806b43 4529 struct trace_iterator *iter;
b04cc6b1 4530 int ret = 0;
b3806b43
SR
4531
4532 if (tracing_disabled)
4533 return -ENODEV;
4534
7b85af63
SRRH
4535 if (trace_array_get(tr) < 0)
4536 return -ENODEV;
4537
b04cc6b1
FW
4538 mutex_lock(&trace_types_lock);
4539
b3806b43
SR
4540 /* create a buffer to store the information to pass to userspace */
4541 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4542 if (!iter) {
4543 ret = -ENOMEM;
f77d09a3 4544 __trace_array_put(tr);
b04cc6b1
FW
4545 goto out;
4546 }
b3806b43 4547
3a161d99 4548 trace_seq_init(&iter->seq);
d716ff71 4549 iter->trace = tr->current_trace;
d7350c3f 4550
4462344e 4551 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4552 ret = -ENOMEM;
d7350c3f 4553 goto fail;
4462344e
RR
4554 }
4555
a309720c 4556 /* trace pipe does not show start of buffer */
4462344e 4557 cpumask_setall(iter->started);
a309720c 4558
112f38a7
SR
4559 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4560 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4561
8be0709f 4562 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4563 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4564 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4565
15544209
ON
4566 iter->tr = tr;
4567 iter->trace_buffer = &tr->trace_buffer;
4568 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4569 mutex_init(&iter->mutex);
b3806b43
SR
4570 filp->private_data = iter;
4571
107bad8b
SR
4572 if (iter->trace->pipe_open)
4573 iter->trace->pipe_open(iter);
107bad8b 4574
b444786f 4575 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4576
4577 tr->current_trace->ref++;
b04cc6b1
FW
4578out:
4579 mutex_unlock(&trace_types_lock);
4580 return ret;
d7350c3f
FW
4581
4582fail:
4583 kfree(iter->trace);
4584 kfree(iter);
7b85af63 4585 __trace_array_put(tr);
d7350c3f
FW
4586 mutex_unlock(&trace_types_lock);
4587 return ret;
b3806b43
SR
4588}
4589
4590static int tracing_release_pipe(struct inode *inode, struct file *file)
4591{
4592 struct trace_iterator *iter = file->private_data;
15544209 4593 struct trace_array *tr = inode->i_private;
b3806b43 4594
b04cc6b1
FW
4595 mutex_lock(&trace_types_lock);
4596
cf6ab6d9
SRRH
4597 tr->current_trace->ref--;
4598
29bf4a5e 4599 if (iter->trace->pipe_close)
c521efd1
SR
4600 iter->trace->pipe_close(iter);
4601
b04cc6b1
FW
4602 mutex_unlock(&trace_types_lock);
4603
4462344e 4604 free_cpumask_var(iter->started);
d7350c3f 4605 mutex_destroy(&iter->mutex);
b3806b43 4606 kfree(iter);
b3806b43 4607
7b85af63
SRRH
4608 trace_array_put(tr);
4609
b3806b43
SR
4610 return 0;
4611}
4612
2a2cc8f7 4613static unsigned int
cc60cdc9 4614trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4615{
15693458
SRRH
4616 /* Iterators are static, they should be filled or empty */
4617 if (trace_buffer_iter(iter, iter->cpu_file))
4618 return POLLIN | POLLRDNORM;
2a2cc8f7 4619
15693458 4620 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4621 /*
4622 * Always select as readable when in blocking mode
4623 */
4624 return POLLIN | POLLRDNORM;
15693458 4625 else
12883efb 4626 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4627 filp, poll_table);
2a2cc8f7 4628}
2a2cc8f7 4629
cc60cdc9
SR
4630static unsigned int
4631tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4632{
4633 struct trace_iterator *iter = filp->private_data;
4634
4635 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4636}
4637
d716ff71 4638/* Must be called with iter->mutex held. */
ff98781b 4639static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4640{
4641 struct trace_iterator *iter = filp->private_data;
8b8b3683 4642 int ret;
b3806b43 4643
b3806b43 4644 while (trace_empty(iter)) {
2dc8f095 4645
107bad8b 4646 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4647 return -EAGAIN;
107bad8b 4648 }
2dc8f095 4649
b3806b43 4650 /*
250bfd3d 4651 * We block until we read something, even if tracing is disabled,
b3806b43
SR
4652 * as long as we have never read anything. This allows a user to
4653 * cat this file, and then enable tracing. But after we have
4654 * read something, we give an EOF when tracing is again
4655 * disabled.
4656 *
4657 * iter->pos will be 0 if we haven't read anything.
4658 */
10246fa3 4659 if (!tracing_is_on() && iter->pos)
b3806b43 4660 break;
f4874261
SRRH
4661
4662 mutex_unlock(&iter->mutex);
4663
e30f53aa 4664 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4665
4666 mutex_lock(&iter->mutex);
4667
8b8b3683
SRRH
4668 if (ret)
4669 return ret;
b3806b43
SR
4670 }
4671
ff98781b
EGM
4672 return 1;
4673}
4674
4675/*
4676 * Consumer reader.
4677 */
4678static ssize_t
4679tracing_read_pipe(struct file *filp, char __user *ubuf,
4680 size_t cnt, loff_t *ppos)
4681{
4682 struct trace_iterator *iter = filp->private_data;
4683 ssize_t sret;
4684
4685 /* return any leftover data */
4686 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4687 if (sret != -EBUSY)
4688 return sret;
4689
f9520750 4690 trace_seq_init(&iter->seq);
ff98781b 4691
d7350c3f
FW
4692 /*
4693 * Avoid more than one consumer on a single file descriptor.
4694 * This is just a matter of trace coherency; the ring buffer itself
4695 * is protected.
4696 */
4697 mutex_lock(&iter->mutex);
ff98781b
EGM
4698 if (iter->trace->read) {
4699 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4700 if (sret)
4701 goto out;
4702 }
4703
4704waitagain:
4705 sret = tracing_wait_pipe(filp);
4706 if (sret <= 0)
4707 goto out;
4708
b3806b43 4709 /* stop when tracing is finished */
ff98781b
EGM
4710 if (trace_empty(iter)) {
4711 sret = 0;
107bad8b 4712 goto out;
ff98781b 4713 }
b3806b43
SR
4714
4715 if (cnt >= PAGE_SIZE)
4716 cnt = PAGE_SIZE - 1;
4717
53d0aa77 4718 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4719 memset(&iter->seq, 0,
4720 sizeof(struct trace_iterator) -
4721 offsetof(struct trace_iterator, seq));
ed5467da 4722 cpumask_clear(iter->started);
4823ed7e 4723 iter->pos = -1;
b3806b43 4724
4f535968 4725 trace_event_read_lock();
7e53bd42 4726 trace_access_lock(iter->cpu_file);
955b61e5 4727 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4728 enum print_line_t ret;
5ac48378 4729 int save_len = iter->seq.seq.len;
088b1e42 4730
f9896bf3 4731 ret = print_trace_line(iter);
2c4f035f 4732 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4733 /* don't print partial lines */
5ac48378 4734 iter->seq.seq.len = save_len;
b3806b43 4735 break;
088b1e42 4736 }
b91facc3
FW
4737 if (ret != TRACE_TYPE_NO_CONSUME)
4738 trace_consume(iter);
b3806b43 4739
5ac48378 4740 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4741 break;
ee5e51f5
JO
4742
4743 /*
4744 * Setting the full flag means we reached the trace_seq buffer
4745 * size and we should leave by partial output condition above.
4746 * One of the trace_seq_* functions is not used properly.
4747 */
4748 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4749 iter->ent->type);
b3806b43 4750 }
7e53bd42 4751 trace_access_unlock(iter->cpu_file);
4f535968 4752 trace_event_read_unlock();
b3806b43 4753
b3806b43 4754 /* Now copy what we have to the user */
6c6c2796 4755 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4756 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4757 trace_seq_init(&iter->seq);
9ff4b974
PP
4758
4759 /*
25985edc 4760 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4761 * entries, go back to wait for more entries.
4762 */
6c6c2796 4763 if (sret == -EBUSY)
9ff4b974 4764 goto waitagain;
b3806b43 4765
107bad8b 4766out:
d7350c3f 4767 mutex_unlock(&iter->mutex);
107bad8b 4768
6c6c2796 4769 return sret;
b3806b43
SR
4770}
4771
3c56819b
EGM
4772static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4773 unsigned int idx)
4774{
4775 __free_page(spd->pages[idx]);
4776}
4777
28dfef8f 4778static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4779 .can_merge = 0,
34cd4998 4780 .confirm = generic_pipe_buf_confirm,
92fdd98c 4781 .release = generic_pipe_buf_release,
34cd4998
SR
4782 .steal = generic_pipe_buf_steal,
4783 .get = generic_pipe_buf_get,
3c56819b
EGM
4784};
4785
34cd4998 4786static size_t
fa7c7f6e 4787tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4788{
4789 size_t count;
74f06bb7 4790 int save_len;
34cd4998
SR
4791 int ret;
4792
4793 /* Seq buffer is page-sized, exactly what we need. */
4794 for (;;) {
74f06bb7 4795 save_len = iter->seq.seq.len;
34cd4998 4796 ret = print_trace_line(iter);
74f06bb7
SRRH
4797
4798 if (trace_seq_has_overflowed(&iter->seq)) {
4799 iter->seq.seq.len = save_len;
34cd4998
SR
4800 break;
4801 }
74f06bb7
SRRH
4802
4803 /*
4804 * This should not be hit, because it should only
4805 * be set if the iter->seq overflowed. But check it
4806 * anyway to be safe.
4807 */
34cd4998 4808 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4809 iter->seq.seq.len = save_len;
4810 break;
4811 }
4812
5ac48378 4813 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4814 if (rem < count) {
4815 rem = 0;
4816 iter->seq.seq.len = save_len;
34cd4998
SR
4817 break;
4818 }
4819
74e7ff8c
LJ
4820 if (ret != TRACE_TYPE_NO_CONSUME)
4821 trace_consume(iter);
34cd4998 4822 rem -= count;
955b61e5 4823 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4824 rem = 0;
4825 iter->ent = NULL;
4826 break;
4827 }
4828 }
4829
4830 return rem;
4831}
4832
3c56819b
EGM
4833static ssize_t tracing_splice_read_pipe(struct file *filp,
4834 loff_t *ppos,
4835 struct pipe_inode_info *pipe,
4836 size_t len,
4837 unsigned int flags)
4838{
35f3d14d
JA
4839 struct page *pages_def[PIPE_DEF_BUFFERS];
4840 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4841 struct trace_iterator *iter = filp->private_data;
4842 struct splice_pipe_desc spd = {
35f3d14d
JA
4843 .pages = pages_def,
4844 .partial = partial_def,
34cd4998 4845 .nr_pages = 0, /* This gets updated below. */
047fe360 4846 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4847 .flags = flags,
4848 .ops = &tracing_pipe_buf_ops,
4849 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4850 };
4851 ssize_t ret;
34cd4998 4852 size_t rem;
3c56819b
EGM
4853 unsigned int i;
4854
35f3d14d
JA
4855 if (splice_grow_spd(pipe, &spd))
4856 return -ENOMEM;
4857
d7350c3f 4858 mutex_lock(&iter->mutex);
3c56819b
EGM
4859
4860 if (iter->trace->splice_read) {
4861 ret = iter->trace->splice_read(iter, filp,
4862 ppos, pipe, len, flags);
4863 if (ret)
34cd4998 4864 goto out_err;
3c56819b
EGM
4865 }
4866
4867 ret = tracing_wait_pipe(filp);
4868 if (ret <= 0)
34cd4998 4869 goto out_err;
3c56819b 4870
955b61e5 4871 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4872 ret = -EFAULT;
34cd4998 4873 goto out_err;
3c56819b
EGM
4874 }
4875
4f535968 4876 trace_event_read_lock();
7e53bd42 4877 trace_access_lock(iter->cpu_file);
4f535968 4878
3c56819b 4879 /* Fill as many pages as possible. */
a786c06d 4880 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4881 spd.pages[i] = alloc_page(GFP_KERNEL);
4882 if (!spd.pages[i])
34cd4998 4883 break;
3c56819b 4884
fa7c7f6e 4885 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4886
4887 /* Copy the data into the page, so we can start over. */
4888 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4889 page_address(spd.pages[i]),
5ac48378 4890 trace_seq_used(&iter->seq));
3c56819b 4891 if (ret < 0) {
35f3d14d 4892 __free_page(spd.pages[i]);
3c56819b
EGM
4893 break;
4894 }
35f3d14d 4895 spd.partial[i].offset = 0;
5ac48378 4896 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4897
f9520750 4898 trace_seq_init(&iter->seq);
3c56819b
EGM
4899 }
4900
7e53bd42 4901 trace_access_unlock(iter->cpu_file);
4f535968 4902 trace_event_read_unlock();
d7350c3f 4903 mutex_unlock(&iter->mutex);
3c56819b
EGM
4904
4905 spd.nr_pages = i;
4906
35f3d14d
JA
4907 ret = splice_to_pipe(pipe, &spd);
4908out:
047fe360 4909 splice_shrink_spd(&spd);
35f3d14d 4910 return ret;
3c56819b 4911
34cd4998 4912out_err:
d7350c3f 4913 mutex_unlock(&iter->mutex);
35f3d14d 4914 goto out;
3c56819b
EGM
4915}
4916
a98a3c3f
SR
4917static ssize_t
4918tracing_entries_read(struct file *filp, char __user *ubuf,
4919 size_t cnt, loff_t *ppos)
4920{
0bc392ee
ON
4921 struct inode *inode = file_inode(filp);
4922 struct trace_array *tr = inode->i_private;
4923 int cpu = tracing_get_cpu(inode);
438ced17
VN
4924 char buf[64];
4925 int r = 0;
4926 ssize_t ret;
a98a3c3f 4927
db526ca3 4928 mutex_lock(&trace_types_lock);
438ced17 4929
0bc392ee 4930 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4931 int cpu, buf_size_same;
4932 unsigned long size;
4933
4934 size = 0;
4935 buf_size_same = 1;
4936 /* check if all cpu sizes are same */
4937 for_each_tracing_cpu(cpu) {
4938 /* fill in the size from first enabled cpu */
4939 if (size == 0)
12883efb
SRRH
4940 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4941 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4942 buf_size_same = 0;
4943 break;
4944 }
4945 }
4946
4947 if (buf_size_same) {
4948 if (!ring_buffer_expanded)
4949 r = sprintf(buf, "%lu (expanded: %lu)\n",
4950 size >> 10,
4951 trace_buf_size >> 10);
4952 else
4953 r = sprintf(buf, "%lu\n", size >> 10);
4954 } else
4955 r = sprintf(buf, "X\n");
4956 } else
0bc392ee 4957 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4958
db526ca3
SR
4959 mutex_unlock(&trace_types_lock);
4960
438ced17
VN
4961 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4962 return ret;
a98a3c3f
SR
4963}
4964
4965static ssize_t
4966tracing_entries_write(struct file *filp, const char __user *ubuf,
4967 size_t cnt, loff_t *ppos)
4968{
0bc392ee
ON
4969 struct inode *inode = file_inode(filp);
4970 struct trace_array *tr = inode->i_private;
a98a3c3f 4971 unsigned long val;
4f271a2a 4972 int ret;
a98a3c3f 4973
22fe9b54
PH
4974 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4975 if (ret)
c6caeeb1 4976 return ret;
a98a3c3f
SR
4977
4978 /* must have at least 1 entry */
4979 if (!val)
4980 return -EINVAL;
4981
1696b2b0
SR
4982 /* value is in KB */
4983 val <<= 10;
0bc392ee 4984 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
4985 if (ret < 0)
4986 return ret;
a98a3c3f 4987
cf8517cf 4988 *ppos += cnt;
a98a3c3f 4989
4f271a2a
VN
4990 return cnt;
4991}
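/*
 * Illustrative usage sketch: values written here are taken in KB, and the
 * file exists both globally and per CPU, e.g.
 *
 *	echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *	echo 1024 > /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */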
bf5e6519 4992
f81ab074
VN
4993static ssize_t
4994tracing_total_entries_read(struct file *filp, char __user *ubuf,
4995 size_t cnt, loff_t *ppos)
4996{
4997 struct trace_array *tr = filp->private_data;
4998 char buf[64];
4999 int r, cpu;
5000 unsigned long size = 0, expanded_size = 0;
5001
5002 mutex_lock(&trace_types_lock);
5003 for_each_tracing_cpu(cpu) {
12883efb 5004 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5005 if (!ring_buffer_expanded)
5006 expanded_size += trace_buf_size >> 10;
5007 }
5008 if (ring_buffer_expanded)
5009 r = sprintf(buf, "%lu\n", size);
5010 else
5011 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5012 mutex_unlock(&trace_types_lock);
5013
5014 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5015}
5016
4f271a2a
VN
5017static ssize_t
5018tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5019 size_t cnt, loff_t *ppos)
5020{
5021 /*
5022 * There is no need to read what the user has written, this function
5023 * is just to make sure that there is no error when "echo" is used
5024 */
5025
5026 *ppos += cnt;
a98a3c3f
SR
5027
5028 return cnt;
5029}
5030
4f271a2a
VN
5031static int
5032tracing_free_buffer_release(struct inode *inode, struct file *filp)
5033{
2b6080f2
SR
5034 struct trace_array *tr = inode->i_private;
5035
cf30cf67
SR
5036 /* disable tracing ? */
5037 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5038 tracer_tracing_off(tr);
4f271a2a 5039 /* resize the ring buffer to 0 */
2b6080f2 5040 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5041
7b85af63
SRRH
5042 trace_array_put(tr);
5043
4f271a2a
VN
5044 return 0;
5045}
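/*
 * Illustrative usage sketch: the free_buffer file backed by these handlers
 * shrinks the ring buffer to zero when it is released, e.g.
 *
 *	echo > /sys/kernel/debug/tracing/free_buffer
 *
 * and also turns tracing off first when the disable_on_free option is set.
 */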
5046
5bf9a1ee
PP
5047static ssize_t
5048tracing_mark_write(struct file *filp, const char __user *ubuf,
5049 size_t cnt, loff_t *fpos)
5050{
d696b58c 5051 unsigned long addr = (unsigned long)ubuf;
2d71619c 5052 struct trace_array *tr = filp->private_data;
d696b58c
SR
5053 struct ring_buffer_event *event;
5054 struct ring_buffer *buffer;
5055 struct print_entry *entry;
5056 unsigned long irq_flags;
5057 struct page *pages[2];
6edb2a8a 5058 void *map_page[2];
d696b58c
SR
5059 int nr_pages = 1;
5060 ssize_t written;
d696b58c
SR
5061 int offset;
5062 int size;
5063 int len;
5064 int ret;
6edb2a8a 5065 int i;
5bf9a1ee 5066
c76f0694 5067 if (tracing_disabled)
5bf9a1ee
PP
5068 return -EINVAL;
5069
5224c3a3
MSB
5070 if (!(trace_flags & TRACE_ITER_MARKERS))
5071 return -EINVAL;
5072
5bf9a1ee
PP
5073 if (cnt > TRACE_BUF_SIZE)
5074 cnt = TRACE_BUF_SIZE;
5075
d696b58c
SR
5076 /*
5077 * Userspace is injecting traces into the kernel trace buffer.
5078 * We want to be as non-intrusive as possible.
5079 * To do so, we do not want to allocate any special buffers
5080 * or take any locks, but instead write the userspace data
5081 * straight into the ring buffer.
5082 *
5083 * First we need to pin the userspace buffer into memory,
5084 * which it most likely already is, because it was just referenced.
5085 * But there's no guarantee that it is. By using get_user_pages_fast()
5086 * and kmap_atomic/kunmap_atomic() we can get access to the
5087 * pages directly. We then write the data directly into the
5088 * ring buffer.
5089 */
5090 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5091
d696b58c
SR
5092 /* check if we cross pages */
5093 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5094 nr_pages = 2;
5095
5096 offset = addr & (PAGE_SIZE - 1);
5097 addr &= PAGE_MASK;
5098
5099 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5100 if (ret < nr_pages) {
5101 while (--ret >= 0)
5102 put_page(pages[ret]);
5103 written = -EFAULT;
5104 goto out;
5bf9a1ee 5105 }
d696b58c 5106
6edb2a8a
SR
5107 for (i = 0; i < nr_pages; i++)
5108 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5109
5110 local_save_flags(irq_flags);
5111 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5112 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5113 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5114 irq_flags, preempt_count());
5115 if (!event) {
5116 /* Ring buffer disabled, return as if not open for write */
5117 written = -EBADF;
5118 goto out_unlock;
5bf9a1ee 5119 }
d696b58c
SR
5120
5121 entry = ring_buffer_event_data(event);
5122 entry->ip = _THIS_IP_;
5123
5124 if (nr_pages == 2) {
5125 len = PAGE_SIZE - offset;
6edb2a8a
SR
5126 memcpy(&entry->buf, map_page[0] + offset, len);
5127 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5128 } else
6edb2a8a 5129 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5130
d696b58c
SR
5131 if (entry->buf[cnt - 1] != '\n') {
5132 entry->buf[cnt] = '\n';
5133 entry->buf[cnt + 1] = '\0';
5134 } else
5135 entry->buf[cnt] = '\0';
5136
7ffbd48d 5137 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5138
d696b58c 5139 written = cnt;
5bf9a1ee 5140
d696b58c 5141 *fpos += written;
1aa54bca 5142
d696b58c 5143 out_unlock:
7215853e 5144 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5145 kunmap_atomic(map_page[i]);
5146 put_page(pages[i]);
5147 }
d696b58c 5148 out:
1aa54bca 5149 return written;
5bf9a1ee
PP
5150}
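/*
 * Illustrative usage (not specific to any caller): this is the write
 * handler of the trace_marker file, so user space can annotate the trace
 * with e.g.
 *
 *	echo "hit the slow path" > /sys/kernel/debug/tracing/trace_marker
 *
 * which appears in the trace output as a "tracing_mark_write:" line.
 */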
5151
13f16d20 5152static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5153{
2b6080f2 5154 struct trace_array *tr = m->private;
5079f326
Z
5155 int i;
5156
5157 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5158 seq_printf(m,
5079f326 5159 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5160 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5161 i == tr->clock_id ? "]" : "");
13f16d20 5162 seq_putc(m, '\n');
5079f326 5163
13f16d20 5164 return 0;
5079f326
Z
5165}
5166
e1e232ca 5167static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5168{
5079f326
Z
5169 int i;
5170
5079f326
Z
5171 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5172 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5173 break;
5174 }
5175 if (i == ARRAY_SIZE(trace_clocks))
5176 return -EINVAL;
5177
5079f326
Z
5178 mutex_lock(&trace_types_lock);
5179
2b6080f2
SR
5180 tr->clock_id = i;
5181
12883efb 5182 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5183
60303ed3
DS
5184 /*
5185 * New clock may not be consistent with the previous clock.
5186 * Reset the buffer so that it doesn't have incomparable timestamps.
5187 */
9457158b 5188 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5189
5190#ifdef CONFIG_TRACER_MAX_TRACE
5191 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5192 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5193 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5194#endif
60303ed3 5195
5079f326
Z
5196 mutex_unlock(&trace_types_lock);
5197
e1e232ca
SR
5198 return 0;
5199}
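/*
 * Illustrative usage sketch (the exact clock list depends on the kernel
 * configuration): the clock is inspected and switched through the
 * trace_clock file, e.g.
 *
 *	cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */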
5200
5201static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5202 size_t cnt, loff_t *fpos)
5203{
5204 struct seq_file *m = filp->private_data;
5205 struct trace_array *tr = m->private;
5206 char buf[64];
5207 const char *clockstr;
5208 int ret;
5209
5210 if (cnt >= sizeof(buf))
5211 return -EINVAL;
5212
5213 if (copy_from_user(&buf, ubuf, cnt))
5214 return -EFAULT;
5215
5216 buf[cnt] = 0;
5217
5218 clockstr = strstrip(buf);
5219
5220 ret = tracing_set_clock(tr, clockstr);
5221 if (ret)
5222 return ret;
5223
5079f326
Z
5224 *fpos += cnt;
5225
5226 return cnt;
5227}
5228
13f16d20
LZ
5229static int tracing_clock_open(struct inode *inode, struct file *file)
5230{
7b85af63
SRRH
5231 struct trace_array *tr = inode->i_private;
5232 int ret;
5233
13f16d20
LZ
5234 if (tracing_disabled)
5235 return -ENODEV;
2b6080f2 5236
7b85af63
SRRH
5237 if (trace_array_get(tr))
5238 return -ENODEV;
5239
5240 ret = single_open(file, tracing_clock_show, inode->i_private);
5241 if (ret < 0)
5242 trace_array_put(tr);
5243
5244 return ret;
13f16d20
LZ
5245}
5246
6de58e62
SRRH
5247struct ftrace_buffer_info {
5248 struct trace_iterator iter;
5249 void *spare;
5250 unsigned int read;
5251};
5252
debdd57f
HT
5253#ifdef CONFIG_TRACER_SNAPSHOT
5254static int tracing_snapshot_open(struct inode *inode, struct file *file)
5255{
6484c71c 5256 struct trace_array *tr = inode->i_private;
debdd57f 5257 struct trace_iterator *iter;
2b6080f2 5258 struct seq_file *m;
debdd57f
HT
5259 int ret = 0;
5260
ff451961
SRRH
5261 if (trace_array_get(tr) < 0)
5262 return -ENODEV;
5263
debdd57f 5264 if (file->f_mode & FMODE_READ) {
6484c71c 5265 iter = __tracing_open(inode, file, true);
debdd57f
HT
5266 if (IS_ERR(iter))
5267 ret = PTR_ERR(iter);
2b6080f2
SR
5268 } else {
5269 /* Writes still need the seq_file to hold the private data */
f77d09a3 5270 ret = -ENOMEM;
2b6080f2
SR
5271 m = kzalloc(sizeof(*m), GFP_KERNEL);
5272 if (!m)
f77d09a3 5273 goto out;
2b6080f2
SR
5274 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5275 if (!iter) {
5276 kfree(m);
f77d09a3 5277 goto out;
2b6080f2 5278 }
f77d09a3
AL
5279 ret = 0;
5280
ff451961 5281 iter->tr = tr;
6484c71c
ON
5282 iter->trace_buffer = &tr->max_buffer;
5283 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5284 m->private = iter;
5285 file->private_data = m;
debdd57f 5286 }
f77d09a3 5287out:
ff451961
SRRH
5288 if (ret < 0)
5289 trace_array_put(tr);
5290
debdd57f
HT
5291 return ret;
5292}
5293
5294static ssize_t
5295tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5296 loff_t *ppos)
5297{
2b6080f2
SR
5298 struct seq_file *m = filp->private_data;
5299 struct trace_iterator *iter = m->private;
5300 struct trace_array *tr = iter->tr;
debdd57f
HT
5301 unsigned long val;
5302 int ret;
5303
5304 ret = tracing_update_buffers();
5305 if (ret < 0)
5306 return ret;
5307
5308 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5309 if (ret)
5310 return ret;
5311
5312 mutex_lock(&trace_types_lock);
5313
2b6080f2 5314 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5315 ret = -EBUSY;
5316 goto out;
5317 }
5318
5319 switch (val) {
5320 case 0:
f1affcaa
SRRH
5321 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5322 ret = -EINVAL;
5323 break;
debdd57f 5324 }
3209cff4
SRRH
5325 if (tr->allocated_snapshot)
5326 free_snapshot(tr);
debdd57f
HT
5327 break;
5328 case 1:
f1affcaa
SRRH
5329/* Only allow per-cpu swap if the ring buffer supports it */
5330#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5331 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5332 ret = -EINVAL;
5333 break;
5334 }
5335#endif
45ad21ca 5336 if (!tr->allocated_snapshot) {
3209cff4 5337 ret = alloc_snapshot(tr);
debdd57f
HT
5338 if (ret < 0)
5339 break;
debdd57f 5340 }
debdd57f
HT
5341 local_irq_disable();
5342 /* Now, we're going to swap */
f1affcaa 5343 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5344 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5345 else
ce9bae55 5346 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5347 local_irq_enable();
5348 break;
5349 default:
45ad21ca 5350 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5351 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5352 tracing_reset_online_cpus(&tr->max_buffer);
5353 else
5354 tracing_reset(&tr->max_buffer, iter->cpu_file);
5355 }
debdd57f
HT
5356 break;
5357 }
5358
5359 if (ret >= 0) {
5360 *ppos += cnt;
5361 ret = cnt;
5362 }
5363out:
5364 mutex_unlock(&trace_types_lock);
5365 return ret;
5366}
2b6080f2
SR
5367
5368static int tracing_snapshot_release(struct inode *inode, struct file *file)
5369{
5370 struct seq_file *m = file->private_data;
ff451961
SRRH
5371 int ret;
5372
5373 ret = tracing_release(inode, file);
2b6080f2
SR
5374
5375 if (file->f_mode & FMODE_READ)
ff451961 5376 return ret;
2b6080f2
SR
5377
5378 /* If write only, the seq_file is just a stub */
5379 if (m)
5380 kfree(m->private);
5381 kfree(m);
5382
5383 return 0;
5384}
5385
6de58e62
SRRH
5386static int tracing_buffers_open(struct inode *inode, struct file *filp);
5387static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5388 size_t count, loff_t *ppos);
5389static int tracing_buffers_release(struct inode *inode, struct file *file);
5390static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5391 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5392
5393static int snapshot_raw_open(struct inode *inode, struct file *filp)
5394{
5395 struct ftrace_buffer_info *info;
5396 int ret;
5397
5398 ret = tracing_buffers_open(inode, filp);
5399 if (ret < 0)
5400 return ret;
5401
5402 info = filp->private_data;
5403
5404 if (info->iter.trace->use_max_tr) {
5405 tracing_buffers_release(inode, filp);
5406 return -EBUSY;
5407 }
5408
5409 info->iter.snapshot = true;
5410 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5411
5412 return ret;
5413}
5414
debdd57f
HT
5415#endif /* CONFIG_TRACER_SNAPSHOT */
5416
5417
6508fa76
SF
5418static const struct file_operations tracing_thresh_fops = {
5419 .open = tracing_open_generic,
5420 .read = tracing_thresh_read,
5421 .write = tracing_thresh_write,
5422 .llseek = generic_file_llseek,
5423};
5424
5e2336a0 5425static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5426 .open = tracing_open_generic,
5427 .read = tracing_max_lat_read,
5428 .write = tracing_max_lat_write,
b444786f 5429 .llseek = generic_file_llseek,
bc0c38d1
SR
5430};
5431
5e2336a0 5432static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5433 .open = tracing_open_generic,
5434 .read = tracing_set_trace_read,
5435 .write = tracing_set_trace_write,
b444786f 5436 .llseek = generic_file_llseek,
bc0c38d1
SR
5437};
5438
5e2336a0 5439static const struct file_operations tracing_pipe_fops = {
4bf39a94 5440 .open = tracing_open_pipe,
2a2cc8f7 5441 .poll = tracing_poll_pipe,
4bf39a94 5442 .read = tracing_read_pipe,
3c56819b 5443 .splice_read = tracing_splice_read_pipe,
4bf39a94 5444 .release = tracing_release_pipe,
b444786f 5445 .llseek = no_llseek,
b3806b43
SR
5446};
5447
5e2336a0 5448static const struct file_operations tracing_entries_fops = {
0bc392ee 5449 .open = tracing_open_generic_tr,
a98a3c3f
SR
5450 .read = tracing_entries_read,
5451 .write = tracing_entries_write,
b444786f 5452 .llseek = generic_file_llseek,
0bc392ee 5453 .release = tracing_release_generic_tr,
a98a3c3f
SR
5454};
5455
f81ab074 5456static const struct file_operations tracing_total_entries_fops = {
7b85af63 5457 .open = tracing_open_generic_tr,
f81ab074
VN
5458 .read = tracing_total_entries_read,
5459 .llseek = generic_file_llseek,
7b85af63 5460 .release = tracing_release_generic_tr,
f81ab074
VN
5461};
5462
4f271a2a 5463static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5464 .open = tracing_open_generic_tr,
4f271a2a
VN
5465 .write = tracing_free_buffer_write,
5466 .release = tracing_free_buffer_release,
5467};
5468
5e2336a0 5469static const struct file_operations tracing_mark_fops = {
7b85af63 5470 .open = tracing_open_generic_tr,
5bf9a1ee 5471 .write = tracing_mark_write,
b444786f 5472 .llseek = generic_file_llseek,
7b85af63 5473 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5474};
5475
5079f326 5476static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5477 .open = tracing_clock_open,
5478 .read = seq_read,
5479 .llseek = seq_lseek,
7b85af63 5480 .release = tracing_single_release_tr,
5079f326
Z
5481 .write = tracing_clock_write,
5482};
5483
debdd57f
HT
5484#ifdef CONFIG_TRACER_SNAPSHOT
5485static const struct file_operations snapshot_fops = {
5486 .open = tracing_snapshot_open,
5487 .read = seq_read,
5488 .write = tracing_snapshot_write,
098c879e 5489 .llseek = tracing_lseek,
2b6080f2 5490 .release = tracing_snapshot_release,
debdd57f 5491};
debdd57f 5492
6de58e62
SRRH
5493static const struct file_operations snapshot_raw_fops = {
5494 .open = snapshot_raw_open,
5495 .read = tracing_buffers_read,
5496 .release = tracing_buffers_release,
5497 .splice_read = tracing_buffers_splice_read,
5498 .llseek = no_llseek,
2cadf913
SR
5499};
5500
6de58e62
SRRH
5501#endif /* CONFIG_TRACER_SNAPSHOT */
5502
2cadf913
SR
5503static int tracing_buffers_open(struct inode *inode, struct file *filp)
5504{
46ef2be0 5505 struct trace_array *tr = inode->i_private;
2cadf913 5506 struct ftrace_buffer_info *info;
7b85af63 5507 int ret;
2cadf913
SR
5508
5509 if (tracing_disabled)
5510 return -ENODEV;
5511
7b85af63
SRRH
5512 if (trace_array_get(tr) < 0)
5513 return -ENODEV;
5514
2cadf913 5515 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5516 if (!info) {
5517 trace_array_put(tr);
2cadf913 5518 return -ENOMEM;
7b85af63 5519 }
2cadf913 5520
a695cb58
SRRH
5521 mutex_lock(&trace_types_lock);
5522
cc60cdc9 5523 info->iter.tr = tr;
46ef2be0 5524 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5525 info->iter.trace = tr->current_trace;
12883efb 5526 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5527 info->spare = NULL;
2cadf913 5528 /* Force reading ring buffer for first read */
cc60cdc9 5529 info->read = (unsigned int)-1;
2cadf913
SR
5530
5531 filp->private_data = info;
5532
cf6ab6d9
SRRH
5533 tr->current_trace->ref++;
5534
a695cb58
SRRH
5535 mutex_unlock(&trace_types_lock);
5536
7b85af63
SRRH
5537 ret = nonseekable_open(inode, filp);
5538 if (ret < 0)
5539 trace_array_put(tr);
5540
5541 return ret;
2cadf913
SR
5542}
5543
cc60cdc9
SR
5544static unsigned int
5545tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5546{
5547 struct ftrace_buffer_info *info = filp->private_data;
5548 struct trace_iterator *iter = &info->iter;
5549
5550 return trace_poll(iter, filp, poll_table);
5551}
5552
2cadf913
SR
5553static ssize_t
5554tracing_buffers_read(struct file *filp, char __user *ubuf,
5555 size_t count, loff_t *ppos)
5556{
5557 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5558 struct trace_iterator *iter = &info->iter;
2cadf913 5559 ssize_t ret;
6de58e62 5560 ssize_t size;
2cadf913 5561
2dc5d12b
SR
5562 if (!count)
5563 return 0;
5564
6de58e62 5565#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5566 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5567 return -EBUSY;
6de58e62
SRRH
5568#endif
5569
ddd538f3 5570 if (!info->spare)
12883efb
SRRH
5571 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5572 iter->cpu_file);
ddd538f3 5573 if (!info->spare)
d716ff71 5574 return -ENOMEM;
ddd538f3 5575
2cadf913
SR
5576 /* Do we have previous read data to read? */
5577 if (info->read < PAGE_SIZE)
5578 goto read;
5579
b627344f 5580 again:
cc60cdc9 5581 trace_access_lock(iter->cpu_file);
12883efb 5582 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5583 &info->spare,
5584 count,
cc60cdc9
SR
5585 iter->cpu_file, 0);
5586 trace_access_unlock(iter->cpu_file);
2cadf913 5587
b627344f
SR
5588 if (ret < 0) {
5589 if (trace_empty(iter)) {
d716ff71
SRRH
5590 if ((filp->f_flags & O_NONBLOCK))
5591 return -EAGAIN;
5592
e30f53aa 5593 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5594 if (ret)
5595 return ret;
5596
b627344f
SR
5597 goto again;
5598 }
d716ff71 5599 return 0;
b627344f 5600 }
436fc280 5601
436fc280 5602 info->read = 0;
b627344f 5603 read:
2cadf913
SR
5604 size = PAGE_SIZE - info->read;
5605 if (size > count)
5606 size = count;
5607
5608 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5609 if (ret == size)
5610 return -EFAULT;
5611
2dc5d12b
SR
5612 size -= ret;
5613
2cadf913
SR
5614 *ppos += size;
5615 info->read += size;
5616
5617 return size;
5618}
5619
5620static int tracing_buffers_release(struct inode *inode, struct file *file)
5621{
5622 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5623 struct trace_iterator *iter = &info->iter;
2cadf913 5624
a695cb58
SRRH
5625 mutex_lock(&trace_types_lock);
5626
cf6ab6d9
SRRH
5627 iter->tr->current_trace->ref--;
5628
ff451961 5629 __trace_array_put(iter->tr);
2cadf913 5630
ddd538f3 5631 if (info->spare)
12883efb 5632 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5633 kfree(info);
5634
a695cb58
SRRH
5635 mutex_unlock(&trace_types_lock);
5636
2cadf913
SR
5637 return 0;
5638}
5639
5640struct buffer_ref {
5641 struct ring_buffer *buffer;
5642 void *page;
5643 int ref;
5644};
5645
5646static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5647 struct pipe_buffer *buf)
5648{
5649 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5650
5651 if (--ref->ref)
5652 return;
5653
5654 ring_buffer_free_read_page(ref->buffer, ref->page);
5655 kfree(ref);
5656 buf->private = 0;
5657}
5658
2cadf913
SR
5659static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5660 struct pipe_buffer *buf)
5661{
5662 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5663
5664 ref->ref++;
5665}
5666
5667/* Pipe buffer operations for a buffer. */
28dfef8f 5668static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5669 .can_merge = 0,
2cadf913
SR
5670 .confirm = generic_pipe_buf_confirm,
5671 .release = buffer_pipe_buf_release,
d55cb6cf 5672 .steal = generic_pipe_buf_steal,
2cadf913
SR
5673 .get = buffer_pipe_buf_get,
5674};
5675
5676/*
5677 * Callback from splice_to_pipe(), if we need to release some pages
 5678  * at the end of the spd in case we errored out in filling the pipe.
5679 */
5680static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5681{
5682 struct buffer_ref *ref =
5683 (struct buffer_ref *)spd->partial[i].private;
5684
5685 if (--ref->ref)
5686 return;
5687
5688 ring_buffer_free_read_page(ref->buffer, ref->page);
5689 kfree(ref);
5690 spd->partial[i].private = 0;
5691}
5692
5693static ssize_t
5694tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5695 struct pipe_inode_info *pipe, size_t len,
5696 unsigned int flags)
5697{
5698 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5699 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5700 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5701 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5702 struct splice_pipe_desc spd = {
35f3d14d
JA
5703 .pages = pages_def,
5704 .partial = partial_def,
047fe360 5705 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5706 .flags = flags,
5707 .ops = &buffer_pipe_buf_ops,
5708 .spd_release = buffer_spd_release,
5709 };
5710 struct buffer_ref *ref;
93459c6c 5711 int entries, size, i;
07906da7 5712 ssize_t ret = 0;
2cadf913 5713
6de58e62 5714#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5715 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5716 return -EBUSY;
6de58e62
SRRH
5717#endif
5718
d716ff71
SRRH
5719 if (splice_grow_spd(pipe, &spd))
5720 return -ENOMEM;
35f3d14d 5721
d716ff71
SRRH
5722 if (*ppos & (PAGE_SIZE - 1))
5723 return -EINVAL;
93cfb3c9
LJ
5724
5725 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5726 if (len < PAGE_SIZE)
5727 return -EINVAL;
93cfb3c9
LJ
5728 len &= PAGE_MASK;
5729 }
5730
cc60cdc9
SR
5731 again:
5732 trace_access_lock(iter->cpu_file);
12883efb 5733 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5734
a786c06d 5735 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5736 struct page *page;
5737 int r;
5738
5739 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5740 if (!ref) {
5741 ret = -ENOMEM;
2cadf913 5742 break;
07906da7 5743 }
2cadf913 5744
7267fa68 5745 ref->ref = 1;
12883efb 5746 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5747 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5748 if (!ref->page) {
07906da7 5749 ret = -ENOMEM;
2cadf913
SR
5750 kfree(ref);
5751 break;
5752 }
5753
5754 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5755 len, iter->cpu_file, 1);
2cadf913 5756 if (r < 0) {
7ea59064 5757 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5758 kfree(ref);
5759 break;
5760 }
5761
5762 /*
 5763  * zero out any leftover data; this is going to
 5764  * user land.
5765 */
5766 size = ring_buffer_page_len(ref->page);
5767 if (size < PAGE_SIZE)
5768 memset(ref->page + size, 0, PAGE_SIZE - size);
5769
5770 page = virt_to_page(ref->page);
5771
5772 spd.pages[i] = page;
5773 spd.partial[i].len = PAGE_SIZE;
5774 spd.partial[i].offset = 0;
5775 spd.partial[i].private = (unsigned long)ref;
5776 spd.nr_pages++;
93cfb3c9 5777 *ppos += PAGE_SIZE;
93459c6c 5778
12883efb 5779 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5780 }
5781
cc60cdc9 5782 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5783 spd.nr_pages = i;
5784
5785 /* did we read anything? */
5786 if (!spd.nr_pages) {
07906da7 5787 if (ret)
d716ff71
SRRH
5788 return ret;
5789
5790 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5791 return -EAGAIN;
07906da7 5792
e30f53aa 5793 ret = wait_on_pipe(iter, true);
8b8b3683 5794 if (ret)
d716ff71 5795 return ret;
e30f53aa 5796
cc60cdc9 5797 goto again;
2cadf913
SR
5798 }
5799
5800 ret = splice_to_pipe(pipe, &spd);
047fe360 5801 splice_shrink_spd(&spd);
6de58e62 5802
2cadf913
SR
5803 return ret;
5804}
5805
5806static const struct file_operations tracing_buffers_fops = {
5807 .open = tracing_buffers_open,
5808 .read = tracing_buffers_read,
cc60cdc9 5809 .poll = tracing_buffers_poll,
2cadf913
SR
5810 .release = tracing_buffers_release,
5811 .splice_read = tracing_buffers_splice_read,
5812 .llseek = no_llseek,
5813};
5814
c8d77183
SR
5815static ssize_t
5816tracing_stats_read(struct file *filp, char __user *ubuf,
5817 size_t count, loff_t *ppos)
5818{
4d3435b8
ON
5819 struct inode *inode = file_inode(filp);
5820 struct trace_array *tr = inode->i_private;
12883efb 5821 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5822 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5823 struct trace_seq *s;
5824 unsigned long cnt;
c64e148a
VN
5825 unsigned long long t;
5826 unsigned long usec_rem;
c8d77183 5827
e4f2d10f 5828 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5829 if (!s)
a646365c 5830 return -ENOMEM;
c8d77183
SR
5831
5832 trace_seq_init(s);
5833
12883efb 5834 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5835 trace_seq_printf(s, "entries: %ld\n", cnt);
5836
12883efb 5837 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5838 trace_seq_printf(s, "overrun: %ld\n", cnt);
5839
12883efb 5840 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5841 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5842
12883efb 5843 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5844 trace_seq_printf(s, "bytes: %ld\n", cnt);
5845
58e8eedf 5846 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5847 /* local or global for trace_clock */
12883efb 5848 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5849 usec_rem = do_div(t, USEC_PER_SEC);
5850 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5851 t, usec_rem);
5852
12883efb 5853 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5854 usec_rem = do_div(t, USEC_PER_SEC);
5855 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5856 } else {
5857 /* counter or tsc mode for trace_clock */
5858 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5859 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5860
11043d8b 5861 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5862 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5863 }
c64e148a 5864
12883efb 5865 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5866 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5867
12883efb 5868 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5869 trace_seq_printf(s, "read events: %ld\n", cnt);
5870
5ac48378
SRRH
5871 count = simple_read_from_buffer(ubuf, count, ppos,
5872 s->buffer, trace_seq_used(s));
c8d77183
SR
5873
5874 kfree(s);
5875
5876 return count;
5877}
5878
5879static const struct file_operations tracing_stats_fops = {
4d3435b8 5880 .open = tracing_open_generic_tr,
c8d77183 5881 .read = tracing_stats_read,
b444786f 5882 .llseek = generic_file_llseek,
4d3435b8 5883 .release = tracing_release_generic_tr,
c8d77183
SR
5884};
5885
bc0c38d1
SR
5886#ifdef CONFIG_DYNAMIC_FTRACE
5887
b807c3d0
SR
5888int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5889{
5890 return 0;
5891}
5892
bc0c38d1 5893static ssize_t
b807c3d0 5894tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5895 size_t cnt, loff_t *ppos)
5896{
a26a2a27
SR
5897 static char ftrace_dyn_info_buffer[1024];
5898 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5899 unsigned long *p = filp->private_data;
b807c3d0 5900 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5901 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5902 int r;
5903
b807c3d0
SR
5904 mutex_lock(&dyn_info_mutex);
5905 r = sprintf(buf, "%ld ", *p);
4bf39a94 5906
a26a2a27 5907 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5908 buf[r++] = '\n';
5909
5910 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5911
5912 mutex_unlock(&dyn_info_mutex);
5913
5914 return r;
bc0c38d1
SR
5915}
5916
5e2336a0 5917static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5918 .open = tracing_open_generic,
b807c3d0 5919 .read = tracing_read_dyn_info,
b444786f 5920 .llseek = generic_file_llseek,
bc0c38d1 5921};
77fd5c15 5922#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5923
77fd5c15
SRRH
5924#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5925static void
5926ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5927{
5928 tracing_snapshot();
5929}
bc0c38d1 5930
77fd5c15
SRRH
5931static void
5932ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5933{
77fd5c15
SRRH
5934 unsigned long *count = (long *)data;
5935
5936 if (!*count)
5937 return;
bc0c38d1 5938
77fd5c15
SRRH
5939 if (*count != -1)
5940 (*count)--;
5941
5942 tracing_snapshot();
5943}
5944
5945static int
5946ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5947 struct ftrace_probe_ops *ops, void *data)
5948{
5949 long count = (long)data;
5950
5951 seq_printf(m, "%ps:", (void *)ip);
5952
fa6f0cc7 5953 seq_puts(m, "snapshot");
77fd5c15
SRRH
5954
5955 if (count == -1)
fa6f0cc7 5956 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
5957 else
5958 seq_printf(m, ":count=%ld\n", count);
5959
5960 return 0;
5961}
5962
5963static struct ftrace_probe_ops snapshot_probe_ops = {
5964 .func = ftrace_snapshot,
5965 .print = ftrace_snapshot_print,
5966};
5967
5968static struct ftrace_probe_ops snapshot_count_probe_ops = {
5969 .func = ftrace_count_snapshot,
5970 .print = ftrace_snapshot_print,
5971};
5972
5973static int
5974ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5975 char *glob, char *cmd, char *param, int enable)
5976{
5977 struct ftrace_probe_ops *ops;
5978 void *count = (void *)-1;
5979 char *number;
5980 int ret;
5981
5982 /* hash funcs only work with set_ftrace_filter */
5983 if (!enable)
5984 return -EINVAL;
5985
5986 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5987
5988 if (glob[0] == '!') {
5989 unregister_ftrace_function_probe_func(glob+1, ops);
5990 return 0;
5991 }
5992
5993 if (!param)
5994 goto out_reg;
5995
5996 number = strsep(&param, ":");
5997
5998 if (!strlen(number))
5999 goto out_reg;
6000
6001 /*
6002 * We use the callback data field (which is a pointer)
6003 * as our counter.
6004 */
6005 ret = kstrtoul(number, 0, (unsigned long *)&count);
6006 if (ret)
6007 return ret;
6008
6009 out_reg:
6010 ret = register_ftrace_function_probe(glob, ops, count);
6011
6012 if (ret >= 0)
6013 alloc_snapshot(&global_trace);
6014
6015 return ret < 0 ? ret : 0;
6016}
6017
6018static struct ftrace_func_command ftrace_snapshot_cmd = {
6019 .name = "snapshot",
6020 .func = ftrace_trace_snapshot_callback,
6021};
6022
38de93ab 6023static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6024{
6025 return register_ftrace_command(&ftrace_snapshot_cmd);
6026}
6027#else
38de93ab 6028static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6029#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6030
7eeafbca 6031static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6032{
8434dc93
SRRH
6033 if (WARN_ON(!tr->dir))
6034 return ERR_PTR(-ENODEV);
6035
6036 /* Top directory uses NULL as the parent */
6037 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6038 return NULL;
6039
6040 /* All sub buffers have a descriptor */
2b6080f2 6041 return tr->dir;
bc0c38d1
SR
6042}
6043
2b6080f2 6044static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6045{
b04cc6b1
FW
6046 struct dentry *d_tracer;
6047
2b6080f2
SR
6048 if (tr->percpu_dir)
6049 return tr->percpu_dir;
b04cc6b1 6050
7eeafbca 6051 d_tracer = tracing_get_dentry(tr);
14a5ae40 6052 if (IS_ERR(d_tracer))
b04cc6b1
FW
6053 return NULL;
6054
8434dc93 6055 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6056
2b6080f2 6057 WARN_ONCE(!tr->percpu_dir,
8434dc93 6058 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6059
2b6080f2 6060 return tr->percpu_dir;
b04cc6b1
FW
6061}
6062
649e9c70
ON
6063static struct dentry *
6064trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6065 void *data, long cpu, const struct file_operations *fops)
6066{
6067 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6068
6069 if (ret) /* See tracing_get_cpu() */
7682c918 6070 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6071 return ret;
6072}
6073
2b6080f2 6074static void
8434dc93 6075tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6076{
2b6080f2 6077 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6078 struct dentry *d_cpu;
dd49a38c 6079 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6080
0a3d7ce7
NK
6081 if (!d_percpu)
6082 return;
6083
dd49a38c 6084 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6085 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6086 if (!d_cpu) {
8434dc93 6087 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6088 return;
6089 }
b04cc6b1 6090
8656e7a2 6091 /* per cpu trace_pipe */
649e9c70 6092 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6093 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6094
6095 /* per cpu trace */
649e9c70 6096 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6097 tr, cpu, &tracing_fops);
7f96f93f 6098
649e9c70 6099 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6100 tr, cpu, &tracing_buffers_fops);
7f96f93f 6101
649e9c70 6102 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6103 tr, cpu, &tracing_stats_fops);
438ced17 6104
649e9c70 6105 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6106 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6107
6108#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6109 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6110 tr, cpu, &snapshot_fops);
6de58e62 6111
649e9c70 6112 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6113 tr, cpu, &snapshot_raw_fops);
f1affcaa 6114#endif
b04cc6b1
FW
6115}
6116
60a11774
SR
6117#ifdef CONFIG_FTRACE_SELFTEST
6118/* Let selftest have access to static functions in this file */
6119#include "trace_selftest.c"
6120#endif
6121
577b785f
SR
6122static ssize_t
6123trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6124 loff_t *ppos)
6125{
6126 struct trace_option_dentry *topt = filp->private_data;
6127 char *buf;
6128
6129 if (topt->flags->val & topt->opt->bit)
6130 buf = "1\n";
6131 else
6132 buf = "0\n";
6133
6134 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6135}
6136
6137static ssize_t
6138trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6139 loff_t *ppos)
6140{
6141 struct trace_option_dentry *topt = filp->private_data;
6142 unsigned long val;
577b785f
SR
6143 int ret;
6144
22fe9b54
PH
6145 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6146 if (ret)
577b785f
SR
6147 return ret;
6148
8d18eaaf
LZ
6149 if (val != 0 && val != 1)
6150 return -EINVAL;
577b785f 6151
8d18eaaf 6152 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6153 mutex_lock(&trace_types_lock);
8c1a49ae 6154 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6155 topt->opt, !val);
577b785f
SR
6156 mutex_unlock(&trace_types_lock);
6157 if (ret)
6158 return ret;
577b785f
SR
6159 }
6160
6161 *ppos += cnt;
6162
6163 return cnt;
6164}
6165
6166
6167static const struct file_operations trace_options_fops = {
6168 .open = tracing_open_generic,
6169 .read = trace_options_read,
6170 .write = trace_options_write,
b444786f 6171 .llseek = generic_file_llseek,
577b785f
SR
6172};
6173
a8259075
SR
6174static ssize_t
6175trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6176 loff_t *ppos)
6177{
6178 long index = (long)filp->private_data;
6179 char *buf;
6180
6181 if (trace_flags & (1 << index))
6182 buf = "1\n";
6183 else
6184 buf = "0\n";
6185
6186 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6187}
6188
6189static ssize_t
6190trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6191 loff_t *ppos)
6192{
2b6080f2 6193 struct trace_array *tr = &global_trace;
a8259075 6194 long index = (long)filp->private_data;
a8259075
SR
6195 unsigned long val;
6196 int ret;
6197
22fe9b54
PH
6198 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6199 if (ret)
a8259075
SR
6200 return ret;
6201
f2d84b65 6202 if (val != 0 && val != 1)
a8259075 6203 return -EINVAL;
69d34da2
SRRH
6204
6205 mutex_lock(&trace_types_lock);
2b6080f2 6206 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6207 mutex_unlock(&trace_types_lock);
a8259075 6208
613f04a0
SRRH
6209 if (ret < 0)
6210 return ret;
6211
a8259075
SR
6212 *ppos += cnt;
6213
6214 return cnt;
6215}
6216
a8259075
SR
6217static const struct file_operations trace_options_core_fops = {
6218 .open = tracing_open_generic,
6219 .read = trace_options_core_read,
6220 .write = trace_options_core_write,
b444786f 6221 .llseek = generic_file_llseek,
a8259075
SR
6222};
6223
5452af66 6224struct dentry *trace_create_file(const char *name,
f4ae40a6 6225 umode_t mode,
5452af66
FW
6226 struct dentry *parent,
6227 void *data,
6228 const struct file_operations *fops)
6229{
6230 struct dentry *ret;
6231
8434dc93 6232 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6233 if (!ret)
8434dc93 6234 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6235
6236 return ret;
6237}
6238
6239
2b6080f2 6240static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6241{
6242 struct dentry *d_tracer;
a8259075 6243
2b6080f2
SR
6244 if (tr->options)
6245 return tr->options;
a8259075 6246
7eeafbca 6247 d_tracer = tracing_get_dentry(tr);
14a5ae40 6248 if (IS_ERR(d_tracer))
a8259075
SR
6249 return NULL;
6250
8434dc93 6251 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6252 if (!tr->options) {
8434dc93 6253 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6254 return NULL;
6255 }
6256
2b6080f2 6257 return tr->options;
a8259075
SR
6258}
6259
577b785f 6260static void
2b6080f2
SR
6261create_trace_option_file(struct trace_array *tr,
6262 struct trace_option_dentry *topt,
577b785f
SR
6263 struct tracer_flags *flags,
6264 struct tracer_opt *opt)
6265{
6266 struct dentry *t_options;
577b785f 6267
2b6080f2 6268 t_options = trace_options_init_dentry(tr);
577b785f
SR
6269 if (!t_options)
6270 return;
6271
6272 topt->flags = flags;
6273 topt->opt = opt;
2b6080f2 6274 topt->tr = tr;
577b785f 6275
5452af66 6276 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6277 &trace_options_fops);
6278
577b785f
SR
6279}
6280
6281static struct trace_option_dentry *
2b6080f2 6282create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6283{
6284 struct trace_option_dentry *topts;
6285 struct tracer_flags *flags;
6286 struct tracer_opt *opts;
6287 int cnt;
6288
6289 if (!tracer)
6290 return NULL;
6291
6292 flags = tracer->flags;
6293
6294 if (!flags || !flags->opts)
6295 return NULL;
6296
6297 opts = flags->opts;
6298
6299 for (cnt = 0; opts[cnt].name; cnt++)
6300 ;
6301
0cfe8245 6302 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
6303 if (!topts)
6304 return NULL;
6305
41d9c0be 6306 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 6307 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 6308 &opts[cnt]);
41d9c0be
SRRH
6309 WARN_ONCE(topts[cnt].entry == NULL,
6310 "Failed to create trace option: %s",
6311 opts[cnt].name);
6312 }
577b785f
SR
6313
6314 return topts;
6315}
6316
a8259075 6317static struct dentry *
2b6080f2
SR
6318create_trace_option_core_file(struct trace_array *tr,
6319 const char *option, long index)
a8259075
SR
6320{
6321 struct dentry *t_options;
a8259075 6322
2b6080f2 6323 t_options = trace_options_init_dentry(tr);
a8259075
SR
6324 if (!t_options)
6325 return NULL;
6326
5452af66 6327 return trace_create_file(option, 0644, t_options, (void *)index,
a8259075 6328 &trace_options_core_fops);
a8259075
SR
6329}
6330
2b6080f2 6331static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6332{
6333 struct dentry *t_options;
a8259075
SR
6334 int i;
6335
2b6080f2 6336 t_options = trace_options_init_dentry(tr);
a8259075
SR
6337 if (!t_options)
6338 return;
6339
5452af66 6340 for (i = 0; trace_options[i]; i++)
2b6080f2 6341 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
6342}
6343
499e5470
SR
6344static ssize_t
6345rb_simple_read(struct file *filp, char __user *ubuf,
6346 size_t cnt, loff_t *ppos)
6347{
348f0fc2 6348 struct trace_array *tr = filp->private_data;
499e5470
SR
6349 char buf[64];
6350 int r;
6351
10246fa3 6352 r = tracer_tracing_is_on(tr);
499e5470
SR
6353 r = sprintf(buf, "%d\n", r);
6354
6355 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6356}
6357
6358static ssize_t
6359rb_simple_write(struct file *filp, const char __user *ubuf,
6360 size_t cnt, loff_t *ppos)
6361{
348f0fc2 6362 struct trace_array *tr = filp->private_data;
12883efb 6363 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6364 unsigned long val;
6365 int ret;
6366
6367 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6368 if (ret)
6369 return ret;
6370
6371 if (buffer) {
2df8f8a6
SR
6372 mutex_lock(&trace_types_lock);
6373 if (val) {
10246fa3 6374 tracer_tracing_on(tr);
2b6080f2
SR
6375 if (tr->current_trace->start)
6376 tr->current_trace->start(tr);
2df8f8a6 6377 } else {
10246fa3 6378 tracer_tracing_off(tr);
2b6080f2
SR
6379 if (tr->current_trace->stop)
6380 tr->current_trace->stop(tr);
2df8f8a6
SR
6381 }
6382 mutex_unlock(&trace_types_lock);
499e5470
SR
6383 }
6384
6385 (*ppos)++;
6386
6387 return cnt;
6388}
6389
6390static const struct file_operations rb_simple_fops = {
7b85af63 6391 .open = tracing_open_generic_tr,
499e5470
SR
6392 .read = rb_simple_read,
6393 .write = rb_simple_write,
7b85af63 6394 .release = tracing_release_generic_tr,
499e5470
SR
6395 .llseek = default_llseek,
6396};
6397
277ba044
SR
6398struct dentry *trace_instance_dir;
6399
6400static void
8434dc93 6401init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6402
55034cd6
SRRH
6403static int
6404allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6405{
6406 enum ring_buffer_flags rb_flags;
737223fb
SRRH
6407
6408 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6409
dced341b
SRRH
6410 buf->tr = tr;
6411
55034cd6
SRRH
6412 buf->buffer = ring_buffer_alloc(size, rb_flags);
6413 if (!buf->buffer)
6414 return -ENOMEM;
737223fb 6415
55034cd6
SRRH
6416 buf->data = alloc_percpu(struct trace_array_cpu);
6417 if (!buf->data) {
6418 ring_buffer_free(buf->buffer);
6419 return -ENOMEM;
6420 }
737223fb 6421
737223fb
SRRH
6422 /* Allocate the first page for all buffers */
6423 set_buffer_entries(&tr->trace_buffer,
6424 ring_buffer_size(tr->trace_buffer.buffer, 0));
6425
55034cd6
SRRH
6426 return 0;
6427}
737223fb 6428
55034cd6
SRRH
6429static int allocate_trace_buffers(struct trace_array *tr, int size)
6430{
6431 int ret;
737223fb 6432
55034cd6
SRRH
6433 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6434 if (ret)
6435 return ret;
737223fb 6436
55034cd6
SRRH
6437#ifdef CONFIG_TRACER_MAX_TRACE
6438 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6439 allocate_snapshot ? size : 1);
6440 if (WARN_ON(ret)) {
737223fb 6441 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6442 free_percpu(tr->trace_buffer.data);
6443 return -ENOMEM;
6444 }
6445 tr->allocated_snapshot = allocate_snapshot;
737223fb 6446
55034cd6
SRRH
6447 /*
6448 * Only the top level trace array gets its snapshot allocated
6449 * from the kernel command line.
6450 */
6451 allocate_snapshot = false;
737223fb 6452#endif
55034cd6 6453 return 0;
737223fb
SRRH
6454}
6455
f0b70cc4
SRRH
6456static void free_trace_buffer(struct trace_buffer *buf)
6457{
6458 if (buf->buffer) {
6459 ring_buffer_free(buf->buffer);
6460 buf->buffer = NULL;
6461 free_percpu(buf->data);
6462 buf->data = NULL;
6463 }
6464}
6465
23aaa3c1
SRRH
6466static void free_trace_buffers(struct trace_array *tr)
6467{
6468 if (!tr)
6469 return;
6470
f0b70cc4 6471 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6472
6473#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6474 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6475#endif
6476}
6477
eae47358 6478static int instance_mkdir(const char *name)
737223fb 6479{
277ba044
SR
6480 struct trace_array *tr;
6481 int ret;
277ba044
SR
6482
6483 mutex_lock(&trace_types_lock);
6484
6485 ret = -EEXIST;
6486 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6487 if (tr->name && strcmp(tr->name, name) == 0)
6488 goto out_unlock;
6489 }
6490
6491 ret = -ENOMEM;
6492 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6493 if (!tr)
6494 goto out_unlock;
6495
6496 tr->name = kstrdup(name, GFP_KERNEL);
6497 if (!tr->name)
6498 goto out_free_tr;
6499
ccfe9e42
AL
6500 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6501 goto out_free_tr;
6502
6503 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6504
277ba044
SR
6505 raw_spin_lock_init(&tr->start_lock);
6506
0b9b12c1
SRRH
6507 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6508
277ba044
SR
6509 tr->current_trace = &nop_trace;
6510
6511 INIT_LIST_HEAD(&tr->systems);
6512 INIT_LIST_HEAD(&tr->events);
6513
737223fb 6514 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6515 goto out_free_tr;
6516
8434dc93 6517 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6518 if (!tr->dir)
6519 goto out_free_tr;
6520
6521 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6522 if (ret) {
8434dc93 6523 tracefs_remove_recursive(tr->dir);
277ba044 6524 goto out_free_tr;
609e85a7 6525 }
277ba044 6526
8434dc93 6527 init_tracer_tracefs(tr, tr->dir);
277ba044
SR
6528
6529 list_add(&tr->list, &ftrace_trace_arrays);
6530
6531 mutex_unlock(&trace_types_lock);
6532
6533 return 0;
6534
6535 out_free_tr:
23aaa3c1 6536 free_trace_buffers(tr);
ccfe9e42 6537 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6538 kfree(tr->name);
6539 kfree(tr);
6540
6541 out_unlock:
6542 mutex_unlock(&trace_types_lock);
6543
6544 return ret;
6545
6546}
6547
eae47358 6548static int instance_rmdir(const char *name)
0c8916c3
SR
6549{
6550 struct trace_array *tr;
6551 int found = 0;
6552 int ret;
6553
6554 mutex_lock(&trace_types_lock);
6555
6556 ret = -ENODEV;
6557 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6558 if (tr->name && strcmp(tr->name, name) == 0) {
6559 found = 1;
6560 break;
6561 }
6562 }
6563 if (!found)
6564 goto out_unlock;
6565
a695cb58 6566 ret = -EBUSY;
cf6ab6d9 6567 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6568 goto out_unlock;
6569
0c8916c3
SR
6570 list_del(&tr->list);
6571
6b450d25 6572 tracing_set_nop(tr);
0c8916c3 6573 event_trace_del_tracer(tr);
591dffda 6574 ftrace_destroy_function_files(tr);
0c8916c3 6575 debugfs_remove_recursive(tr->dir);
a9fcaaac 6576 free_trace_buffers(tr);
0c8916c3
SR
6577
6578 kfree(tr->name);
6579 kfree(tr);
6580
6581 ret = 0;
6582
6583 out_unlock:
6584 mutex_unlock(&trace_types_lock);
6585
6586 return ret;
6587}
6588
277ba044
SR
6589static __init void create_trace_instances(struct dentry *d_tracer)
6590{
eae47358
SRRH
6591 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6592 instance_mkdir,
6593 instance_rmdir);
277ba044
SR
6594 if (WARN_ON(!trace_instance_dir))
6595 return;
277ba044
SR
6596}
6597
2b6080f2 6598static void
8434dc93 6599init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6600{
121aaee7 6601 int cpu;
2b6080f2 6602
607e2ea1
SRRH
6603 trace_create_file("available_tracers", 0444, d_tracer,
6604 tr, &show_traces_fops);
6605
6606 trace_create_file("current_tracer", 0644, d_tracer,
6607 tr, &set_tracer_fops);
6608
ccfe9e42
AL
6609 trace_create_file("tracing_cpumask", 0644, d_tracer,
6610 tr, &tracing_cpumask_fops);
6611
2b6080f2
SR
6612 trace_create_file("trace_options", 0644, d_tracer,
6613 tr, &tracing_iter_fops);
6614
6615 trace_create_file("trace", 0644, d_tracer,
6484c71c 6616 tr, &tracing_fops);
2b6080f2
SR
6617
6618 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6619 tr, &tracing_pipe_fops);
2b6080f2
SR
6620
6621 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6622 tr, &tracing_entries_fops);
2b6080f2
SR
6623
6624 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6625 tr, &tracing_total_entries_fops);
6626
238ae93d 6627 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6628 tr, &tracing_free_buffer_fops);
6629
6630 trace_create_file("trace_marker", 0220, d_tracer,
6631 tr, &tracing_mark_fops);
6632
6633 trace_create_file("trace_clock", 0644, d_tracer, tr,
6634 &trace_clock_fops);
6635
6636 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6637 tr, &rb_simple_fops);
ce9bae55 6638
6d9b3fa5
SRRH
6639#ifdef CONFIG_TRACER_MAX_TRACE
6640 trace_create_file("tracing_max_latency", 0644, d_tracer,
6641 &tr->max_latency, &tracing_max_lat_fops);
6642#endif
6643
591dffda
SRRH
6644 if (ftrace_create_function_files(tr, d_tracer))
6645 WARN(1, "Could not allocate function filter files");
6646
ce9bae55
SRRH
6647#ifdef CONFIG_TRACER_SNAPSHOT
6648 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6649 tr, &snapshot_fops);
ce9bae55 6650#endif
121aaee7
SRRH
6651
6652 for_each_tracing_cpu(cpu)
8434dc93 6653 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6654
2b6080f2
SR
6655}
6656
f76180bc
SRRH
6657static struct vfsmount *trace_automount(void *ingore)
6658{
6659 struct vfsmount *mnt;
6660 struct file_system_type *type;
6661
6662 /*
6663 * To maintain backward compatibility for tools that mount
6664 * debugfs to get to the tracing facility, tracefs is automatically
6665 * mounted to the debugfs/tracing directory.
6666 */
6667 type = get_fs_type("tracefs");
6668 if (!type)
6669 return NULL;
6670 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6671 put_filesystem(type);
6672 if (IS_ERR(mnt))
6673 return NULL;
6674 mntget(mnt);
6675
6676 return mnt;
6677}
6678
7eeafbca
SRRH
6679/**
6680 * tracing_init_dentry - initialize top level trace array
6681 *
6682 * This is called when creating files or directories in the tracing
6683 * directory. It is called via fs_initcall() by any of the boot up code
6684 * and expects to return the dentry of the top level tracing directory.
6685 */
6686struct dentry *tracing_init_dentry(void)
6687{
6688 struct trace_array *tr = &global_trace;
6689
f76180bc 6690 /* The top level trace array uses NULL as parent */
7eeafbca 6691 if (tr->dir)
f76180bc 6692 return NULL;
7eeafbca
SRRH
6693
6694 if (WARN_ON(!debugfs_initialized()))
6695 return ERR_PTR(-ENODEV);
6696
f76180bc
SRRH
6697 /*
6698 * As there may still be users that expect the tracing
6699 * files to exist in debugfs/tracing, we must automount
6700 * the tracefs file system there, so older tools still
 6701  * work with the newer kernel.
6702 */
6703 tr->dir = debugfs_create_automount("tracing", NULL,
6704 trace_automount, NULL);
7eeafbca
SRRH
6705 if (!tr->dir) {
6706 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6707 return ERR_PTR(-ENOMEM);
6708 }
6709
8434dc93 6710 return NULL;
7eeafbca
SRRH
6711}
6712
0c564a53
SRRH
6713extern struct trace_enum_map *__start_ftrace_enum_maps[];
6714extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6715
6716static void __init trace_enum_init(void)
6717{
3673b8e4
SRRH
6718 int len;
6719
6720 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6721 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6722}
6723
6724#ifdef CONFIG_MODULES
6725static void trace_module_add_enums(struct module *mod)
6726{
6727 if (!mod->num_trace_enums)
6728 return;
6729
6730 /*
 6731  * Modules with bad taint do not have events created; do
 6732  * not bother with enums either.
6733 */
6734 if (trace_module_has_bad_taint(mod))
6735 return;
6736
9828413d 6737 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6738}
6739
9828413d
SRRH
6740#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6741static void trace_module_remove_enums(struct module *mod)
6742{
6743 union trace_enum_map_item *map;
6744 union trace_enum_map_item **last = &trace_enum_maps;
6745
6746 if (!mod->num_trace_enums)
6747 return;
6748
6749 mutex_lock(&trace_enum_mutex);
6750
6751 map = trace_enum_maps;
6752
6753 while (map) {
6754 if (map->head.mod == mod)
6755 break;
6756 map = trace_enum_jmp_to_tail(map);
6757 last = &map->tail.next;
6758 map = map->tail.next;
6759 }
6760 if (!map)
6761 goto out;
6762
6763 *last = trace_enum_jmp_to_tail(map)->tail.next;
6764 kfree(map);
6765 out:
6766 mutex_unlock(&trace_enum_mutex);
6767}
6768#else
6769static inline void trace_module_remove_enums(struct module *mod) { }
6770#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6771
3673b8e4
SRRH
6772static int trace_module_notify(struct notifier_block *self,
6773 unsigned long val, void *data)
6774{
6775 struct module *mod = data;
6776
6777 switch (val) {
6778 case MODULE_STATE_COMING:
6779 trace_module_add_enums(mod);
6780 break;
9828413d
SRRH
6781 case MODULE_STATE_GOING:
6782 trace_module_remove_enums(mod);
6783 break;
3673b8e4
SRRH
6784 }
6785
6786 return 0;
0c564a53
SRRH
6787}
6788
3673b8e4
SRRH
6789static struct notifier_block trace_module_nb = {
6790 .notifier_call = trace_module_notify,
6791 .priority = 0,
6792};
9828413d 6793#endif /* CONFIG_MODULES */
3673b8e4 6794
8434dc93 6795static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6796{
6797 struct dentry *d_tracer;
41d9c0be 6798 struct tracer *t;
bc0c38d1 6799
7e53bd42
LJ
6800 trace_access_lock_init();
6801
bc0c38d1 6802 d_tracer = tracing_init_dentry();
14a5ae40 6803 if (IS_ERR(d_tracer))
ed6f1c99 6804 return 0;
bc0c38d1 6805
8434dc93 6806 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6807
5452af66 6808 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6809 &global_trace, &tracing_thresh_fops);
a8259075 6810
339ae5d3 6811 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6812 NULL, &tracing_readme_fops);
6813
69abe6a5
AP
6814 trace_create_file("saved_cmdlines", 0444, d_tracer,
6815 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6816
939c7a4f
YY
6817 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6818 NULL, &tracing_saved_cmdlines_size_fops);
6819
0c564a53
SRRH
6820 trace_enum_init();
6821
9828413d
SRRH
6822 trace_create_enum_file(d_tracer);
6823
3673b8e4
SRRH
6824#ifdef CONFIG_MODULES
6825 register_module_notifier(&trace_module_nb);
6826#endif
6827
bc0c38d1 6828#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6829 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6830 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6831#endif
b04cc6b1 6832
277ba044 6833 create_trace_instances(d_tracer);
5452af66 6834
2b6080f2 6835 create_trace_options_dir(&global_trace);
b04cc6b1 6836
41d9c0be
SRRH
6837 mutex_lock(&trace_types_lock);
6838 for (t = trace_types; t; t = t->next)
6839 add_tracer_options(&global_trace, t);
6840 mutex_unlock(&trace_types_lock);
09d23a1d 6841
b5ad384e 6842 return 0;
bc0c38d1
SR
6843}
6844
3f5a54e3
SR
6845static int trace_panic_handler(struct notifier_block *this,
6846 unsigned long event, void *unused)
6847{
944ac425 6848 if (ftrace_dump_on_oops)
cecbca96 6849 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6850 return NOTIFY_OK;
6851}
6852
6853static struct notifier_block trace_panic_notifier = {
6854 .notifier_call = trace_panic_handler,
6855 .next = NULL,
6856 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6857};
6858
6859static int trace_die_handler(struct notifier_block *self,
6860 unsigned long val,
6861 void *data)
6862{
6863 switch (val) {
6864 case DIE_OOPS:
944ac425 6865 if (ftrace_dump_on_oops)
cecbca96 6866 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6867 break;
6868 default:
6869 break;
6870 }
6871 return NOTIFY_OK;
6872}
6873
6874static struct notifier_block trace_die_notifier = {
6875 .notifier_call = trace_die_handler,
6876 .priority = 200
6877};
6878
6879/*
 6880  * printk is set to a max of 1024; we really don't need it that big.
6881 * Nothing should be printing 1000 characters anyway.
6882 */
6883#define TRACE_MAX_PRINT 1000
6884
6885/*
6886 * Define here KERN_TRACE so that we have one place to modify
6887 * it if we decide to change what log level the ftrace dump
6888 * should be at.
6889 */
428aee14 6890#define KERN_TRACE KERN_EMERG
3f5a54e3 6891
955b61e5 6892void
3f5a54e3
SR
6893trace_printk_seq(struct trace_seq *s)
6894{
6895 /* Probably should print a warning here. */
3a161d99
SRRH
6896 if (s->seq.len >= TRACE_MAX_PRINT)
6897 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 6898
820b75f6
SRRH
6899 /*
6900 * More paranoid code. Although the buffer size is set to
6901 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6902 * an extra layer of protection.
6903 */
6904 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6905 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
6906
6907 /* should be zero ended, but we are paranoid. */
3a161d99 6908 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
6909
6910 printk(KERN_TRACE "%s", s->buffer);
6911
f9520750 6912 trace_seq_init(s);
3f5a54e3
SR
6913}
6914
955b61e5
JW
6915void trace_init_global_iter(struct trace_iterator *iter)
6916{
6917 iter->tr = &global_trace;
2b6080f2 6918 iter->trace = iter->tr->current_trace;
ae3b5093 6919 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6920 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6921
6922 if (iter->trace && iter->trace->open)
6923 iter->trace->open(iter);
6924
6925 /* Annotate start of buffers if we had overruns */
6926 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6927 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6928
6929 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6930 if (trace_clocks[iter->tr->clock_id].in_ns)
6931 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
6932}
6933
7fe70b57 6934void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 6935{
3f5a54e3
SR
6936 /* use static because iter can be a bit big for the stack */
6937 static struct trace_iterator iter;
7fe70b57 6938 static atomic_t dump_running;
cf586b61 6939 unsigned int old_userobj;
d769041f
SR
6940 unsigned long flags;
6941 int cnt = 0, cpu;
3f5a54e3 6942
7fe70b57
SRRH
6943 /* Only allow one dump user at a time. */
6944 if (atomic_inc_return(&dump_running) != 1) {
6945 atomic_dec(&dump_running);
6946 return;
6947 }
3f5a54e3 6948
7fe70b57
SRRH
6949 /*
6950 * Always turn off tracing when we dump.
6951 * We don't need to show trace output of what happens
6952 * between multiple crashes.
6953 *
6954 * If the user does a sysrq-z, then they can re-enable
6955 * tracing with echo 1 > tracing_on.
6956 */
0ee6b6cf 6957 tracing_off();
cf586b61 6958
7fe70b57 6959 local_irq_save(flags);
3f5a54e3 6960
38dbe0b1 6961 /* Simulate the iterator */
955b61e5
JW
6962 trace_init_global_iter(&iter);
6963
d769041f 6964 for_each_tracing_cpu(cpu) {
5e2d5ef8 6965 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
6966 }
6967
cf586b61
FW
6968 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6969
b54d3de9
TE
6970 /* don't look at user memory in panic mode */
6971 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6972
cecbca96
FW
6973 switch (oops_dump_mode) {
6974 case DUMP_ALL:
ae3b5093 6975 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6976 break;
6977 case DUMP_ORIG:
6978 iter.cpu_file = raw_smp_processor_id();
6979 break;
6980 case DUMP_NONE:
6981 goto out_enable;
6982 default:
6983 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 6984 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6985 }
6986
6987 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 6988
7fe70b57
SRRH
6989 /* Did function tracer already get disabled? */
6990 if (ftrace_is_dead()) {
6991 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6992 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6993 }
6994
3f5a54e3
SR
6995 /*
 6996  * We need to stop all tracing on all CPUs to read
 6997  * the next buffer. This is a bit expensive, but is
 6998  * not done often. We fill all that we can read,
6999 * and then release the locks again.
7000 */
7001
3f5a54e3
SR
7002 while (!trace_empty(&iter)) {
7003
7004 if (!cnt)
7005 printk(KERN_TRACE "---------------------------------\n");
7006
7007 cnt++;
7008
7009 /* reset all but tr, trace, and overruns */
7010 memset(&iter.seq, 0,
7011 sizeof(struct trace_iterator) -
7012 offsetof(struct trace_iterator, seq));
7013 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7014 iter.pos = -1;
7015
955b61e5 7016 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7017 int ret;
7018
7019 ret = print_trace_line(&iter);
7020 if (ret != TRACE_TYPE_NO_CONSUME)
7021 trace_consume(&iter);
3f5a54e3 7022 }
b892e5c8 7023 touch_nmi_watchdog();
3f5a54e3
SR
7024
7025 trace_printk_seq(&iter.seq);
7026 }
7027
7028 if (!cnt)
7029 printk(KERN_TRACE " (ftrace buffer empty)\n");
7030 else
7031 printk(KERN_TRACE "---------------------------------\n");
7032
cecbca96 7033 out_enable:
7fe70b57 7034 trace_flags |= old_userobj;
cf586b61 7035
7fe70b57
SRRH
7036 for_each_tracing_cpu(cpu) {
7037 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7038 }
7fe70b57 7039 atomic_dec(&dump_running);
cd891ae0 7040 local_irq_restore(flags);
3f5a54e3 7041}
a8eecf22 7042EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7043
3928a8a2 7044__init static int tracer_alloc_buffers(void)
bc0c38d1 7045{
73c5162a 7046 int ring_buf_size;
9e01c1b7 7047 int ret = -ENOMEM;
4c11d7ae 7048
b5e87c05
SRRH
7049 /*
 7050  * Make sure we don't accidentally add more trace options
7051 * than we have bits for.
7052 */
7053 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > 32);
7054
9e01c1b7
RR
7055 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7056 goto out;
7057
ccfe9e42 7058 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7059 goto out_free_buffer_mask;
4c11d7ae 7060
07d777fe
SR
7061 /* Only allocate trace_printk buffers if a trace_printk exists */
7062 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7063 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7064 trace_printk_init_buffers();
7065
73c5162a
SR
7066 /* To save memory, keep the ring buffer size to its minimum */
7067 if (ring_buffer_expanded)
7068 ring_buf_size = trace_buf_size;
7069 else
7070 ring_buf_size = 1;
7071
9e01c1b7 7072 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7073 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7074
2b6080f2
SR
7075 raw_spin_lock_init(&global_trace.start_lock);
7076
2c4a33ab
SRRH
7077 /* Used for event triggers */
7078 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7079 if (!temp_buffer)
7080 goto out_free_cpumask;
7081
939c7a4f
YY
7082 if (trace_create_savedcmd() < 0)
7083 goto out_free_temp_buffer;
7084
9e01c1b7 7085 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 7086 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7087 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7088 WARN_ON(1);
939c7a4f 7089 goto out_free_savedcmd;
4c11d7ae 7090 }
a7603ff4 7091
499e5470
SR
7092 if (global_trace.buffer_disabled)
7093 tracing_off();
4c11d7ae 7094
e1e232ca
SR
7095 if (trace_boot_clock) {
7096 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7097 if (ret < 0)
7098 pr_warning("Trace clock %s not defined, going back to default\n",
7099 trace_boot_clock);
7100 }
7101
ca164318
SRRH
7102 /*
7103 * register_tracer() might reference current_trace, so it
7104 * needs to be set before we register anything. This is
7105 * just a bootstrap of current_trace anyway.
7106 */
2b6080f2
SR
7107 global_trace.current_trace = &nop_trace;
7108
0b9b12c1
SRRH
7109 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7110
4104d326
SRRH
7111 ftrace_init_global_array_ops(&global_trace);
7112
ca164318
SRRH
7113 register_tracer(&nop_trace);
7114
60a11774
SR
7115 /* All seems OK, enable tracing */
7116 tracing_disabled = 0;
3928a8a2 7117
3f5a54e3
SR
7118 atomic_notifier_chain_register(&panic_notifier_list,
7119 &trace_panic_notifier);
7120
7121 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7122
ae63b31e
SR
7123 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7124
7125 INIT_LIST_HEAD(&global_trace.systems);
7126 INIT_LIST_HEAD(&global_trace.events);
7127 list_add(&global_trace.list, &ftrace_trace_arrays);
7128
7bcfaf54
SR
7129 while (trace_boot_options) {
7130 char *option;
7131
7132 option = strsep(&trace_boot_options, ",");
2b6080f2 7133 trace_set_options(&global_trace, option);
7bcfaf54
SR
7134 }
7135
77fd5c15
SRRH
7136 register_snapshot_cmd();
7137
2fc1dfbe 7138 return 0;
3f5a54e3 7139
939c7a4f
YY
7140out_free_savedcmd:
7141 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7142out_free_temp_buffer:
7143 ring_buffer_free(temp_buffer);
9e01c1b7 7144out_free_cpumask:
ccfe9e42 7145 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7146out_free_buffer_mask:
7147 free_cpumask_var(tracing_buffer_mask);
7148out:
7149 return ret;
bc0c38d1 7150}
b2821ae6 7151
5f893b26
SRRH
7152void __init trace_init(void)
7153{
0daa2302
SRRH
7154 if (tracepoint_printk) {
7155 tracepoint_print_iter =
7156 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7157 if (WARN_ON(!tracepoint_print_iter))
7158 tracepoint_printk = 0;
7159 }
5f893b26 7160 tracer_alloc_buffers();
0c564a53 7161 trace_event_init();
5f893b26
SRRH
7162}
7163
b2821ae6
SR
7164__init static int clear_boot_tracer(void)
7165{
7166 /*
7167 * The default tracer at boot buffer is an init section.
7168 * This function is called in lateinit. If we did not
7169 * find the boot tracer, then clear it out, to prevent
7170 * later registration from accessing the buffer that is
7171 * about to be freed.
7172 */
7173 if (!default_bootup_tracer)
7174 return 0;
7175
7176 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7177 default_bootup_tracer);
7178 default_bootup_tracer = NULL;
7179
7180 return 0;
7181}
7182
8434dc93 7183fs_initcall(tracer_init_tracefs);
b2821ae6 7184late_initcall(clear_boot_tracer);