tracing: Fix array size mismatch in format string
[deliverable/linux.git] / kernel / trace / trace.c
CommitLineData
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
4c11d7ae 23#include <linux/pagemap.h>
bc0c38d1
SR
24#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
2cadf913 27#include <linux/kprobes.h>
bc0c38d1
SR
28#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
2cadf913 31#include <linux/splice.h>
3f5a54e3 32#include <linux/kdebug.h>
5f0c6c03 33#include <linux/string.h>
7e53bd42 34#include <linux/rwsem.h>
5a0e3ad6 35#include <linux/slab.h>
bc0c38d1
SR
36#include <linux/ctype.h>
37#include <linux/init.h>
2a2cc8f7 38#include <linux/poll.h>
b892e5c8 39#include <linux/nmi.h>
bc0c38d1 40#include <linux/fs.h>
8bd75c77 41#include <linux/sched/rt.h>
86387f7e 42
bc0c38d1 43#include "trace.h"
f0868d1e 44#include "trace_output.h"
bc0c38d1 45
73c5162a
SR
46/*
47 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
49 */
55034cd6 50bool ring_buffer_expanded;
73c5162a 51
8e1b82e0
FW
52/*
53 * We need to change this state when a selftest is running.
ff32504f
FW
54 * A selftest will lurk into the ring-buffer to count the
55 * entries inserted during the selftest although some concurrent
5e1607a0 56 * insertions into the ring-buffer such as trace_printk could occurred
ff32504f
FW
57 * at the same time, giving false positive or negative results.
58 */
8e1b82e0 59static bool __read_mostly tracing_selftest_running;
ff32504f 60
b2821ae6
SR
61/*
62 * If a tracer is running, we do not want to run SELFTEST.
63 */
020e5f85 64bool __read_mostly tracing_selftest_disabled;
b2821ae6 65
adf9f195
FW
66/* For tracers that don't implement custom flags */
67static struct tracer_opt dummy_tracer_opt[] = {
68 { }
69};
70
71static struct tracer_flags dummy_tracer_flags = {
72 .val = 0,
73 .opts = dummy_tracer_opt
74};
75
76static int dummy_set_flag(u32 old_flags, u32 bit, int set)
77{
78 return 0;
79}
0f048701 80
7ffbd48d
SR
81/*
82 * To prevent the comm cache from being overwritten when no
83 * tracing is active, only save the comm when a trace event
84 * occurred.
85 */
86static DEFINE_PER_CPU(bool, trace_cmdline_save);
87
0f048701
SR
88/*
89 * Kill all tracing for good (never come back).
90 * It is initialized to 1 but will turn to zero if the initialization
91 * of the tracer is successful. But that is the only place that sets
92 * this back to zero.
93 */
4fd27358 94static int tracing_disabled = 1;
0f048701 95
9288f99a 96DEFINE_PER_CPU(int, ftrace_cpu_disabled);
d769041f 97
955b61e5 98cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 99
944ac425
SR
100/*
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
102 *
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputing it to a
107 * serial console.
108 *
109 * It is default off, but you can enable it with either specifying
110 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
111 * /proc/sys/kernel/ftrace_dump_on_oops
112 * Set 1 if you want to dump buffers of all CPUs
113 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 114 */
cecbca96
FW
115
116enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 117
de7edd31
SRRH
118/* When set, tracing will stop when a WARN*() is hit */
119int __disable_trace_on_warning;
120
b2821ae6
SR
121static int tracing_set_tracer(const char *buf);
122
ee6c2c1b
LZ
123#define MAX_TRACER_SIZE 100
124static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 125static char *default_bootup_tracer;
d9e54076 126
55034cd6
SRRH
127static bool allocate_snapshot;
128
1beee96b 129static int __init set_cmdline_ftrace(char *str)
d9e54076 130{
67012ab1 131 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 132 default_bootup_tracer = bootup_tracer_buf;
73c5162a 133 /* We are using ftrace early, expand it */
55034cd6 134 ring_buffer_expanded = true;
d9e54076
PZ
135 return 1;
136}
1beee96b 137__setup("ftrace=", set_cmdline_ftrace);
d9e54076 138
944ac425
SR
139static int __init set_ftrace_dump_on_oops(char *str)
140{
cecbca96
FW
141 if (*str++ != '=' || !*str) {
142 ftrace_dump_on_oops = DUMP_ALL;
143 return 1;
144 }
145
146 if (!strcmp("orig_cpu", str)) {
147 ftrace_dump_on_oops = DUMP_ORIG;
148 return 1;
149 }
150
151 return 0;
944ac425
SR
152}
153__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 154
de7edd31
SRRH
155static int __init stop_trace_on_warning(char *str)
156{
157 __disable_trace_on_warning = 1;
158 return 1;
159}
160__setup("traceoff_on_warning=", stop_trace_on_warning);
161
3209cff4 162static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
163{
164 allocate_snapshot = true;
165 /* We also need the main ring buffer expanded */
166 ring_buffer_expanded = true;
167 return 1;
168}
3209cff4 169__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 170
7bcfaf54
SR
171
172static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
173static char *trace_boot_options __initdata;
174
175static int __init set_trace_boot_options(char *str)
176{
67012ab1 177 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
178 trace_boot_options = trace_boot_options_buf;
179 return 0;
180}
181__setup("trace_options=", set_trace_boot_options);
182
de7edd31 183
cf8e3474 184unsigned long long ns2usecs(cycle_t nsec)
bc0c38d1
SR
185{
186 nsec += 500;
187 do_div(nsec, 1000);
188 return nsec;
189}
190
4fcdae83
SR
191/*
192 * The global_trace is the descriptor that holds the tracing
193 * buffers for the live tracing. For each CPU, it contains
194 * a link list of pages that will store trace entries. The
195 * page descriptor of the pages in the memory is used to hold
196 * the link list by linking the lru item in the page descriptor
197 * to each of the pages in the buffer per CPU.
198 *
199 * For each active CPU there is a data field that holds the
200 * pages for the buffer for that CPU. Each CPU has the same number
201 * of pages allocated for its buffer.
202 */
bc0c38d1
SR
203static struct trace_array global_trace;
204
ae63b31e 205LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 206
ff451961
SRRH
207int trace_array_get(struct trace_array *this_tr)
208{
209 struct trace_array *tr;
210 int ret = -ENODEV;
211
212 mutex_lock(&trace_types_lock);
213 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
214 if (tr == this_tr) {
215 tr->ref++;
216 ret = 0;
217 break;
218 }
219 }
220 mutex_unlock(&trace_types_lock);
221
222 return ret;
223}
224
225static void __trace_array_put(struct trace_array *this_tr)
226{
227 WARN_ON(!this_tr->ref);
228 this_tr->ref--;
229}
230
231void trace_array_put(struct trace_array *this_tr)
232{
233 mutex_lock(&trace_types_lock);
234 __trace_array_put(this_tr);
235 mutex_unlock(&trace_types_lock);
236}
237
f306cc82
TZ
238int filter_check_discard(struct ftrace_event_file *file, void *rec,
239 struct ring_buffer *buffer,
240 struct ring_buffer_event *event)
eb02ce01 241{
f306cc82
TZ
242 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
243 !filter_match_preds(file->filter, rec)) {
244 ring_buffer_discard_commit(buffer, event);
245 return 1;
246 }
247
248 return 0;
249}
250EXPORT_SYMBOL_GPL(filter_check_discard);
251
252int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
253 struct ring_buffer *buffer,
254 struct ring_buffer_event *event)
255{
256 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
257 !filter_match_preds(call->filter, rec)) {
258 ring_buffer_discard_commit(buffer, event);
259 return 1;
260 }
261
262 return 0;
eb02ce01 263}
f306cc82 264EXPORT_SYMBOL_GPL(call_filter_check_discard);
eb02ce01 265
9457158b 266cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
267{
268 u64 ts;
269
270 /* Early boot up does not have a buffer yet */
9457158b 271 if (!buf->buffer)
37886f6a
SR
272 return trace_clock_local();
273
9457158b
AL
274 ts = ring_buffer_time_stamp(buf->buffer, cpu);
275 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
276
277 return ts;
278}
bc0c38d1 279
9457158b
AL
280cycle_t ftrace_now(int cpu)
281{
282 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
283}
284
10246fa3
SRRH
285/**
286 * tracing_is_enabled - Show if global_trace has been disabled
287 *
288 * Shows if the global trace has been enabled or not. It uses the
289 * mirror flag "buffer_disabled" to be used in fast paths such as for
290 * the irqsoff tracer. But it may be inaccurate due to races. If you
291 * need to know the accurate state, use tracing_is_on() which is a little
292 * slower, but accurate.
293 */
9036990d
SR
294int tracing_is_enabled(void)
295{
10246fa3
SRRH
296 /*
297 * For quick access (irqsoff uses this in fast path), just
298 * return the mirror variable of the state of the ring buffer.
299 * It's a little racy, but we don't really care.
300 */
301 smp_rmb();
302 return !global_trace.buffer_disabled;
9036990d
SR
303}
304
4fcdae83 305/*
3928a8a2
SR
306 * trace_buf_size is the size in bytes that is allocated
307 * for a buffer. Note, the number of bytes is always rounded
308 * to page size.
3f5a54e3
SR
309 *
310 * This number is purposely set to a low number of 16384.
311 * If the dump on oops happens, it will be much appreciated
312 * to not have to wait for all that output. Anyway this can be
313 * boot time and run time configurable.
4fcdae83 314 */
3928a8a2 315#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 316
3928a8a2 317static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 318
4fcdae83 319/* trace_types holds a link list of available tracers. */
bc0c38d1 320static struct tracer *trace_types __read_mostly;
4fcdae83 321
4fcdae83
SR
322/*
323 * trace_types_lock is used to protect the trace_types list.
4fcdae83 324 */
a8227415 325DEFINE_MUTEX(trace_types_lock);
4fcdae83 326
7e53bd42
LJ
327/*
328 * serialize the access of the ring buffer
329 *
330 * ring buffer serializes readers, but it is low level protection.
331 * The validity of the events (which returns by ring_buffer_peek() ..etc)
332 * are not protected by ring buffer.
333 *
334 * The content of events may become garbage if we allow other process consumes
335 * these events concurrently:
336 * A) the page of the consumed events may become a normal page
337 * (not reader page) in ring buffer, and this page will be rewrited
338 * by events producer.
339 * B) The page of the consumed events may become a page for splice_read,
340 * and this page will be returned to system.
341 *
342 * These primitives allow multi process access to different cpu ring buffer
343 * concurrently.
344 *
345 * These primitives don't distinguish read-only and read-consume access.
346 * Multi read-only access are also serialized.
347 */
348
349#ifdef CONFIG_SMP
350static DECLARE_RWSEM(all_cpu_access_lock);
351static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
352
353static inline void trace_access_lock(int cpu)
354{
ae3b5093 355 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
356 /* gain it for accessing the whole ring buffer. */
357 down_write(&all_cpu_access_lock);
358 } else {
359 /* gain it for accessing a cpu ring buffer. */
360
ae3b5093 361 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
362 down_read(&all_cpu_access_lock);
363
364 /* Secondly block other access to this @cpu ring buffer. */
365 mutex_lock(&per_cpu(cpu_access_lock, cpu));
366 }
367}
368
369static inline void trace_access_unlock(int cpu)
370{
ae3b5093 371 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
372 up_write(&all_cpu_access_lock);
373 } else {
374 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
375 up_read(&all_cpu_access_lock);
376 }
377}
378
379static inline void trace_access_lock_init(void)
380{
381 int cpu;
382
383 for_each_possible_cpu(cpu)
384 mutex_init(&per_cpu(cpu_access_lock, cpu));
385}
386
387#else
388
389static DEFINE_MUTEX(access_lock);
390
391static inline void trace_access_lock(int cpu)
392{
393 (void)cpu;
394 mutex_lock(&access_lock);
395}
396
397static inline void trace_access_unlock(int cpu)
398{
399 (void)cpu;
400 mutex_unlock(&access_lock);
401}
402
403static inline void trace_access_lock_init(void)
404{
405}
406
407#endif
408
ee6bce52 409/* trace_flags holds trace_options default values */
12ef7d44 410unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
a2a16d6a 411 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
77271ce4 412 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
328df475 413 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
e7e2ee89 414
5280bcef 415static void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
416{
417 if (tr->trace_buffer.buffer)
418 ring_buffer_record_on(tr->trace_buffer.buffer);
419 /*
420 * This flag is looked at when buffers haven't been allocated
421 * yet, or by some tracers (like irqsoff), that just want to
422 * know if the ring buffer has been disabled, but it can handle
423 * races of where it gets disabled but we still do a record.
424 * As the check is in the fast path of the tracers, it is more
425 * important to be fast than accurate.
426 */
427 tr->buffer_disabled = 0;
428 /* Make the flag seen by readers */
429 smp_wmb();
430}
431
499e5470
SR
432/**
433 * tracing_on - enable tracing buffers
434 *
435 * This function enables tracing buffers that may have been
436 * disabled with tracing_off.
437 */
438void tracing_on(void)
439{
10246fa3 440 tracer_tracing_on(&global_trace);
499e5470
SR
441}
442EXPORT_SYMBOL_GPL(tracing_on);
443
09ae7234
SRRH
444/**
445 * __trace_puts - write a constant string into the trace buffer.
446 * @ip: The address of the caller
447 * @str: The constant string to write
448 * @size: The size of the string.
449 */
450int __trace_puts(unsigned long ip, const char *str, int size)
451{
452 struct ring_buffer_event *event;
453 struct ring_buffer *buffer;
454 struct print_entry *entry;
455 unsigned long irq_flags;
456 int alloc;
457
3132e107
SRRH
458 if (unlikely(tracing_selftest_running || tracing_disabled))
459 return 0;
460
09ae7234
SRRH
461 alloc = sizeof(*entry) + size + 2; /* possible \n added */
462
463 local_save_flags(irq_flags);
464 buffer = global_trace.trace_buffer.buffer;
465 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
466 irq_flags, preempt_count());
467 if (!event)
468 return 0;
469
470 entry = ring_buffer_event_data(event);
471 entry->ip = ip;
472
473 memcpy(&entry->buf, str, size);
474
475 /* Add a newline if necessary */
476 if (entry->buf[size - 1] != '\n') {
477 entry->buf[size] = '\n';
478 entry->buf[size + 1] = '\0';
479 } else
480 entry->buf[size] = '\0';
481
482 __buffer_unlock_commit(buffer, event);
483
484 return size;
485}
486EXPORT_SYMBOL_GPL(__trace_puts);
487
488/**
489 * __trace_bputs - write the pointer to a constant string into trace buffer
490 * @ip: The address of the caller
491 * @str: The constant string to write to the buffer to
492 */
493int __trace_bputs(unsigned long ip, const char *str)
494{
495 struct ring_buffer_event *event;
496 struct ring_buffer *buffer;
497 struct bputs_entry *entry;
498 unsigned long irq_flags;
499 int size = sizeof(struct bputs_entry);
500
3132e107
SRRH
501 if (unlikely(tracing_selftest_running || tracing_disabled))
502 return 0;
503
09ae7234
SRRH
504 local_save_flags(irq_flags);
505 buffer = global_trace.trace_buffer.buffer;
506 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
507 irq_flags, preempt_count());
508 if (!event)
509 return 0;
510
511 entry = ring_buffer_event_data(event);
512 entry->ip = ip;
513 entry->str = str;
514
515 __buffer_unlock_commit(buffer, event);
516
517 return 1;
518}
519EXPORT_SYMBOL_GPL(__trace_bputs);
520
ad909e21
SRRH
521#ifdef CONFIG_TRACER_SNAPSHOT
522/**
523 * trace_snapshot - take a snapshot of the current buffer.
524 *
525 * This causes a swap between the snapshot buffer and the current live
526 * tracing buffer. You can use this to take snapshots of the live
527 * trace when some condition is triggered, but continue to trace.
528 *
529 * Note, make sure to allocate the snapshot with either
530 * a tracing_snapshot_alloc(), or by doing it manually
531 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
532 *
533 * If the snapshot buffer is not allocated, it will stop tracing.
534 * Basically making a permanent snapshot.
535 */
536void tracing_snapshot(void)
537{
538 struct trace_array *tr = &global_trace;
539 struct tracer *tracer = tr->current_trace;
540 unsigned long flags;
541
1b22e382
SRRH
542 if (in_nmi()) {
543 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
544 internal_trace_puts("*** snapshot is being ignored ***\n");
545 return;
546 }
547
ad909e21 548 if (!tr->allocated_snapshot) {
ca268da6
SRRH
549 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
550 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
551 tracing_off();
552 return;
553 }
554
555 /* Note, snapshot can not be used when the tracer uses it */
556 if (tracer->use_max_tr) {
ca268da6
SRRH
557 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
558 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
559 return;
560 }
561
562 local_irq_save(flags);
563 update_max_tr(tr, current, smp_processor_id());
564 local_irq_restore(flags);
565}
1b22e382 566EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
567
568static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
569 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
570static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
571
572static int alloc_snapshot(struct trace_array *tr)
573{
574 int ret;
575
576 if (!tr->allocated_snapshot) {
577
578 /* allocate spare buffer */
579 ret = resize_buffer_duplicate_size(&tr->max_buffer,
580 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
581 if (ret < 0)
582 return ret;
583
584 tr->allocated_snapshot = true;
585 }
586
587 return 0;
588}
589
590void free_snapshot(struct trace_array *tr)
591{
592 /*
593 * We don't free the ring buffer. instead, resize it because
594 * The max_tr ring buffer has some state (e.g. ring->clock) and
595 * we want preserve it.
596 */
597 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
598 set_buffer_entries(&tr->max_buffer, 1);
599 tracing_reset_online_cpus(&tr->max_buffer);
600 tr->allocated_snapshot = false;
601}
ad909e21 602
93e31ffb
TZ
603/**
604 * tracing_alloc_snapshot - allocate snapshot buffer.
605 *
606 * This only allocates the snapshot buffer if it isn't already
607 * allocated - it doesn't also take a snapshot.
608 *
609 * This is meant to be used in cases where the snapshot buffer needs
610 * to be set up for events that can't sleep but need to be able to
611 * trigger a snapshot.
612 */
613int tracing_alloc_snapshot(void)
614{
615 struct trace_array *tr = &global_trace;
616 int ret;
617
618 ret = alloc_snapshot(tr);
619 WARN_ON(ret < 0);
620
621 return ret;
622}
623EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
624
ad909e21
SRRH
625/**
626 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
627 *
628 * This is similar to trace_snapshot(), but it will allocate the
629 * snapshot buffer if it isn't already allocated. Use this only
630 * where it is safe to sleep, as the allocation may sleep.
631 *
632 * This causes a swap between the snapshot buffer and the current live
633 * tracing buffer. You can use this to take snapshots of the live
634 * trace when some condition is triggered, but continue to trace.
635 */
636void tracing_snapshot_alloc(void)
637{
ad909e21
SRRH
638 int ret;
639
93e31ffb
TZ
640 ret = tracing_alloc_snapshot();
641 if (ret < 0)
3209cff4 642 return;
ad909e21
SRRH
643
644 tracing_snapshot();
645}
1b22e382 646EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
647#else
648void tracing_snapshot(void)
649{
650 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
651}
1b22e382 652EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
653int tracing_alloc_snapshot(void)
654{
655 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
656 return -ENODEV;
657}
658EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
659void tracing_snapshot_alloc(void)
660{
661 /* Give warning */
662 tracing_snapshot();
663}
1b22e382 664EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
665#endif /* CONFIG_TRACER_SNAPSHOT */
666
5280bcef 667static void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
668{
669 if (tr->trace_buffer.buffer)
670 ring_buffer_record_off(tr->trace_buffer.buffer);
671 /*
672 * This flag is looked at when buffers haven't been allocated
673 * yet, or by some tracers (like irqsoff), that just want to
674 * know if the ring buffer has been disabled, but it can handle
675 * races of where it gets disabled but we still do a record.
676 * As the check is in the fast path of the tracers, it is more
677 * important to be fast than accurate.
678 */
679 tr->buffer_disabled = 1;
680 /* Make the flag seen by readers */
681 smp_wmb();
682}
683
499e5470
SR
684/**
685 * tracing_off - turn off tracing buffers
686 *
687 * This function stops the tracing buffers from recording data.
688 * It does not disable any overhead the tracers themselves may
689 * be causing. This function simply causes all recording to
690 * the ring buffers to fail.
691 */
692void tracing_off(void)
693{
10246fa3 694 tracer_tracing_off(&global_trace);
499e5470
SR
695}
696EXPORT_SYMBOL_GPL(tracing_off);
697
de7edd31
SRRH
698void disable_trace_on_warning(void)
699{
700 if (__disable_trace_on_warning)
701 tracing_off();
702}
703
10246fa3
SRRH
704/**
705 * tracer_tracing_is_on - show real state of ring buffer enabled
706 * @tr : the trace array to know if ring buffer is enabled
707 *
708 * Shows real state of the ring buffer if it is enabled or not.
709 */
5280bcef 710static int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
711{
712 if (tr->trace_buffer.buffer)
713 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
714 return !tr->buffer_disabled;
715}
716
499e5470
SR
717/**
718 * tracing_is_on - show state of ring buffers enabled
719 */
720int tracing_is_on(void)
721{
10246fa3 722 return tracer_tracing_is_on(&global_trace);
499e5470
SR
723}
724EXPORT_SYMBOL_GPL(tracing_is_on);
725
3928a8a2 726static int __init set_buf_size(char *str)
bc0c38d1 727{
3928a8a2 728 unsigned long buf_size;
c6caeeb1 729
bc0c38d1
SR
730 if (!str)
731 return 0;
9d612bef 732 buf_size = memparse(str, &str);
c6caeeb1 733 /* nr_entries can not be zero */
9d612bef 734 if (buf_size == 0)
c6caeeb1 735 return 0;
3928a8a2 736 trace_buf_size = buf_size;
bc0c38d1
SR
737 return 1;
738}
3928a8a2 739__setup("trace_buf_size=", set_buf_size);
bc0c38d1 740
0e950173
TB
741static int __init set_tracing_thresh(char *str)
742{
87abb3b1 743 unsigned long threshold;
0e950173
TB
744 int ret;
745
746 if (!str)
747 return 0;
bcd83ea6 748 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
749 if (ret < 0)
750 return 0;
87abb3b1 751 tracing_thresh = threshold * 1000;
0e950173
TB
752 return 1;
753}
754__setup("tracing_thresh=", set_tracing_thresh);
755
57f50be1
SR
756unsigned long nsecs_to_usecs(unsigned long nsecs)
757{
758 return nsecs / 1000;
759}
760
4fcdae83 761/* These must match the bit postions in trace_iterator_flags */
bc0c38d1
SR
762static const char *trace_options[] = {
763 "print-parent",
764 "sym-offset",
765 "sym-addr",
766 "verbose",
f9896bf3 767 "raw",
5e3ca0ec 768 "hex",
cb0f12aa 769 "bin",
2a2cc8f7 770 "block",
86387f7e 771 "stacktrace",
5e1607a0 772 "trace_printk",
b2a866f9 773 "ftrace_preempt",
9f029e83 774 "branch",
12ef7d44 775 "annotate",
02b67518 776 "userstacktrace",
b54d3de9 777 "sym-userobj",
66896a85 778 "printk-msg-only",
c4a8e8be 779 "context-info",
c032ef64 780 "latency-format",
be6f164a 781 "sleep-time",
a2a16d6a 782 "graph-time",
e870e9a1 783 "record-cmd",
750912fa 784 "overwrite",
cf30cf67 785 "disable_on_free",
77271ce4 786 "irq-info",
5224c3a3 787 "markers",
328df475 788 "function-trace",
bc0c38d1
SR
789 NULL
790};
791
5079f326
Z
792static struct {
793 u64 (*func)(void);
794 const char *name;
8be0709f 795 int in_ns; /* is this clock in nanoseconds? */
5079f326 796} trace_clocks[] = {
8be0709f
DS
797 { trace_clock_local, "local", 1 },
798 { trace_clock_global, "global", 1 },
799 { trace_clock_counter, "counter", 0 },
8aacf017 800 { trace_clock_jiffies, "uptime", 1 },
76f11917 801 { trace_clock, "perf", 1 },
8cbd9cc6 802 ARCH_TRACE_CLOCKS
5079f326
Z
803};
804
b63f39ea 805/*
806 * trace_parser_get_init - gets the buffer for trace parser
807 */
808int trace_parser_get_init(struct trace_parser *parser, int size)
809{
810 memset(parser, 0, sizeof(*parser));
811
812 parser->buffer = kmalloc(size, GFP_KERNEL);
813 if (!parser->buffer)
814 return 1;
815
816 parser->size = size;
817 return 0;
818}
819
820/*
821 * trace_parser_put - frees the buffer for trace parser
822 */
823void trace_parser_put(struct trace_parser *parser)
824{
825 kfree(parser->buffer);
826}
827
828/*
829 * trace_get_user - reads the user input string separated by space
830 * (matched by isspace(ch))
831 *
832 * For each string found the 'struct trace_parser' is updated,
833 * and the function returns.
834 *
835 * Returns number of bytes read.
836 *
837 * See kernel/trace/trace.h for 'struct trace_parser' details.
838 */
839int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
840 size_t cnt, loff_t *ppos)
841{
842 char ch;
843 size_t read = 0;
844 ssize_t ret;
845
846 if (!*ppos)
847 trace_parser_clear(parser);
848
849 ret = get_user(ch, ubuf++);
850 if (ret)
851 goto out;
852
853 read++;
854 cnt--;
855
856 /*
857 * The parser is not finished with the last write,
858 * continue reading the user input without skipping spaces.
859 */
860 if (!parser->cont) {
861 /* skip white space */
862 while (cnt && isspace(ch)) {
863 ret = get_user(ch, ubuf++);
864 if (ret)
865 goto out;
866 read++;
867 cnt--;
868 }
869
870 /* only spaces were written */
871 if (isspace(ch)) {
872 *ppos += read;
873 ret = read;
874 goto out;
875 }
876
877 parser->idx = 0;
878 }
879
880 /* read the non-space input */
881 while (cnt && !isspace(ch)) {
3c235a33 882 if (parser->idx < parser->size - 1)
b63f39ea 883 parser->buffer[parser->idx++] = ch;
884 else {
885 ret = -EINVAL;
886 goto out;
887 }
888 ret = get_user(ch, ubuf++);
889 if (ret)
890 goto out;
891 read++;
892 cnt--;
893 }
894
895 /* We either got finished input or we have to wait for another call. */
896 if (isspace(ch)) {
897 parser->buffer[parser->idx] = 0;
898 parser->cont = false;
057db848 899 } else if (parser->idx < parser->size - 1) {
b63f39ea 900 parser->cont = true;
901 parser->buffer[parser->idx++] = ch;
057db848
SR
902 } else {
903 ret = -EINVAL;
904 goto out;
b63f39ea 905 }
906
907 *ppos += read;
908 ret = read;
909
910out:
911 return ret;
912}
913
6c6c2796
PP
914ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
915{
916 int len;
917 int ret;
918
2dc5d12b
SR
919 if (!cnt)
920 return 0;
921
6c6c2796
PP
922 if (s->len <= s->readpos)
923 return -EBUSY;
924
925 len = s->len - s->readpos;
926 if (cnt > len)
927 cnt = len;
928 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
2dc5d12b 929 if (ret == cnt)
6c6c2796
PP
930 return -EFAULT;
931
2dc5d12b
SR
932 cnt -= ret;
933
e74da523 934 s->readpos += cnt;
6c6c2796 935 return cnt;
214023c3
SR
936}
937
b8b94265 938static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
939{
940 int len;
3c56819b
EGM
941
942 if (s->len <= s->readpos)
943 return -EBUSY;
944
945 len = s->len - s->readpos;
946 if (cnt > len)
947 cnt = len;
5a26c8f0 948 memcpy(buf, s->buffer + s->readpos, cnt);
3c56819b 949
e74da523 950 s->readpos += cnt;
3c56819b
EGM
951 return cnt;
952}
953
5d4a9dba
SR
954/*
955 * ftrace_max_lock is used to protect the swapping of buffers
956 * when taking a max snapshot. The buffers themselves are
957 * protected by per_cpu spinlocks. But the action of the swap
958 * needs its own lock.
959 *
445c8951 960 * This is defined as a arch_spinlock_t in order to help
5d4a9dba
SR
961 * with performance when lockdep debugging is enabled.
962 *
963 * It is also used in other places outside the update_max_tr
964 * so it needs to be defined outside of the
965 * CONFIG_TRACER_MAX_TRACE.
966 */
445c8951 967static arch_spinlock_t ftrace_max_lock =
edc35bd7 968 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
5d4a9dba 969
0e950173
TB
970unsigned long __read_mostly tracing_thresh;
971
5d4a9dba
SR
972#ifdef CONFIG_TRACER_MAX_TRACE
973unsigned long __read_mostly tracing_max_latency;
5d4a9dba
SR
974
975/*
976 * Copy the new maximum trace into the separate maximum-trace
977 * structure. (this way the maximum trace is permanently saved,
978 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
979 */
980static void
981__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
982{
12883efb
SRRH
983 struct trace_buffer *trace_buf = &tr->trace_buffer;
984 struct trace_buffer *max_buf = &tr->max_buffer;
985 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
986 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 987
12883efb
SRRH
988 max_buf->cpu = cpu;
989 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 990
8248ac05
SR
991 max_data->saved_latency = tracing_max_latency;
992 max_data->critical_start = data->critical_start;
993 max_data->critical_end = data->critical_end;
5d4a9dba 994
1acaa1b2 995 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 996 max_data->pid = tsk->pid;
f17a5194
SRRH
997 /*
998 * If tsk == current, then use current_uid(), as that does not use
999 * RCU. The irq tracer can be called out of RCU scope.
1000 */
1001 if (tsk == current)
1002 max_data->uid = current_uid();
1003 else
1004 max_data->uid = task_uid(tsk);
1005
8248ac05
SR
1006 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1007 max_data->policy = tsk->policy;
1008 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1009
1010 /* record this tasks comm */
1011 tracing_record_cmdline(tsk);
1012}
1013
4fcdae83
SR
1014/**
1015 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1016 * @tr: tracer
1017 * @tsk: the task with the latency
1018 * @cpu: The cpu that initiated the trace.
1019 *
1020 * Flip the buffers between the @tr and the max_tr and record information
1021 * about which task was the cause of this latency.
1022 */
e309b41d 1023void
bc0c38d1
SR
1024update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1025{
2721e72d 1026 struct ring_buffer *buf;
bc0c38d1 1027
2b6080f2 1028 if (tr->stop_count)
b8de7bd1
SR
1029 return;
1030
4c11d7ae 1031 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1032
45ad21ca 1033 if (!tr->allocated_snapshot) {
debdd57f 1034 /* Only the nop tracer should hit this when disabling */
2b6080f2 1035 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1036 return;
debdd57f 1037 }
34600f0e 1038
0199c4e6 1039 arch_spin_lock(&ftrace_max_lock);
3928a8a2 1040
12883efb
SRRH
1041 buf = tr->trace_buffer.buffer;
1042 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1043 tr->max_buffer.buffer = buf;
3928a8a2 1044
bc0c38d1 1045 __update_max_tr(tr, tsk, cpu);
0199c4e6 1046 arch_spin_unlock(&ftrace_max_lock);
bc0c38d1
SR
1047}
1048
1049/**
1050 * update_max_tr_single - only copy one trace over, and reset the rest
1051 * @tr - tracer
1052 * @tsk - task with the latency
1053 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1054 *
1055 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1056 */
e309b41d 1057void
bc0c38d1
SR
1058update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1059{
3928a8a2 1060 int ret;
bc0c38d1 1061
2b6080f2 1062 if (tr->stop_count)
b8de7bd1
SR
1063 return;
1064
4c11d7ae 1065 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1066 if (!tr->allocated_snapshot) {
2930e04d 1067 /* Only the nop tracer should hit this when disabling */
9e8529af 1068 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1069 return;
2930e04d 1070 }
ef710e10 1071
0199c4e6 1072 arch_spin_lock(&ftrace_max_lock);
bc0c38d1 1073
12883efb 1074 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1075
e8165dbb
SR
1076 if (ret == -EBUSY) {
1077 /*
1078 * We failed to swap the buffer due to a commit taking
1079 * place on this CPU. We fail to record, but we reset
1080 * the max trace buffer (no one writes directly to it)
1081 * and flag that it failed.
1082 */
12883efb 1083 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1084 "Failed to swap buffers due to commit in progress\n");
1085 }
1086
e8165dbb 1087 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1088
1089 __update_max_tr(tr, tsk, cpu);
0199c4e6 1090 arch_spin_unlock(&ftrace_max_lock);
bc0c38d1 1091}
5d4a9dba 1092#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1093
0d5c6e1c
SR
1094static void default_wait_pipe(struct trace_iterator *iter)
1095{
15693458
SRRH
1096 /* Iterators are static, they should be filled or empty */
1097 if (trace_buffer_iter(iter, iter->cpu_file))
1098 return;
0d5c6e1c 1099
12883efb 1100 ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
0d5c6e1c
SR
1101}
1102
f4e781c0
SRRH
1103#ifdef CONFIG_FTRACE_STARTUP_TEST
1104static int run_tracer_selftest(struct tracer *type)
1105{
1106 struct trace_array *tr = &global_trace;
1107 struct tracer *saved_tracer = tr->current_trace;
1108 int ret;
0d5c6e1c 1109
f4e781c0
SRRH
1110 if (!type->selftest || tracing_selftest_disabled)
1111 return 0;
0d5c6e1c
SR
1112
1113 /*
f4e781c0
SRRH
1114 * Run a selftest on this tracer.
1115 * Here we reset the trace buffer, and set the current
1116 * tracer to be this tracer. The tracer can then run some
1117 * internal tracing to verify that everything is in order.
1118 * If we fail, we do not register this tracer.
0d5c6e1c 1119 */
f4e781c0 1120 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1121
f4e781c0
SRRH
1122 tr->current_trace = type;
1123
1124#ifdef CONFIG_TRACER_MAX_TRACE
1125 if (type->use_max_tr) {
1126 /* If we expanded the buffers, make sure the max is expanded too */
1127 if (ring_buffer_expanded)
1128 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1129 RING_BUFFER_ALL_CPUS);
1130 tr->allocated_snapshot = true;
1131 }
1132#endif
1133
1134 /* the test is responsible for initializing and enabling */
1135 pr_info("Testing tracer %s: ", type->name);
1136 ret = type->selftest(type, tr);
1137 /* the test is responsible for resetting too */
1138 tr->current_trace = saved_tracer;
1139 if (ret) {
1140 printk(KERN_CONT "FAILED!\n");
1141 /* Add the warning after printing 'FAILED' */
1142 WARN_ON(1);
1143 return -1;
1144 }
1145 /* Only reset on passing, to avoid touching corrupted buffers */
1146 tracing_reset_online_cpus(&tr->trace_buffer);
1147
1148#ifdef CONFIG_TRACER_MAX_TRACE
1149 if (type->use_max_tr) {
1150 tr->allocated_snapshot = false;
0d5c6e1c 1151
f4e781c0
SRRH
1152 /* Shrink the max buffer again */
1153 if (ring_buffer_expanded)
1154 ring_buffer_resize(tr->max_buffer.buffer, 1,
1155 RING_BUFFER_ALL_CPUS);
1156 }
1157#endif
1158
1159 printk(KERN_CONT "PASSED\n");
1160 return 0;
1161}
1162#else
1163static inline int run_tracer_selftest(struct tracer *type)
1164{
1165 return 0;
0d5c6e1c 1166}
f4e781c0 1167#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1168
4fcdae83
SR
1169/**
1170 * register_tracer - register a tracer with the ftrace system.
1171 * @type - the plugin for the tracer
1172 *
1173 * Register a new plugin tracer.
1174 */
bc0c38d1
SR
1175int register_tracer(struct tracer *type)
1176{
1177 struct tracer *t;
bc0c38d1
SR
1178 int ret = 0;
1179
1180 if (!type->name) {
1181 pr_info("Tracer must have a name\n");
1182 return -1;
1183 }
1184
24a461d5 1185 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1186 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1187 return -1;
1188 }
1189
bc0c38d1 1190 mutex_lock(&trace_types_lock);
86fa2f60 1191
8e1b82e0
FW
1192 tracing_selftest_running = true;
1193
bc0c38d1
SR
1194 for (t = trace_types; t; t = t->next) {
1195 if (strcmp(type->name, t->name) == 0) {
1196 /* already found */
ee6c2c1b 1197 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1198 type->name);
1199 ret = -1;
1200 goto out;
1201 }
1202 }
1203
adf9f195
FW
1204 if (!type->set_flag)
1205 type->set_flag = &dummy_set_flag;
1206 if (!type->flags)
1207 type->flags = &dummy_tracer_flags;
1208 else
1209 if (!type->flags->opts)
1210 type->flags->opts = dummy_tracer_opt;
6eaaa5d5
FW
1211 if (!type->wait_pipe)
1212 type->wait_pipe = default_wait_pipe;
1213
f4e781c0
SRRH
1214 ret = run_tracer_selftest(type);
1215 if (ret < 0)
1216 goto out;
60a11774 1217
bc0c38d1
SR
1218 type->next = trace_types;
1219 trace_types = type;
60a11774 1220
bc0c38d1 1221 out:
8e1b82e0 1222 tracing_selftest_running = false;
bc0c38d1
SR
1223 mutex_unlock(&trace_types_lock);
1224
dac74940
SR
1225 if (ret || !default_bootup_tracer)
1226 goto out_unlock;
1227
ee6c2c1b 1228 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1229 goto out_unlock;
1230
1231 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1232 /* Do we want this tracer to start on bootup? */
1233 tracing_set_tracer(type->name);
1234 default_bootup_tracer = NULL;
1235 /* disable other selftests, since this will break it. */
55034cd6 1236 tracing_selftest_disabled = true;
b2821ae6 1237#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1238 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1239 type->name);
b2821ae6 1240#endif
b2821ae6 1241
dac74940 1242 out_unlock:
bc0c38d1
SR
1243 return ret;
1244}
1245
12883efb 1246void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1247{
12883efb 1248 struct ring_buffer *buffer = buf->buffer;
f633903a 1249
a5416411
HT
1250 if (!buffer)
1251 return;
1252
f633903a
SR
1253 ring_buffer_record_disable(buffer);
1254
1255 /* Make sure all commits have finished */
1256 synchronize_sched();
68179686 1257 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1258
1259 ring_buffer_record_enable(buffer);
1260}
1261
12883efb 1262void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1263{
12883efb 1264 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1265 int cpu;
1266
a5416411
HT
1267 if (!buffer)
1268 return;
1269
621968cd
SR
1270 ring_buffer_record_disable(buffer);
1271
1272 /* Make sure all commits have finished */
1273 synchronize_sched();
1274
9457158b 1275 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1276
1277 for_each_online_cpu(cpu)
68179686 1278 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1279
1280 ring_buffer_record_enable(buffer);
213cc060
PE
1281}
1282
09d8091c 1283/* Must have trace_types_lock held */
873c642f 1284void tracing_reset_all_online_cpus(void)
9456f0fa 1285{
873c642f
SRRH
1286 struct trace_array *tr;
1287
873c642f 1288 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1289 tracing_reset_online_cpus(&tr->trace_buffer);
1290#ifdef CONFIG_TRACER_MAX_TRACE
1291 tracing_reset_online_cpus(&tr->max_buffer);
1292#endif
873c642f 1293 }
9456f0fa
SR
1294}
1295
bc0c38d1 1296#define SAVED_CMDLINES 128
2c7eea4c 1297#define NO_CMDLINE_MAP UINT_MAX
bc0c38d1
SR
1298static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1299static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1300static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1301static int cmdline_idx;
edc35bd7 1302static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
25b0b44a 1303
25b0b44a 1304/* temporary disable recording */
4fd27358 1305static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1
SR
1306
1307static void trace_init_cmdlines(void)
1308{
2c7eea4c
TG
1309 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1310 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
bc0c38d1
SR
1311 cmdline_idx = 0;
1312}
1313
b5130b1e
CE
1314int is_tracing_stopped(void)
1315{
2b6080f2 1316 return global_trace.stop_count;
b5130b1e
CE
1317}
1318
0f048701
SR
1319/**
1320 * tracing_start - quick start of the tracer
1321 *
1322 * If tracing is enabled but was stopped by tracing_stop,
1323 * this will start the tracer back up.
1324 */
1325void tracing_start(void)
1326{
1327 struct ring_buffer *buffer;
1328 unsigned long flags;
1329
1330 if (tracing_disabled)
1331 return;
1332
2b6080f2
SR
1333 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1334 if (--global_trace.stop_count) {
1335 if (global_trace.stop_count < 0) {
b06a8301
SR
1336 /* Someone screwed up their debugging */
1337 WARN_ON_ONCE(1);
2b6080f2 1338 global_trace.stop_count = 0;
b06a8301 1339 }
0f048701
SR
1340 goto out;
1341 }
1342
a2f80714
SR
1343 /* Prevent the buffers from switching */
1344 arch_spin_lock(&ftrace_max_lock);
0f048701 1345
12883efb 1346 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1347 if (buffer)
1348 ring_buffer_record_enable(buffer);
1349
12883efb
SRRH
1350#ifdef CONFIG_TRACER_MAX_TRACE
1351 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1352 if (buffer)
1353 ring_buffer_record_enable(buffer);
12883efb 1354#endif
0f048701 1355
a2f80714
SR
1356 arch_spin_unlock(&ftrace_max_lock);
1357
0f048701
SR
1358 ftrace_start();
1359 out:
2b6080f2
SR
1360 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1361}
1362
1363static void tracing_start_tr(struct trace_array *tr)
1364{
1365 struct ring_buffer *buffer;
1366 unsigned long flags;
1367
1368 if (tracing_disabled)
1369 return;
1370
1371 /* If global, we need to also start the max tracer */
1372 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1373 return tracing_start();
1374
1375 raw_spin_lock_irqsave(&tr->start_lock, flags);
1376
1377 if (--tr->stop_count) {
1378 if (tr->stop_count < 0) {
1379 /* Someone screwed up their debugging */
1380 WARN_ON_ONCE(1);
1381 tr->stop_count = 0;
1382 }
1383 goto out;
1384 }
1385
12883efb 1386 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1387 if (buffer)
1388 ring_buffer_record_enable(buffer);
1389
1390 out:
1391 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1392}
1393
1394/**
1395 * tracing_stop - quick stop of the tracer
1396 *
1397 * Light weight way to stop tracing. Use in conjunction with
1398 * tracing_start.
1399 */
1400void tracing_stop(void)
1401{
1402 struct ring_buffer *buffer;
1403 unsigned long flags;
1404
1405 ftrace_stop();
2b6080f2
SR
1406 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1407 if (global_trace.stop_count++)
0f048701
SR
1408 goto out;
1409
a2f80714
SR
1410 /* Prevent the buffers from switching */
1411 arch_spin_lock(&ftrace_max_lock);
1412
12883efb 1413 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1414 if (buffer)
1415 ring_buffer_record_disable(buffer);
1416
12883efb
SRRH
1417#ifdef CONFIG_TRACER_MAX_TRACE
1418 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1419 if (buffer)
1420 ring_buffer_record_disable(buffer);
12883efb 1421#endif
0f048701 1422
a2f80714
SR
1423 arch_spin_unlock(&ftrace_max_lock);
1424
0f048701 1425 out:
2b6080f2
SR
1426 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1427}
1428
1429static void tracing_stop_tr(struct trace_array *tr)
1430{
1431 struct ring_buffer *buffer;
1432 unsigned long flags;
1433
1434 /* If global, we need to also stop the max tracer */
1435 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1436 return tracing_stop();
1437
1438 raw_spin_lock_irqsave(&tr->start_lock, flags);
1439 if (tr->stop_count++)
1440 goto out;
1441
12883efb 1442 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1443 if (buffer)
1444 ring_buffer_record_disable(buffer);
1445
1446 out:
1447 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1448}
1449
e309b41d 1450void trace_stop_cmdline_recording(void);
bc0c38d1 1451
e309b41d 1452static void trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1453{
a635cf04 1454 unsigned pid, idx;
bc0c38d1
SR
1455
1456 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1457 return;
1458
1459 /*
1460 * It's not the end of the world if we don't get
1461 * the lock, but we also don't want to spin
1462 * nor do we want to disable interrupts,
1463 * so if we miss here, then better luck next time.
1464 */
0199c4e6 1465 if (!arch_spin_trylock(&trace_cmdline_lock))
bc0c38d1
SR
1466 return;
1467
1468 idx = map_pid_to_cmdline[tsk->pid];
2c7eea4c 1469 if (idx == NO_CMDLINE_MAP) {
bc0c38d1
SR
1470 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1471
a635cf04
CE
1472 /*
1473 * Check whether the cmdline buffer at idx has a pid
1474 * mapped. We are going to overwrite that entry so we
1475 * need to clear the map_pid_to_cmdline. Otherwise we
1476 * would read the new comm for the old pid.
1477 */
1478 pid = map_cmdline_to_pid[idx];
1479 if (pid != NO_CMDLINE_MAP)
1480 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1481
a635cf04 1482 map_cmdline_to_pid[idx] = tsk->pid;
bc0c38d1
SR
1483 map_pid_to_cmdline[tsk->pid] = idx;
1484
1485 cmdline_idx = idx;
1486 }
1487
1488 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1489
0199c4e6 1490 arch_spin_unlock(&trace_cmdline_lock);
bc0c38d1
SR
1491}
1492
4ca53085 1493void trace_find_cmdline(int pid, char comm[])
bc0c38d1 1494{
bc0c38d1
SR
1495 unsigned map;
1496
4ca53085
SR
1497 if (!pid) {
1498 strcpy(comm, "<idle>");
1499 return;
1500 }
bc0c38d1 1501
74bf4076
SR
1502 if (WARN_ON_ONCE(pid < 0)) {
1503 strcpy(comm, "<XXX>");
1504 return;
1505 }
1506
4ca53085
SR
1507 if (pid > PID_MAX_DEFAULT) {
1508 strcpy(comm, "<...>");
1509 return;
1510 }
bc0c38d1 1511
5b6045a9 1512 preempt_disable();
0199c4e6 1513 arch_spin_lock(&trace_cmdline_lock);
bc0c38d1 1514 map = map_pid_to_cmdline[pid];
50d88758
TG
1515 if (map != NO_CMDLINE_MAP)
1516 strcpy(comm, saved_cmdlines[map]);
1517 else
1518 strcpy(comm, "<...>");
bc0c38d1 1519
0199c4e6 1520 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1521 preempt_enable();
bc0c38d1
SR
1522}
1523
e309b41d 1524void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1525{
0fb9656d 1526 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1527 return;
1528
7ffbd48d
SR
1529 if (!__this_cpu_read(trace_cmdline_save))
1530 return;
1531
1532 __this_cpu_write(trace_cmdline_save, false);
1533
bc0c38d1
SR
1534 trace_save_cmdline(tsk);
1535}
1536
45dcd8b8 1537void
38697053
SR
1538tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1539 int pc)
bc0c38d1
SR
1540{
1541 struct task_struct *tsk = current;
bc0c38d1 1542
777e208d
SR
1543 entry->preempt_count = pc & 0xff;
1544 entry->pid = (tsk) ? tsk->pid : 0;
1545 entry->flags =
9244489a 1546#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 1547 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
1548#else
1549 TRACE_FLAG_IRQS_NOSUPPORT |
1550#endif
bc0c38d1
SR
1551 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1552 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
1553 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1554 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 1555}
f413cdb8 1556EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 1557
e77405ad
SR
1558struct ring_buffer_event *
1559trace_buffer_lock_reserve(struct ring_buffer *buffer,
1560 int type,
1561 unsigned long len,
1562 unsigned long flags, int pc)
51a763dd
ACM
1563{
1564 struct ring_buffer_event *event;
1565
e77405ad 1566 event = ring_buffer_lock_reserve(buffer, len);
51a763dd
ACM
1567 if (event != NULL) {
1568 struct trace_entry *ent = ring_buffer_event_data(event);
1569
1570 tracing_generic_entry_update(ent, flags, pc);
1571 ent->type = type;
1572 }
1573
1574 return event;
1575}
51a763dd 1576
7ffbd48d
SR
1577void
1578__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1579{
1580 __this_cpu_write(trace_cmdline_save, true);
1581 ring_buffer_unlock_commit(buffer, event);
1582}
1583
e77405ad
SR
1584static inline void
1585__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1586 struct ring_buffer_event *event,
0d5c6e1c 1587 unsigned long flags, int pc)
51a763dd 1588{
7ffbd48d 1589 __buffer_unlock_commit(buffer, event);
51a763dd 1590
e77405ad
SR
1591 ftrace_trace_stack(buffer, flags, 6, pc);
1592 ftrace_trace_userstack(buffer, flags, pc);
07edf712
FW
1593}
1594
e77405ad
SR
1595void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1596 struct ring_buffer_event *event,
1597 unsigned long flags, int pc)
07edf712 1598{
0d5c6e1c 1599 __trace_buffer_unlock_commit(buffer, event, flags, pc);
51a763dd 1600}
0d5c6e1c 1601EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
51a763dd 1602
ccb469a1
SR
1603struct ring_buffer_event *
1604trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1605 struct ftrace_event_file *ftrace_file,
1606 int type, unsigned long len,
1607 unsigned long flags, int pc)
1608{
12883efb 1609 *current_rb = ftrace_file->tr->trace_buffer.buffer;
ccb469a1
SR
1610 return trace_buffer_lock_reserve(*current_rb,
1611 type, len, flags, pc);
1612}
1613EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1614
ef5580d0 1615struct ring_buffer_event *
e77405ad
SR
1616trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1617 int type, unsigned long len,
ef5580d0
SR
1618 unsigned long flags, int pc)
1619{
12883efb 1620 *current_rb = global_trace.trace_buffer.buffer;
e77405ad 1621 return trace_buffer_lock_reserve(*current_rb,
ef5580d0
SR
1622 type, len, flags, pc);
1623}
94487d6d 1624EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
ef5580d0 1625
e77405ad
SR
1626void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1627 struct ring_buffer_event *event,
ef5580d0
SR
1628 unsigned long flags, int pc)
1629{
0d5c6e1c 1630 __trace_buffer_unlock_commit(buffer, event, flags, pc);
07edf712 1631}
94487d6d 1632EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
07edf712 1633
0d5c6e1c
SR
1634void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1635 struct ring_buffer_event *event,
1636 unsigned long flags, int pc,
1637 struct pt_regs *regs)
1fd8df2c 1638{
7ffbd48d 1639 __buffer_unlock_commit(buffer, event);
1fd8df2c
MH
1640
1641 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1642 ftrace_trace_userstack(buffer, flags, pc);
1643}
0d5c6e1c 1644EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1fd8df2c 1645
e77405ad
SR
1646void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1647 struct ring_buffer_event *event)
77d9f465 1648{
e77405ad 1649 ring_buffer_discard_commit(buffer, event);
ef5580d0 1650}
12acd473 1651EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
ef5580d0 1652
e309b41d 1653void
7be42151 1654trace_function(struct trace_array *tr,
38697053
SR
1655 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1656 int pc)
bc0c38d1 1657{
e1112b4d 1658 struct ftrace_event_call *call = &event_function;
12883efb 1659 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 1660 struct ring_buffer_event *event;
777e208d 1661 struct ftrace_entry *entry;
bc0c38d1 1662
d769041f 1663 /* If we are reading the ring buffer, don't trace */
dd17c8f7 1664 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
d769041f
SR
1665 return;
1666
e77405ad 1667 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
51a763dd 1668 flags, pc);
3928a8a2
SR
1669 if (!event)
1670 return;
1671 entry = ring_buffer_event_data(event);
777e208d
SR
1672 entry->ip = ip;
1673 entry->parent_ip = parent_ip;
e1112b4d 1674
f306cc82 1675 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1676 __buffer_unlock_commit(buffer, event);
bc0c38d1
SR
1677}
1678
c0a0d0d3 1679#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1680
1681#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1682struct ftrace_stack {
1683 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1684};
1685
1686static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1687static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1688
e77405ad 1689static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1690 unsigned long flags,
1fd8df2c 1691 int skip, int pc, struct pt_regs *regs)
86387f7e 1692{
e1112b4d 1693 struct ftrace_event_call *call = &event_kernel_stack;
3928a8a2 1694 struct ring_buffer_event *event;
777e208d 1695 struct stack_entry *entry;
86387f7e 1696 struct stack_trace trace;
4a9bd3f1
SR
1697 int use_stack;
1698 int size = FTRACE_STACK_ENTRIES;
1699
1700 trace.nr_entries = 0;
1701 trace.skip = skip;
1702
1703 /*
1704 * Since events can happen in NMIs there's no safe way to
1705 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1706 * or NMI comes in, it will just have to use the default
1707 * FTRACE_STACK_SIZE.
1708 */
1709 preempt_disable_notrace();
1710
82146529 1711 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1712 /*
1713 * We don't need any atomic variables, just a barrier.
1714 * If an interrupt comes in, we don't care, because it would
1715 * have exited and put the counter back to what we want.
1716 * We just need a barrier to keep gcc from moving things
1717 * around.
1718 */
1719 barrier();
1720 if (use_stack == 1) {
1721 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1722 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1723
1724 if (regs)
1725 save_stack_trace_regs(regs, &trace);
1726 else
1727 save_stack_trace(&trace);
1728
1729 if (trace.nr_entries > size)
1730 size = trace.nr_entries;
1731 } else
1732 /* From now on, use_stack is a boolean */
1733 use_stack = 0;
1734
1735 size *= sizeof(unsigned long);
86387f7e 1736
e77405ad 1737 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1738 sizeof(*entry) + size, flags, pc);
3928a8a2 1739 if (!event)
4a9bd3f1
SR
1740 goto out;
1741 entry = ring_buffer_event_data(event);
86387f7e 1742
4a9bd3f1
SR
1743 memset(&entry->caller, 0, size);
1744
1745 if (use_stack)
1746 memcpy(&entry->caller, trace.entries,
1747 trace.nr_entries * sizeof(unsigned long));
1748 else {
1749 trace.max_entries = FTRACE_STACK_ENTRIES;
1750 trace.entries = entry->caller;
1751 if (regs)
1752 save_stack_trace_regs(regs, &trace);
1753 else
1754 save_stack_trace(&trace);
1755 }
1756
1757 entry->size = trace.nr_entries;
86387f7e 1758
f306cc82 1759 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1760 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1761
1762 out:
1763 /* Again, don't let gcc optimize things here */
1764 barrier();
82146529 1765 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1766 preempt_enable_notrace();
1767
f0a920d5
IM
1768}
1769
1fd8df2c
MH
1770void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1771 int skip, int pc, struct pt_regs *regs)
1772{
1773 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1774 return;
1775
1776 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1777}
1778
e77405ad
SR
1779void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1780 int skip, int pc)
53614991
SR
1781{
1782 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1783 return;
1784
1fd8df2c 1785 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
53614991
SR
1786}
1787
c0a0d0d3
FW
1788void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1789 int pc)
38697053 1790{
12883efb 1791 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1792}
1793
03889384
SR
1794/**
1795 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1796 * @skip: Number of functions to skip (helper handlers)
03889384 1797 */
c142be8e 1798void trace_dump_stack(int skip)
03889384
SR
1799{
1800 unsigned long flags;
1801
1802 if (tracing_disabled || tracing_selftest_running)
e36c5458 1803 return;
03889384
SR
1804
1805 local_save_flags(flags);
1806
c142be8e
SRRH
1807 /*
1808 * Skip 3 more, seems to get us at the caller of
1809 * this function.
1810 */
1811 skip += 3;
1812 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1813 flags, skip, preempt_count(), NULL);
03889384
SR
1814}
1815
91e86e56
SR
1816static DEFINE_PER_CPU(int, user_stack_count);
1817
e77405ad
SR
1818void
1819ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1820{
e1112b4d 1821 struct ftrace_event_call *call = &event_user_stack;
8d7c6a96 1822 struct ring_buffer_event *event;
02b67518
TE
1823 struct userstack_entry *entry;
1824 struct stack_trace trace;
02b67518
TE
1825
1826 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1827 return;
1828
b6345879
SR
1829 /*
1830 * NMIs can not handle page faults, even with fix ups.
1831 * The save user stack can (and often does) fault.
1832 */
1833 if (unlikely(in_nmi()))
1834 return;
02b67518 1835
91e86e56
SR
1836 /*
1837 * prevent recursion, since the user stack tracing may
1838 * trigger other kernel events.
1839 */
1840 preempt_disable();
1841 if (__this_cpu_read(user_stack_count))
1842 goto out;
1843
1844 __this_cpu_inc(user_stack_count);
1845
e77405ad 1846 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1847 sizeof(*entry), flags, pc);
02b67518 1848 if (!event)
1dbd1951 1849 goto out_drop_count;
02b67518 1850 entry = ring_buffer_event_data(event);
02b67518 1851
48659d31 1852 entry->tgid = current->tgid;
02b67518
TE
1853 memset(&entry->caller, 0, sizeof(entry->caller));
1854
1855 trace.nr_entries = 0;
1856 trace.max_entries = FTRACE_STACK_ENTRIES;
1857 trace.skip = 0;
1858 trace.entries = entry->caller;
1859
1860 save_stack_trace_user(&trace);
f306cc82 1861 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1862 __buffer_unlock_commit(buffer, event);
91e86e56 1863
1dbd1951 1864 out_drop_count:
91e86e56 1865 __this_cpu_dec(user_stack_count);
91e86e56
SR
1866 out:
1867 preempt_enable();
02b67518
TE
1868}
1869
4fd27358
HE
1870#ifdef UNUSED
1871static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1872{
7be42151 1873 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1874}
4fd27358 1875#endif /* UNUSED */
02b67518 1876
c0a0d0d3
FW
1877#endif /* CONFIG_STACKTRACE */
1878
07d777fe
SR
1879/* created for use with alloc_percpu */
1880struct trace_buffer_struct {
1881 char buffer[TRACE_BUF_SIZE];
1882};
1883
1884static struct trace_buffer_struct *trace_percpu_buffer;
1885static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1886static struct trace_buffer_struct *trace_percpu_irq_buffer;
1887static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1888
1889/*
1890 * The buffer used is dependent on the context. There is a per cpu
1891 * buffer for normal context, softirq contex, hard irq context and
1892 * for NMI context. Thise allows for lockless recording.
1893 *
1894 * Note, if the buffers failed to be allocated, then this returns NULL
1895 */
1896static char *get_trace_buf(void)
1897{
1898 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1899
1900 /*
1901 * If we have allocated per cpu buffers, then we do not
1902 * need to do any locking.
1903 */
1904 if (in_nmi())
1905 percpu_buffer = trace_percpu_nmi_buffer;
1906 else if (in_irq())
1907 percpu_buffer = trace_percpu_irq_buffer;
1908 else if (in_softirq())
1909 percpu_buffer = trace_percpu_sirq_buffer;
1910 else
1911 percpu_buffer = trace_percpu_buffer;
1912
1913 if (!percpu_buffer)
1914 return NULL;
1915
d8a0349c 1916 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
1917}
1918
1919static int alloc_percpu_trace_buffer(void)
1920{
1921 struct trace_buffer_struct *buffers;
1922 struct trace_buffer_struct *sirq_buffers;
1923 struct trace_buffer_struct *irq_buffers;
1924 struct trace_buffer_struct *nmi_buffers;
1925
1926 buffers = alloc_percpu(struct trace_buffer_struct);
1927 if (!buffers)
1928 goto err_warn;
1929
1930 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1931 if (!sirq_buffers)
1932 goto err_sirq;
1933
1934 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1935 if (!irq_buffers)
1936 goto err_irq;
1937
1938 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1939 if (!nmi_buffers)
1940 goto err_nmi;
1941
1942 trace_percpu_buffer = buffers;
1943 trace_percpu_sirq_buffer = sirq_buffers;
1944 trace_percpu_irq_buffer = irq_buffers;
1945 trace_percpu_nmi_buffer = nmi_buffers;
1946
1947 return 0;
1948
1949 err_nmi:
1950 free_percpu(irq_buffers);
1951 err_irq:
1952 free_percpu(sirq_buffers);
1953 err_sirq:
1954 free_percpu(buffers);
1955 err_warn:
1956 WARN(1, "Could not allocate percpu trace_printk buffer");
1957 return -ENOMEM;
1958}
1959
81698831
SR
1960static int buffers_allocated;
1961
07d777fe
SR
1962void trace_printk_init_buffers(void)
1963{
07d777fe
SR
1964 if (buffers_allocated)
1965 return;
1966
1967 if (alloc_percpu_trace_buffer())
1968 return;
1969
1970 pr_info("ftrace: Allocated trace_printk buffers\n");
1971
b382ede6
SR
1972 /* Expand the buffers to set size */
1973 tracing_update_buffers();
1974
07d777fe 1975 buffers_allocated = 1;
81698831
SR
1976
1977 /*
1978 * trace_printk_init_buffers() can be called by modules.
1979 * If that happens, then we need to start cmdline recording
1980 * directly here. If the global_trace.buffer is already
1981 * allocated here, then this was called by module code.
1982 */
12883efb 1983 if (global_trace.trace_buffer.buffer)
81698831
SR
1984 tracing_start_cmdline_record();
1985}
1986
1987void trace_printk_start_comm(void)
1988{
1989 /* Start tracing comms if trace printk is set */
1990 if (!buffers_allocated)
1991 return;
1992 tracing_start_cmdline_record();
1993}
1994
1995static void trace_printk_start_stop_comm(int enabled)
1996{
1997 if (!buffers_allocated)
1998 return;
1999
2000 if (enabled)
2001 tracing_start_cmdline_record();
2002 else
2003 tracing_stop_cmdline_record();
07d777fe
SR
2004}
2005
769b0441 2006/**
48ead020 2007 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2008 *
2009 */
40ce74f1 2010int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2011{
e1112b4d 2012 struct ftrace_event_call *call = &event_bprint;
769b0441 2013 struct ring_buffer_event *event;
e77405ad 2014 struct ring_buffer *buffer;
769b0441 2015 struct trace_array *tr = &global_trace;
48ead020 2016 struct bprint_entry *entry;
769b0441 2017 unsigned long flags;
07d777fe
SR
2018 char *tbuffer;
2019 int len = 0, size, pc;
769b0441
FW
2020
2021 if (unlikely(tracing_selftest_running || tracing_disabled))
2022 return 0;
2023
2024 /* Don't pollute graph traces with trace_vprintk internals */
2025 pause_graph_tracing();
2026
2027 pc = preempt_count();
5168ae50 2028 preempt_disable_notrace();
769b0441 2029
07d777fe
SR
2030 tbuffer = get_trace_buf();
2031 if (!tbuffer) {
2032 len = 0;
769b0441 2033 goto out;
07d777fe 2034 }
769b0441 2035
07d777fe 2036 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2037
07d777fe
SR
2038 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2039 goto out;
769b0441 2040
07d777fe 2041 local_save_flags(flags);
769b0441 2042 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2043 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2044 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2045 flags, pc);
769b0441 2046 if (!event)
07d777fe 2047 goto out;
769b0441
FW
2048 entry = ring_buffer_event_data(event);
2049 entry->ip = ip;
769b0441
FW
2050 entry->fmt = fmt;
2051
07d777fe 2052 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2053 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2054 __buffer_unlock_commit(buffer, event);
d931369b
SR
2055 ftrace_trace_stack(buffer, flags, 6, pc);
2056 }
769b0441 2057
769b0441 2058out:
5168ae50 2059 preempt_enable_notrace();
769b0441
FW
2060 unpause_graph_tracing();
2061
2062 return len;
2063}
48ead020
FW
2064EXPORT_SYMBOL_GPL(trace_vbprintk);
2065
12883efb
SRRH
2066static int
2067__trace_array_vprintk(struct ring_buffer *buffer,
2068 unsigned long ip, const char *fmt, va_list args)
48ead020 2069{
e1112b4d 2070 struct ftrace_event_call *call = &event_print;
48ead020 2071 struct ring_buffer_event *event;
07d777fe 2072 int len = 0, size, pc;
48ead020 2073 struct print_entry *entry;
07d777fe
SR
2074 unsigned long flags;
2075 char *tbuffer;
48ead020
FW
2076
2077 if (tracing_disabled || tracing_selftest_running)
2078 return 0;
2079
07d777fe
SR
2080 /* Don't pollute graph traces with trace_vprintk internals */
2081 pause_graph_tracing();
2082
48ead020
FW
2083 pc = preempt_count();
2084 preempt_disable_notrace();
48ead020 2085
07d777fe
SR
2086
2087 tbuffer = get_trace_buf();
2088 if (!tbuffer) {
2089 len = 0;
48ead020 2090 goto out;
07d777fe 2091 }
48ead020 2092
07d777fe
SR
2093 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2094 if (len > TRACE_BUF_SIZE)
2095 goto out;
48ead020 2096
07d777fe 2097 local_save_flags(flags);
48ead020 2098 size = sizeof(*entry) + len + 1;
e77405ad 2099 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2100 flags, pc);
48ead020 2101 if (!event)
07d777fe 2102 goto out;
48ead020 2103 entry = ring_buffer_event_data(event);
c13d2f7c 2104 entry->ip = ip;
48ead020 2105
07d777fe 2106 memcpy(&entry->buf, tbuffer, len);
c13d2f7c 2107 entry->buf[len] = '\0';
f306cc82 2108 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2109 __buffer_unlock_commit(buffer, event);
07d777fe 2110 ftrace_trace_stack(buffer, flags, 6, pc);
d931369b 2111 }
48ead020
FW
2112 out:
2113 preempt_enable_notrace();
07d777fe 2114 unpause_graph_tracing();
48ead020
FW
2115
2116 return len;
2117}
659372d3 2118
12883efb
SRRH
2119int trace_array_vprintk(struct trace_array *tr,
2120 unsigned long ip, const char *fmt, va_list args)
2121{
2122 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2123}
2124
2125int trace_array_printk(struct trace_array *tr,
2126 unsigned long ip, const char *fmt, ...)
2127{
2128 int ret;
2129 va_list ap;
2130
2131 if (!(trace_flags & TRACE_ITER_PRINTK))
2132 return 0;
2133
2134 va_start(ap, fmt);
2135 ret = trace_array_vprintk(tr, ip, fmt, ap);
2136 va_end(ap);
2137 return ret;
2138}
2139
2140int trace_array_printk_buf(struct ring_buffer *buffer,
2141 unsigned long ip, const char *fmt, ...)
2142{
2143 int ret;
2144 va_list ap;
2145
2146 if (!(trace_flags & TRACE_ITER_PRINTK))
2147 return 0;
2148
2149 va_start(ap, fmt);
2150 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2151 va_end(ap);
2152 return ret;
2153}
2154
659372d3
SR
2155int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2156{
a813a159 2157 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2158}
769b0441
FW
2159EXPORT_SYMBOL_GPL(trace_vprintk);
2160
e2ac8ef5 2161static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2162{
6d158a81
SR
2163 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2164
5a90f577 2165 iter->idx++;
6d158a81
SR
2166 if (buf_iter)
2167 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2168}
2169
e309b41d 2170static struct trace_entry *
bc21b478
SR
2171peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2172 unsigned long *lost_events)
dd0e545f 2173{
3928a8a2 2174 struct ring_buffer_event *event;
6d158a81 2175 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2176
d769041f
SR
2177 if (buf_iter)
2178 event = ring_buffer_iter_peek(buf_iter, ts);
2179 else
12883efb 2180 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2181 lost_events);
d769041f 2182
4a9bd3f1
SR
2183 if (event) {
2184 iter->ent_size = ring_buffer_event_length(event);
2185 return ring_buffer_event_data(event);
2186 }
2187 iter->ent_size = 0;
2188 return NULL;
dd0e545f 2189}
d769041f 2190
dd0e545f 2191static struct trace_entry *
bc21b478
SR
2192__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2193 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2194{
12883efb 2195 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2196 struct trace_entry *ent, *next = NULL;
aa27497c 2197 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2198 int cpu_file = iter->cpu_file;
3928a8a2 2199 u64 next_ts = 0, ts;
bc0c38d1 2200 int next_cpu = -1;
12b5da34 2201 int next_size = 0;
bc0c38d1
SR
2202 int cpu;
2203
b04cc6b1
FW
2204 /*
2205 * If we are in a per_cpu trace file, don't bother by iterating over
2206 * all cpu and peek directly.
2207 */
ae3b5093 2208 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2209 if (ring_buffer_empty_cpu(buffer, cpu_file))
2210 return NULL;
bc21b478 2211 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2212 if (ent_cpu)
2213 *ent_cpu = cpu_file;
2214
2215 return ent;
2216 }
2217
ab46428c 2218 for_each_tracing_cpu(cpu) {
dd0e545f 2219
3928a8a2
SR
2220 if (ring_buffer_empty_cpu(buffer, cpu))
2221 continue;
dd0e545f 2222
bc21b478 2223 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2224
cdd31cd2
IM
2225 /*
2226 * Pick the entry with the smallest timestamp:
2227 */
3928a8a2 2228 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2229 next = ent;
2230 next_cpu = cpu;
3928a8a2 2231 next_ts = ts;
bc21b478 2232 next_lost = lost_events;
12b5da34 2233 next_size = iter->ent_size;
bc0c38d1
SR
2234 }
2235 }
2236
12b5da34
SR
2237 iter->ent_size = next_size;
2238
bc0c38d1
SR
2239 if (ent_cpu)
2240 *ent_cpu = next_cpu;
2241
3928a8a2
SR
2242 if (ent_ts)
2243 *ent_ts = next_ts;
2244
bc21b478
SR
2245 if (missing_events)
2246 *missing_events = next_lost;
2247
bc0c38d1
SR
2248 return next;
2249}
2250
dd0e545f 2251/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2252struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2253 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2254{
bc21b478 2255 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2256}
2257
2258/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2259void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2260{
bc21b478
SR
2261 iter->ent = __find_next_entry(iter, &iter->cpu,
2262 &iter->lost_events, &iter->ts);
dd0e545f 2263
3928a8a2 2264 if (iter->ent)
e2ac8ef5 2265 trace_iterator_increment(iter);
dd0e545f 2266
3928a8a2 2267 return iter->ent ? iter : NULL;
b3806b43 2268}
bc0c38d1 2269
e309b41d 2270static void trace_consume(struct trace_iterator *iter)
b3806b43 2271{
12883efb 2272 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2273 &iter->lost_events);
bc0c38d1
SR
2274}
2275
e309b41d 2276static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2277{
2278 struct trace_iterator *iter = m->private;
bc0c38d1 2279 int i = (int)*pos;
4e3c3333 2280 void *ent;
bc0c38d1 2281
a63ce5b3
SR
2282 WARN_ON_ONCE(iter->leftover);
2283
bc0c38d1
SR
2284 (*pos)++;
2285
2286 /* can't go backwards */
2287 if (iter->idx > i)
2288 return NULL;
2289
2290 if (iter->idx < 0)
955b61e5 2291 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2292 else
2293 ent = iter;
2294
2295 while (ent && iter->idx < i)
955b61e5 2296 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2297
2298 iter->pos = *pos;
2299
bc0c38d1
SR
2300 return ent;
2301}
2302
955b61e5 2303void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2304{
2f26ebd5
SR
2305 struct ring_buffer_event *event;
2306 struct ring_buffer_iter *buf_iter;
2307 unsigned long entries = 0;
2308 u64 ts;
2309
12883efb 2310 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2311
6d158a81
SR
2312 buf_iter = trace_buffer_iter(iter, cpu);
2313 if (!buf_iter)
2f26ebd5
SR
2314 return;
2315
2f26ebd5
SR
2316 ring_buffer_iter_reset(buf_iter);
2317
2318 /*
2319 * We could have the case with the max latency tracers
2320 * that a reset never took place on a cpu. This is evident
2321 * by the timestamp being before the start of the buffer.
2322 */
2323 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2324 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2325 break;
2326 entries++;
2327 ring_buffer_read(buf_iter, NULL);
2328 }
2329
12883efb 2330 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2331}
2332
d7350c3f 2333/*
d7350c3f
FW
2334 * The current tracer is copied to avoid a global locking
2335 * all around.
2336 */
bc0c38d1
SR
2337static void *s_start(struct seq_file *m, loff_t *pos)
2338{
2339 struct trace_iterator *iter = m->private;
2b6080f2 2340 struct trace_array *tr = iter->tr;
b04cc6b1 2341 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2342 void *p = NULL;
2343 loff_t l = 0;
3928a8a2 2344 int cpu;
bc0c38d1 2345
2fd196ec
HT
2346 /*
2347 * copy the tracer to avoid using a global lock all around.
2348 * iter->trace is a copy of current_trace, the pointer to the
2349 * name may be used instead of a strcmp(), as iter->trace->name
2350 * will point to the same string as current_trace->name.
2351 */
bc0c38d1 2352 mutex_lock(&trace_types_lock);
2b6080f2
SR
2353 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2354 *iter->trace = *tr->current_trace;
d7350c3f 2355 mutex_unlock(&trace_types_lock);
bc0c38d1 2356
12883efb 2357#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2358 if (iter->snapshot && iter->trace->use_max_tr)
2359 return ERR_PTR(-EBUSY);
12883efb 2360#endif
debdd57f
HT
2361
2362 if (!iter->snapshot)
2363 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2364
bc0c38d1
SR
2365 if (*pos != iter->pos) {
2366 iter->ent = NULL;
2367 iter->cpu = 0;
2368 iter->idx = -1;
2369
ae3b5093 2370 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2371 for_each_tracing_cpu(cpu)
2f26ebd5 2372 tracing_iter_reset(iter, cpu);
b04cc6b1 2373 } else
2f26ebd5 2374 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2375
ac91d854 2376 iter->leftover = 0;
bc0c38d1
SR
2377 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2378 ;
2379
2380 } else {
a63ce5b3
SR
2381 /*
2382 * If we overflowed the seq_file before, then we want
2383 * to just reuse the trace_seq buffer again.
2384 */
2385 if (iter->leftover)
2386 p = iter;
2387 else {
2388 l = *pos - 1;
2389 p = s_next(m, p, &l);
2390 }
bc0c38d1
SR
2391 }
2392
4f535968 2393 trace_event_read_lock();
7e53bd42 2394 trace_access_lock(cpu_file);
bc0c38d1
SR
2395 return p;
2396}
2397
2398static void s_stop(struct seq_file *m, void *p)
2399{
7e53bd42
LJ
2400 struct trace_iterator *iter = m->private;
2401
12883efb 2402#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2403 if (iter->snapshot && iter->trace->use_max_tr)
2404 return;
12883efb 2405#endif
debdd57f
HT
2406
2407 if (!iter->snapshot)
2408 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2409
7e53bd42 2410 trace_access_unlock(iter->cpu_file);
4f535968 2411 trace_event_read_unlock();
bc0c38d1
SR
2412}
2413
39eaf7ef 2414static void
12883efb
SRRH
2415get_total_entries(struct trace_buffer *buf,
2416 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2417{
2418 unsigned long count;
2419 int cpu;
2420
2421 *total = 0;
2422 *entries = 0;
2423
2424 for_each_tracing_cpu(cpu) {
12883efb 2425 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2426 /*
2427 * If this buffer has skipped entries, then we hold all
2428 * entries for the trace and we need to ignore the
2429 * ones before the time stamp.
2430 */
12883efb
SRRH
2431 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2432 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2433 /* total is the same as the entries */
2434 *total += count;
2435 } else
2436 *total += count +
12883efb 2437 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2438 *entries += count;
2439 }
2440}
2441
e309b41d 2442static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2443{
a6168353
ME
2444 seq_puts(m, "# _------=> CPU# \n");
2445 seq_puts(m, "# / _-----=> irqs-off \n");
2446 seq_puts(m, "# | / _----=> need-resched \n");
2447 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2448 seq_puts(m, "# ||| / _--=> preempt-depth \n");
e6e1e259
SR
2449 seq_puts(m, "# |||| / delay \n");
2450 seq_puts(m, "# cmd pid ||||| time | caller \n");
2451 seq_puts(m, "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2452}
2453
12883efb 2454static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2455{
39eaf7ef
SR
2456 unsigned long total;
2457 unsigned long entries;
2458
12883efb 2459 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2460 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2461 entries, total, num_online_cpus());
2462 seq_puts(m, "#\n");
2463}
2464
12883efb 2465static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2466{
12883efb 2467 print_event_info(buf, m);
77271ce4 2468 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
a6168353 2469 seq_puts(m, "# | | | | |\n");
bc0c38d1
SR
2470}
2471
12883efb 2472static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2473{
12883efb 2474 print_event_info(buf, m);
77271ce4
SR
2475 seq_puts(m, "# _-----=> irqs-off\n");
2476 seq_puts(m, "# / _----=> need-resched\n");
2477 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2478 seq_puts(m, "# || / _--=> preempt-depth\n");
2479 seq_puts(m, "# ||| / delay\n");
2480 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2481 seq_puts(m, "# | | | |||| | |\n");
2482}
bc0c38d1 2483
62b915f1 2484void
bc0c38d1
SR
2485print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2486{
2487 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2488 struct trace_buffer *buf = iter->trace_buffer;
2489 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2490 struct tracer *type = iter->trace;
39eaf7ef
SR
2491 unsigned long entries;
2492 unsigned long total;
bc0c38d1
SR
2493 const char *name = "preemption";
2494
d840f718 2495 name = type->name;
bc0c38d1 2496
12883efb 2497 get_total_entries(buf, &total, &entries);
bc0c38d1 2498
888b55dc 2499 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2500 name, UTS_RELEASE);
888b55dc 2501 seq_puts(m, "# -----------------------------------"
bc0c38d1 2502 "---------------------------------\n");
888b55dc 2503 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2504 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2505 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2506 entries,
4c11d7ae 2507 total,
12883efb 2508 buf->cpu,
bc0c38d1
SR
2509#if defined(CONFIG_PREEMPT_NONE)
2510 "server",
2511#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2512 "desktop",
b5c21b45 2513#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2514 "preempt",
2515#else
2516 "unknown",
2517#endif
2518 /* These are reserved for later use */
2519 0, 0, 0, 0);
2520#ifdef CONFIG_SMP
2521 seq_printf(m, " #P:%d)\n", num_online_cpus());
2522#else
2523 seq_puts(m, ")\n");
2524#endif
888b55dc
KM
2525 seq_puts(m, "# -----------------\n");
2526 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2527 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2528 data->comm, data->pid,
2529 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2530 data->policy, data->rt_priority);
888b55dc 2531 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2532
2533 if (data->critical_start) {
888b55dc 2534 seq_puts(m, "# => started at: ");
214023c3
SR
2535 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2536 trace_print_seq(m, &iter->seq);
888b55dc 2537 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2538 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2539 trace_print_seq(m, &iter->seq);
8248ac05 2540 seq_puts(m, "\n#\n");
bc0c38d1
SR
2541 }
2542
888b55dc 2543 seq_puts(m, "#\n");
bc0c38d1
SR
2544}
2545
a309720c
SR
2546static void test_cpu_buff_start(struct trace_iterator *iter)
2547{
2548 struct trace_seq *s = &iter->seq;
2549
12ef7d44
SR
2550 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2551 return;
2552
2553 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2554 return;
2555
4462344e 2556 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2557 return;
2558
12883efb 2559 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2560 return;
2561
4462344e 2562 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2563
2564 /* Don't print started cpu buffer for the first entry of the trace */
2565 if (iter->idx > 1)
2566 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2567 iter->cpu);
a309720c
SR
2568}
2569
2c4f035f 2570static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2571{
214023c3 2572 struct trace_seq *s = &iter->seq;
bc0c38d1 2573 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2574 struct trace_entry *entry;
f633cef0 2575 struct trace_event *event;
bc0c38d1 2576
4e3c3333 2577 entry = iter->ent;
dd0e545f 2578
a309720c
SR
2579 test_cpu_buff_start(iter);
2580
c4a8e8be 2581 event = ftrace_find_event(entry->type);
bc0c38d1 2582
c4a8e8be 2583 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
27d48be8
SR
2584 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2585 if (!trace_print_lat_context(iter))
2586 goto partial;
2587 } else {
2588 if (!trace_print_context(iter))
2589 goto partial;
2590 }
c4a8e8be 2591 }
bc0c38d1 2592
268ccda0 2593 if (event)
a9a57763 2594 return event->funcs->trace(iter, sym_flags, event);
d9793bd8
ACM
2595
2596 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2597 goto partial;
02b67518 2598
2c4f035f 2599 return TRACE_TYPE_HANDLED;
d9793bd8
ACM
2600partial:
2601 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1
SR
2602}
2603
2c4f035f 2604static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2605{
2606 struct trace_seq *s = &iter->seq;
2607 struct trace_entry *entry;
f633cef0 2608 struct trace_event *event;
f9896bf3
IM
2609
2610 entry = iter->ent;
dd0e545f 2611
c4a8e8be 2612 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
d9793bd8
ACM
2613 if (!trace_seq_printf(s, "%d %d %llu ",
2614 entry->pid, iter->cpu, iter->ts))
2615 goto partial;
c4a8e8be 2616 }
f9896bf3 2617
f633cef0 2618 event = ftrace_find_event(entry->type);
268ccda0 2619 if (event)
a9a57763 2620 return event->funcs->raw(iter, 0, event);
d9793bd8
ACM
2621
2622 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2623 goto partial;
777e208d 2624
2c4f035f 2625 return TRACE_TYPE_HANDLED;
d9793bd8
ACM
2626partial:
2627 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3
IM
2628}
2629
2c4f035f 2630static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2631{
2632 struct trace_seq *s = &iter->seq;
2633 unsigned char newline = '\n';
2634 struct trace_entry *entry;
f633cef0 2635 struct trace_event *event;
5e3ca0ec
IM
2636
2637 entry = iter->ent;
dd0e545f 2638
c4a8e8be
FW
2639 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2640 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2641 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2642 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2643 }
5e3ca0ec 2644
f633cef0 2645 event = ftrace_find_event(entry->type);
268ccda0 2646 if (event) {
a9a57763 2647 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2648 if (ret != TRACE_TYPE_HANDLED)
2649 return ret;
2650 }
7104f300 2651
5e3ca0ec
IM
2652 SEQ_PUT_FIELD_RET(s, newline);
2653
2c4f035f 2654 return TRACE_TYPE_HANDLED;
5e3ca0ec
IM
2655}
2656
2c4f035f 2657static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2658{
2659 struct trace_seq *s = &iter->seq;
2660 struct trace_entry *entry;
f633cef0 2661 struct trace_event *event;
cb0f12aa
IM
2662
2663 entry = iter->ent;
dd0e545f 2664
c4a8e8be
FW
2665 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2666 SEQ_PUT_FIELD_RET(s, entry->pid);
1830b52d 2667 SEQ_PUT_FIELD_RET(s, iter->cpu);
c4a8e8be
FW
2668 SEQ_PUT_FIELD_RET(s, iter->ts);
2669 }
cb0f12aa 2670
f633cef0 2671 event = ftrace_find_event(entry->type);
a9a57763
SR
2672 return event ? event->funcs->binary(iter, 0, event) :
2673 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2674}
2675
62b915f1 2676int trace_empty(struct trace_iterator *iter)
bc0c38d1 2677{
6d158a81 2678 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2679 int cpu;
2680
9aba60fe 2681 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2682 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2683 cpu = iter->cpu_file;
6d158a81
SR
2684 buf_iter = trace_buffer_iter(iter, cpu);
2685 if (buf_iter) {
2686 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2687 return 0;
2688 } else {
12883efb 2689 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2690 return 0;
2691 }
2692 return 1;
2693 }
2694
ab46428c 2695 for_each_tracing_cpu(cpu) {
6d158a81
SR
2696 buf_iter = trace_buffer_iter(iter, cpu);
2697 if (buf_iter) {
2698 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2699 return 0;
2700 } else {
12883efb 2701 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2702 return 0;
2703 }
bc0c38d1 2704 }
d769041f 2705
797d3712 2706 return 1;
bc0c38d1
SR
2707}
2708
4f535968 2709/* Called with trace_event_read_lock() held. */
955b61e5 2710enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2711{
2c4f035f
FW
2712 enum print_line_t ret;
2713
ee5e51f5
JO
2714 if (iter->lost_events &&
2715 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2716 iter->cpu, iter->lost_events))
2717 return TRACE_TYPE_PARTIAL_LINE;
bc21b478 2718
2c4f035f
FW
2719 if (iter->trace && iter->trace->print_line) {
2720 ret = iter->trace->print_line(iter);
2721 if (ret != TRACE_TYPE_UNHANDLED)
2722 return ret;
2723 }
72829bc3 2724
09ae7234
SRRH
2725 if (iter->ent->type == TRACE_BPUTS &&
2726 trace_flags & TRACE_ITER_PRINTK &&
2727 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2728 return trace_print_bputs_msg_only(iter);
2729
48ead020
FW
2730 if (iter->ent->type == TRACE_BPRINT &&
2731 trace_flags & TRACE_ITER_PRINTK &&
2732 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2733 return trace_print_bprintk_msg_only(iter);
48ead020 2734
66896a85
FW
2735 if (iter->ent->type == TRACE_PRINT &&
2736 trace_flags & TRACE_ITER_PRINTK &&
2737 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2738 return trace_print_printk_msg_only(iter);
66896a85 2739
cb0f12aa
IM
2740 if (trace_flags & TRACE_ITER_BIN)
2741 return print_bin_fmt(iter);
2742
5e3ca0ec
IM
2743 if (trace_flags & TRACE_ITER_HEX)
2744 return print_hex_fmt(iter);
2745
f9896bf3
IM
2746 if (trace_flags & TRACE_ITER_RAW)
2747 return print_raw_fmt(iter);
2748
f9896bf3
IM
2749 return print_trace_fmt(iter);
2750}
2751
7e9a49ef
JO
2752void trace_latency_header(struct seq_file *m)
2753{
2754 struct trace_iterator *iter = m->private;
2755
2756 /* print nothing if the buffers are empty */
2757 if (trace_empty(iter))
2758 return;
2759
2760 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2761 print_trace_header(m, iter);
2762
2763 if (!(trace_flags & TRACE_ITER_VERBOSE))
2764 print_lat_help_header(m);
2765}
2766
62b915f1
JO
2767void trace_default_header(struct seq_file *m)
2768{
2769 struct trace_iterator *iter = m->private;
2770
f56e7f8e
JO
2771 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2772 return;
2773
62b915f1
JO
2774 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2775 /* print nothing if the buffers are empty */
2776 if (trace_empty(iter))
2777 return;
2778 print_trace_header(m, iter);
2779 if (!(trace_flags & TRACE_ITER_VERBOSE))
2780 print_lat_help_header(m);
2781 } else {
77271ce4
SR
2782 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2783 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2784 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2785 else
12883efb 2786 print_func_help_header(iter->trace_buffer, m);
77271ce4 2787 }
62b915f1
JO
2788 }
2789}
2790
e0a413f6
SR
2791static void test_ftrace_alive(struct seq_file *m)
2792{
2793 if (!ftrace_is_dead())
2794 return;
2795 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2796 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2797}
2798
d8741e2e 2799#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2800static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2801{
d8741e2e
SRRH
2802 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2803 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2804 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
b9be6d02 2805 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
d8741e2e
SRRH
2806 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2807 seq_printf(m, "# is not a '0' or '1')\n");
2808}
f1affcaa
SRRH
2809
2810static void show_snapshot_percpu_help(struct seq_file *m)
2811{
2812 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2813#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2814 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2815 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2816#else
2817 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2818 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2819#endif
2820 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2821 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2822 seq_printf(m, "# is not a '0' or '1')\n");
2823}
2824
d8741e2e
SRRH
2825static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2826{
45ad21ca 2827 if (iter->tr->allocated_snapshot)
d8741e2e
SRRH
2828 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2829 else
2830 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2831
2832 seq_printf(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2833 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2834 show_snapshot_main_help(m);
2835 else
2836 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2837}
2838#else
2839/* Should never be called */
2840static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2841#endif
2842
bc0c38d1
SR
2843static int s_show(struct seq_file *m, void *v)
2844{
2845 struct trace_iterator *iter = v;
a63ce5b3 2846 int ret;
bc0c38d1
SR
2847
2848 if (iter->ent == NULL) {
2849 if (iter->tr) {
2850 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2851 seq_puts(m, "#\n");
e0a413f6 2852 test_ftrace_alive(m);
bc0c38d1 2853 }
d8741e2e
SRRH
2854 if (iter->snapshot && trace_empty(iter))
2855 print_snapshot_help(m, iter);
2856 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2857 iter->trace->print_header(m);
62b915f1
JO
2858 else
2859 trace_default_header(m);
2860
a63ce5b3
SR
2861 } else if (iter->leftover) {
2862 /*
2863 * If we filled the seq_file buffer earlier, we
2864 * want to just show it now.
2865 */
2866 ret = trace_print_seq(m, &iter->seq);
2867
2868 /* ret should this time be zero, but you never know */
2869 iter->leftover = ret;
2870
bc0c38d1 2871 } else {
f9896bf3 2872 print_trace_line(iter);
a63ce5b3
SR
2873 ret = trace_print_seq(m, &iter->seq);
2874 /*
2875 * If we overflow the seq_file buffer, then it will
2876 * ask us for this data again at start up.
2877 * Use that instead.
2878 * ret is 0 if seq_file write succeeded.
2879 * -1 otherwise.
2880 */
2881 iter->leftover = ret;
bc0c38d1
SR
2882 }
2883
2884 return 0;
2885}
2886
649e9c70
ON
2887/*
2888 * Should be used after trace_array_get(), trace_types_lock
2889 * ensures that i_cdev was already initialized.
2890 */
2891static inline int tracing_get_cpu(struct inode *inode)
2892{
2893 if (inode->i_cdev) /* See trace_create_cpu_file() */
2894 return (long)inode->i_cdev - 1;
2895 return RING_BUFFER_ALL_CPUS;
2896}
2897
88e9d34c 2898static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
2899 .start = s_start,
2900 .next = s_next,
2901 .stop = s_stop,
2902 .show = s_show,
bc0c38d1
SR
2903};
2904
e309b41d 2905static struct trace_iterator *
6484c71c 2906__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 2907{
6484c71c 2908 struct trace_array *tr = inode->i_private;
bc0c38d1 2909 struct trace_iterator *iter;
50e18b94 2910 int cpu;
bc0c38d1 2911
85a2f9b4
SR
2912 if (tracing_disabled)
2913 return ERR_PTR(-ENODEV);
60a11774 2914
50e18b94 2915 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
2916 if (!iter)
2917 return ERR_PTR(-ENOMEM);
bc0c38d1 2918
6d158a81
SR
2919 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2920 GFP_KERNEL);
93574fcc
DC
2921 if (!iter->buffer_iter)
2922 goto release;
2923
d7350c3f
FW
2924 /*
2925 * We make a copy of the current tracer to avoid concurrent
2926 * changes on it while we are reading.
2927 */
bc0c38d1 2928 mutex_lock(&trace_types_lock);
d7350c3f 2929 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 2930 if (!iter->trace)
d7350c3f 2931 goto fail;
85a2f9b4 2932
2b6080f2 2933 *iter->trace = *tr->current_trace;
d7350c3f 2934
79f55997 2935 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
2936 goto fail;
2937
12883efb
SRRH
2938 iter->tr = tr;
2939
2940#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
2941 /* Currently only the top directory has a snapshot */
2942 if (tr->current_trace->print_max || snapshot)
12883efb 2943 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 2944 else
12883efb
SRRH
2945#endif
2946 iter->trace_buffer = &tr->trace_buffer;
debdd57f 2947 iter->snapshot = snapshot;
bc0c38d1 2948 iter->pos = -1;
6484c71c 2949 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 2950 mutex_init(&iter->mutex);
bc0c38d1 2951
8bba1bf5
MM
2952 /* Notify the tracer early; before we stop tracing. */
2953 if (iter->trace && iter->trace->open)
a93751ca 2954 iter->trace->open(iter);
8bba1bf5 2955
12ef7d44 2956 /* Annotate start of buffers if we had overruns */
12883efb 2957 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
2958 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2959
8be0709f 2960 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 2961 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
2962 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2963
debdd57f
HT
2964 /* stop the trace while dumping if we are not opening "snapshot" */
2965 if (!iter->snapshot)
2b6080f2 2966 tracing_stop_tr(tr);
2f26ebd5 2967
ae3b5093 2968 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2969 for_each_tracing_cpu(cpu) {
b04cc6b1 2970 iter->buffer_iter[cpu] =
12883efb 2971 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
2972 }
2973 ring_buffer_read_prepare_sync();
2974 for_each_tracing_cpu(cpu) {
2975 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 2976 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
2977 }
2978 } else {
2979 cpu = iter->cpu_file;
3928a8a2 2980 iter->buffer_iter[cpu] =
12883efb 2981 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
2982 ring_buffer_read_prepare_sync();
2983 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 2984 tracing_iter_reset(iter, cpu);
3928a8a2
SR
2985 }
2986
bc0c38d1
SR
2987 mutex_unlock(&trace_types_lock);
2988
bc0c38d1 2989 return iter;
3928a8a2 2990
d7350c3f 2991 fail:
3928a8a2 2992 mutex_unlock(&trace_types_lock);
d7350c3f 2993 kfree(iter->trace);
6d158a81 2994 kfree(iter->buffer_iter);
93574fcc 2995release:
50e18b94
JO
2996 seq_release_private(inode, file);
2997 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
2998}
2999
3000int tracing_open_generic(struct inode *inode, struct file *filp)
3001{
60a11774
SR
3002 if (tracing_disabled)
3003 return -ENODEV;
3004
bc0c38d1
SR
3005 filp->private_data = inode->i_private;
3006 return 0;
3007}
3008
2e86421d
GB
3009bool tracing_is_disabled(void)
3010{
3011 return (tracing_disabled) ? true: false;
3012}
3013
7b85af63
SRRH
3014/*
3015 * Open and update trace_array ref count.
3016 * Must have the current trace_array passed to it.
3017 */
dcc30223 3018static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3019{
3020 struct trace_array *tr = inode->i_private;
3021
3022 if (tracing_disabled)
3023 return -ENODEV;
3024
3025 if (trace_array_get(tr) < 0)
3026 return -ENODEV;
3027
3028 filp->private_data = inode->i_private;
3029
3030 return 0;
7b85af63
SRRH
3031}
3032
4fd27358 3033static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3034{
6484c71c 3035 struct trace_array *tr = inode->i_private;
907f2784 3036 struct seq_file *m = file->private_data;
4acd4d00 3037 struct trace_iterator *iter;
3928a8a2 3038 int cpu;
bc0c38d1 3039
ff451961 3040 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3041 trace_array_put(tr);
4acd4d00 3042 return 0;
ff451961 3043 }
4acd4d00 3044
6484c71c 3045 /* Writes do not use seq_file */
4acd4d00 3046 iter = m->private;
bc0c38d1 3047 mutex_lock(&trace_types_lock);
a695cb58 3048
3928a8a2
SR
3049 for_each_tracing_cpu(cpu) {
3050 if (iter->buffer_iter[cpu])
3051 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3052 }
3053
bc0c38d1
SR
3054 if (iter->trace && iter->trace->close)
3055 iter->trace->close(iter);
3056
debdd57f
HT
3057 if (!iter->snapshot)
3058 /* reenable tracing if it was previously enabled */
2b6080f2 3059 tracing_start_tr(tr);
f77d09a3
AL
3060
3061 __trace_array_put(tr);
3062
bc0c38d1
SR
3063 mutex_unlock(&trace_types_lock);
3064
d7350c3f 3065 mutex_destroy(&iter->mutex);
b0dfa978 3066 free_cpumask_var(iter->started);
d7350c3f 3067 kfree(iter->trace);
6d158a81 3068 kfree(iter->buffer_iter);
50e18b94 3069 seq_release_private(inode, file);
ff451961 3070
bc0c38d1
SR
3071 return 0;
3072}
3073
7b85af63
SRRH
3074static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3075{
3076 struct trace_array *tr = inode->i_private;
3077
3078 trace_array_put(tr);
bc0c38d1
SR
3079 return 0;
3080}
3081
7b85af63
SRRH
3082static int tracing_single_release_tr(struct inode *inode, struct file *file)
3083{
3084 struct trace_array *tr = inode->i_private;
3085
3086 trace_array_put(tr);
3087
3088 return single_release(inode, file);
3089}
3090
bc0c38d1
SR
3091static int tracing_open(struct inode *inode, struct file *file)
3092{
6484c71c 3093 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3094 struct trace_iterator *iter;
3095 int ret = 0;
bc0c38d1 3096
ff451961
SRRH
3097 if (trace_array_get(tr) < 0)
3098 return -ENODEV;
3099
4acd4d00 3100 /* If this file was open for write, then erase contents */
6484c71c
ON
3101 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3102 int cpu = tracing_get_cpu(inode);
3103
3104 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3105 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3106 else
6484c71c 3107 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3108 }
bc0c38d1 3109
4acd4d00 3110 if (file->f_mode & FMODE_READ) {
6484c71c 3111 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3112 if (IS_ERR(iter))
3113 ret = PTR_ERR(iter);
3114 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3115 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3116 }
ff451961
SRRH
3117
3118 if (ret < 0)
3119 trace_array_put(tr);
3120
bc0c38d1
SR
3121 return ret;
3122}
3123
e309b41d 3124static void *
bc0c38d1
SR
3125t_next(struct seq_file *m, void *v, loff_t *pos)
3126{
f129e965 3127 struct tracer *t = v;
bc0c38d1
SR
3128
3129 (*pos)++;
3130
3131 if (t)
3132 t = t->next;
3133
bc0c38d1
SR
3134 return t;
3135}
3136
3137static void *t_start(struct seq_file *m, loff_t *pos)
3138{
f129e965 3139 struct tracer *t;
bc0c38d1
SR
3140 loff_t l = 0;
3141
3142 mutex_lock(&trace_types_lock);
f129e965 3143 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
bc0c38d1
SR
3144 ;
3145
3146 return t;
3147}
3148
3149static void t_stop(struct seq_file *m, void *p)
3150{
3151 mutex_unlock(&trace_types_lock);
3152}
3153
3154static int t_show(struct seq_file *m, void *v)
3155{
3156 struct tracer *t = v;
3157
3158 if (!t)
3159 return 0;
3160
3161 seq_printf(m, "%s", t->name);
3162 if (t->next)
3163 seq_putc(m, ' ');
3164 else
3165 seq_putc(m, '\n');
3166
3167 return 0;
3168}
3169
88e9d34c 3170static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3171 .start = t_start,
3172 .next = t_next,
3173 .stop = t_stop,
3174 .show = t_show,
bc0c38d1
SR
3175};
3176
3177static int show_traces_open(struct inode *inode, struct file *file)
3178{
60a11774
SR
3179 if (tracing_disabled)
3180 return -ENODEV;
3181
f129e965 3182 return seq_open(file, &show_traces_seq_ops);
bc0c38d1
SR
3183}
3184
4acd4d00
SR
3185static ssize_t
3186tracing_write_stub(struct file *filp, const char __user *ubuf,
3187 size_t count, loff_t *ppos)
3188{
3189 return count;
3190}
3191
098c879e 3192loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3193{
098c879e
SRRH
3194 int ret;
3195
364829b1 3196 if (file->f_mode & FMODE_READ)
098c879e 3197 ret = seq_lseek(file, offset, whence);
364829b1 3198 else
098c879e
SRRH
3199 file->f_pos = ret = 0;
3200
3201 return ret;
364829b1
SP
3202}
3203
5e2336a0 3204static const struct file_operations tracing_fops = {
4bf39a94
IM
3205 .open = tracing_open,
3206 .read = seq_read,
4acd4d00 3207 .write = tracing_write_stub,
098c879e 3208 .llseek = tracing_lseek,
4bf39a94 3209 .release = tracing_release,
bc0c38d1
SR
3210};
3211
5e2336a0 3212static const struct file_operations show_traces_fops = {
c7078de1
IM
3213 .open = show_traces_open,
3214 .read = seq_read,
3215 .release = seq_release,
b444786f 3216 .llseek = seq_lseek,
c7078de1
IM
3217};
3218
36dfe925
IM
3219/*
3220 * The tracer itself will not take this lock, but still we want
3221 * to provide a consistent cpumask to user-space:
3222 */
3223static DEFINE_MUTEX(tracing_cpumask_update_lock);
3224
3225/*
3226 * Temporary storage for the character representation of the
3227 * CPU bitmask (and one more byte for the newline):
3228 */
3229static char mask_str[NR_CPUS + 1];
3230
c7078de1
IM
3231static ssize_t
3232tracing_cpumask_read(struct file *filp, char __user *ubuf,
3233 size_t count, loff_t *ppos)
3234{
ccfe9e42 3235 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3236 int len;
c7078de1
IM
3237
3238 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3239
ccfe9e42 3240 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
36dfe925
IM
3241 if (count - len < 2) {
3242 count = -EINVAL;
3243 goto out_err;
3244 }
3245 len += sprintf(mask_str + len, "\n");
3246 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3247
3248out_err:
c7078de1
IM
3249 mutex_unlock(&tracing_cpumask_update_lock);
3250
3251 return count;
3252}
3253
3254static ssize_t
3255tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3256 size_t count, loff_t *ppos)
3257{
ccfe9e42 3258 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3259 cpumask_var_t tracing_cpumask_new;
2b6080f2 3260 int err, cpu;
9e01c1b7
RR
3261
3262 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3263 return -ENOMEM;
c7078de1 3264
9e01c1b7 3265 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3266 if (err)
36dfe925
IM
3267 goto err_unlock;
3268
215368e8
LZ
3269 mutex_lock(&tracing_cpumask_update_lock);
3270
a5e25883 3271 local_irq_disable();
0199c4e6 3272 arch_spin_lock(&ftrace_max_lock);
ab46428c 3273 for_each_tracing_cpu(cpu) {
36dfe925
IM
3274 /*
3275 * Increase/decrease the disabled counter if we are
3276 * about to flip a bit in the cpumask:
3277 */
ccfe9e42 3278 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3279 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3280 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3281 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3282 }
ccfe9e42 3283 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3284 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3285 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3286 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3287 }
3288 }
0199c4e6 3289 arch_spin_unlock(&ftrace_max_lock);
a5e25883 3290 local_irq_enable();
36dfe925 3291
ccfe9e42 3292 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3293
3294 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3295 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3296
3297 return count;
36dfe925
IM
3298
3299err_unlock:
215368e8 3300 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3301
3302 return err;
c7078de1
IM
3303}
3304
5e2336a0 3305static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3306 .open = tracing_open_generic_tr,
c7078de1
IM
3307 .read = tracing_cpumask_read,
3308 .write = tracing_cpumask_write,
ccfe9e42 3309 .release = tracing_release_generic_tr,
b444786f 3310 .llseek = generic_file_llseek,
bc0c38d1
SR
3311};
3312
fdb372ed 3313static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3314{
d8e83d26 3315 struct tracer_opt *trace_opts;
2b6080f2 3316 struct trace_array *tr = m->private;
d8e83d26 3317 u32 tracer_flags;
d8e83d26 3318 int i;
adf9f195 3319
d8e83d26 3320 mutex_lock(&trace_types_lock);
2b6080f2
SR
3321 tracer_flags = tr->current_trace->flags->val;
3322 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3323
bc0c38d1
SR
3324 for (i = 0; trace_options[i]; i++) {
3325 if (trace_flags & (1 << i))
fdb372ed 3326 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3327 else
fdb372ed 3328 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3329 }
3330
adf9f195
FW
3331 for (i = 0; trace_opts[i].name; i++) {
3332 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3333 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3334 else
fdb372ed 3335 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3336 }
d8e83d26 3337 mutex_unlock(&trace_types_lock);
adf9f195 3338
fdb372ed 3339 return 0;
bc0c38d1 3340}
bc0c38d1 3341
8d18eaaf
LZ
3342static int __set_tracer_option(struct tracer *trace,
3343 struct tracer_flags *tracer_flags,
3344 struct tracer_opt *opts, int neg)
3345{
3346 int ret;
bc0c38d1 3347
8d18eaaf
LZ
3348 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
3349 if (ret)
3350 return ret;
3351
3352 if (neg)
3353 tracer_flags->val &= ~opts->bit;
3354 else
3355 tracer_flags->val |= opts->bit;
3356 return 0;
bc0c38d1
SR
3357}
3358
adf9f195
FW
3359/* Try to assign a tracer specific option */
3360static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3361{
7770841e 3362 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3363 struct tracer_opt *opts = NULL;
8d18eaaf 3364 int i;
adf9f195 3365
7770841e
Z
3366 for (i = 0; tracer_flags->opts[i].name; i++) {
3367 opts = &tracer_flags->opts[i];
adf9f195 3368
8d18eaaf
LZ
3369 if (strcmp(cmp, opts->name) == 0)
3370 return __set_tracer_option(trace, trace->flags,
3371 opts, neg);
adf9f195 3372 }
adf9f195 3373
8d18eaaf 3374 return -EINVAL;
adf9f195
FW
3375}
3376
613f04a0
SRRH
3377/* Some tracers require overwrite to stay enabled */
3378int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3379{
3380 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3381 return -1;
3382
3383 return 0;
3384}
3385
2b6080f2 3386int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3387{
3388 /* do nothing if flag is already set */
3389 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3390 return 0;
3391
3392 /* Give the tracer a chance to approve the change */
2b6080f2
SR
3393 if (tr->current_trace->flag_changed)
3394 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
613f04a0 3395 return -EINVAL;
af4617bd
SR
3396
3397 if (enabled)
3398 trace_flags |= mask;
3399 else
3400 trace_flags &= ~mask;
e870e9a1
LZ
3401
3402 if (mask == TRACE_ITER_RECORD_CMD)
3403 trace_event_enable_cmd_record(enabled);
750912fa 3404
80902822 3405 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3406 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3407#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3408 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3409#endif
3410 }
81698831
SR
3411
3412 if (mask == TRACE_ITER_PRINTK)
3413 trace_printk_start_stop_comm(enabled);
613f04a0
SRRH
3414
3415 return 0;
af4617bd
SR
3416}
3417
2b6080f2 3418static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3419{
8d18eaaf 3420 char *cmp;
bc0c38d1 3421 int neg = 0;
613f04a0 3422 int ret = -ENODEV;
bc0c38d1
SR
3423 int i;
3424
7bcfaf54 3425 cmp = strstrip(option);
bc0c38d1 3426
8d18eaaf 3427 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3428 neg = 1;
3429 cmp += 2;
3430 }
3431
69d34da2
SRRH
3432 mutex_lock(&trace_types_lock);
3433
bc0c38d1 3434 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3435 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3436 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3437 break;
3438 }
3439 }
adf9f195
FW
3440
3441 /* If no option could be set, test the specific tracer options */
69d34da2 3442 if (!trace_options[i])
2b6080f2 3443 ret = set_tracer_option(tr->current_trace, cmp, neg);
69d34da2
SRRH
3444
3445 mutex_unlock(&trace_types_lock);
bc0c38d1 3446
7bcfaf54
SR
3447 return ret;
3448}
3449
3450static ssize_t
3451tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3452 size_t cnt, loff_t *ppos)
3453{
2b6080f2
SR
3454 struct seq_file *m = filp->private_data;
3455 struct trace_array *tr = m->private;
7bcfaf54 3456 char buf[64];
613f04a0 3457 int ret;
7bcfaf54
SR
3458
3459 if (cnt >= sizeof(buf))
3460 return -EINVAL;
3461
3462 if (copy_from_user(&buf, ubuf, cnt))
3463 return -EFAULT;
3464
a8dd2176
SR
3465 buf[cnt] = 0;
3466
2b6080f2 3467 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3468 if (ret < 0)
3469 return ret;
7bcfaf54 3470
cf8517cf 3471 *ppos += cnt;
bc0c38d1
SR
3472
3473 return cnt;
3474}
3475
fdb372ed
LZ
3476static int tracing_trace_options_open(struct inode *inode, struct file *file)
3477{
7b85af63 3478 struct trace_array *tr = inode->i_private;
f77d09a3 3479 int ret;
7b85af63 3480
fdb372ed
LZ
3481 if (tracing_disabled)
3482 return -ENODEV;
2b6080f2 3483
7b85af63
SRRH
3484 if (trace_array_get(tr) < 0)
3485 return -ENODEV;
3486
f77d09a3
AL
3487 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3488 if (ret < 0)
3489 trace_array_put(tr);
3490
3491 return ret;
fdb372ed
LZ
3492}
3493
5e2336a0 3494static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3495 .open = tracing_trace_options_open,
3496 .read = seq_read,
3497 .llseek = seq_lseek,
7b85af63 3498 .release = tracing_single_release_tr,
ee6bce52 3499 .write = tracing_trace_options_write,
bc0c38d1
SR
3500};
3501
7bd2f24c
IM
3502static const char readme_msg[] =
3503 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3504 "# echo 0 > tracing_on : quick way to disable tracing\n"
3505 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3506 " Important files:\n"
3507 " trace\t\t\t- The static contents of the buffer\n"
3508 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3509 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3510 " current_tracer\t- function and latency tracers\n"
3511 " available_tracers\t- list of configured tracers for current_tracer\n"
3512 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3513 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3514 " trace_clock\t\t-change the clock used to order events\n"
3515 " local: Per cpu clock but may not be synced across CPUs\n"
3516 " global: Synced across CPUs but slows tracing down.\n"
3517 " counter: Not a clock, but just an increment\n"
3518 " uptime: Jiffy counter from time of boot\n"
3519 " perf: Same clock that perf events use\n"
3520#ifdef CONFIG_X86_64
3521 " x86-tsc: TSC cycle counter\n"
3522#endif
3523 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3524 " tracing_cpumask\t- Limit which CPUs to trace\n"
3525 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3526 "\t\t\t Remove sub-buffer with rmdir\n"
3527 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3528 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3529 "\t\t\t option name\n"
22f45649
SRRH
3530#ifdef CONFIG_DYNAMIC_FTRACE
3531 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3532 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3533 "\t\t\t functions\n"
3534 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3535 "\t modules: Can select a group via module\n"
3536 "\t Format: :mod:<module-name>\n"
3537 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3538 "\t triggers: a command to perform when function is hit\n"
3539 "\t Format: <function>:<trigger>[:count]\n"
3540 "\t trigger: traceon, traceoff\n"
3541 "\t\t enable_event:<system>:<event>\n"
3542 "\t\t disable_event:<system>:<event>\n"
22f45649 3543#ifdef CONFIG_STACKTRACE
71485c45 3544 "\t\t stacktrace\n"
22f45649
SRRH
3545#endif
3546#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3547 "\t\t snapshot\n"
22f45649 3548#endif
71485c45
SRRH
3549 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3550 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3551 "\t The first one will disable tracing every time do_fault is hit\n"
3552 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3553 "\t The first time do trap is hit and it disables tracing, the\n"
3554 "\t counter will decrement to 2. If tracing is already disabled,\n"
3555 "\t the counter will not decrement. It only decrements when the\n"
3556 "\t trigger did work\n"
3557 "\t To remove trigger without count:\n"
3558 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3559 "\t To remove trigger with a count:\n"
3560 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3561 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3562 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3563 "\t modules: Can select a group via module command :mod:\n"
3564 "\t Does not accept triggers\n"
22f45649
SRRH
3565#endif /* CONFIG_DYNAMIC_FTRACE */
3566#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3567 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3568 "\t\t (function)\n"
22f45649
SRRH
3569#endif
3570#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3571 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3572 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3573#endif
3574#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3575 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3576 "\t\t\t snapshot buffer. Read the contents for more\n"
3577 "\t\t\t information\n"
22f45649 3578#endif
991821c8 3579#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3580 " stack_trace\t\t- Shows the max stack trace when active\n"
3581 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3582 "\t\t\t Write into this file to reset the max size (trigger a\n"
3583 "\t\t\t new trace)\n"
22f45649 3584#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3585 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3586 "\t\t\t traces\n"
22f45649 3587#endif
991821c8 3588#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3589 " events/\t\t- Directory containing all trace event subsystems:\n"
3590 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3591 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3592 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3593 "\t\t\t events\n"
26f25564 3594 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3595 " events/<system>/<event>/\t- Directory containing control files for\n"
3596 "\t\t\t <event>:\n"
26f25564
TZ
3597 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3598 " filter\t\t- If set, only events passing filter are traced\n"
3599 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3600 "\t Format: <trigger>[:count][if <filter>]\n"
3601 "\t trigger: traceon, traceoff\n"
3602 "\t enable_event:<system>:<event>\n"
3603 "\t disable_event:<system>:<event>\n"
26f25564 3604#ifdef CONFIG_STACKTRACE
71485c45 3605 "\t\t stacktrace\n"
26f25564
TZ
3606#endif
3607#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3608 "\t\t snapshot\n"
26f25564 3609#endif
71485c45
SRRH
3610 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3611 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3612 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3613 "\t events/block/block_unplug/trigger\n"
3614 "\t The first disables tracing every time block_unplug is hit.\n"
3615 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3616 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3617 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3618 "\t Like function triggers, the counter is only decremented if it\n"
3619 "\t enabled or disabled tracing.\n"
3620 "\t To remove a trigger without a count:\n"
3621 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3622 "\t To remove a trigger with a count:\n"
3623 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3624 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3625;
3626
3627static ssize_t
3628tracing_readme_read(struct file *filp, char __user *ubuf,
3629 size_t cnt, loff_t *ppos)
3630{
3631 return simple_read_from_buffer(ubuf, cnt, ppos,
3632 readme_msg, strlen(readme_msg));
3633}
3634
5e2336a0 3635static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3636 .open = tracing_open_generic,
3637 .read = tracing_readme_read,
b444786f 3638 .llseek = generic_file_llseek,
7bd2f24c
IM
3639};
3640
69abe6a5
AP
3641static ssize_t
3642tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3643 size_t cnt, loff_t *ppos)
3644{
3645 char *buf_comm;
3646 char *file_buf;
3647 char *buf;
3648 int len = 0;
3649 int pid;
3650 int i;
3651
3652 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3653 if (!file_buf)
3654 return -ENOMEM;
3655
3656 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3657 if (!buf_comm) {
3658 kfree(file_buf);
3659 return -ENOMEM;
3660 }
3661
3662 buf = file_buf;
3663
3664 for (i = 0; i < SAVED_CMDLINES; i++) {
3665 int r;
3666
3667 pid = map_cmdline_to_pid[i];
3668 if (pid == -1 || pid == NO_CMDLINE_MAP)
3669 continue;
3670
3671 trace_find_cmdline(pid, buf_comm);
3672 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3673 buf += r;
3674 len += r;
3675 }
3676
3677 len = simple_read_from_buffer(ubuf, cnt, ppos,
3678 file_buf, len);
3679
3680 kfree(file_buf);
3681 kfree(buf_comm);
3682
3683 return len;
3684}
3685
3686static const struct file_operations tracing_saved_cmdlines_fops = {
3687 .open = tracing_open_generic,
3688 .read = tracing_saved_cmdlines_read,
b444786f 3689 .llseek = generic_file_llseek,
69abe6a5
AP
3690};
3691
bc0c38d1
SR
3692static ssize_t
3693tracing_set_trace_read(struct file *filp, char __user *ubuf,
3694 size_t cnt, loff_t *ppos)
3695{
2b6080f2 3696 struct trace_array *tr = filp->private_data;
ee6c2c1b 3697 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
3698 int r;
3699
3700 mutex_lock(&trace_types_lock);
2b6080f2 3701 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
3702 mutex_unlock(&trace_types_lock);
3703
4bf39a94 3704 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
3705}
3706
b6f11df2
ACM
3707int tracer_init(struct tracer *t, struct trace_array *tr)
3708{
12883efb 3709 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
3710 return t->init(tr);
3711}
3712
12883efb 3713static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
3714{
3715 int cpu;
737223fb 3716
438ced17 3717 for_each_tracing_cpu(cpu)
12883efb 3718 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
3719}
3720
12883efb 3721#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 3722/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
3723static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3724 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
3725{
3726 int cpu, ret = 0;
3727
3728 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3729 for_each_tracing_cpu(cpu) {
12883efb
SRRH
3730 ret = ring_buffer_resize(trace_buf->buffer,
3731 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
3732 if (ret < 0)
3733 break;
12883efb
SRRH
3734 per_cpu_ptr(trace_buf->data, cpu)->entries =
3735 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
3736 }
3737 } else {
12883efb
SRRH
3738 ret = ring_buffer_resize(trace_buf->buffer,
3739 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 3740 if (ret == 0)
12883efb
SRRH
3741 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3742 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
3743 }
3744
3745 return ret;
3746}
12883efb 3747#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 3748
2b6080f2
SR
3749static int __tracing_resize_ring_buffer(struct trace_array *tr,
3750 unsigned long size, int cpu)
73c5162a
SR
3751{
3752 int ret;
3753
3754 /*
3755 * If kernel or user changes the size of the ring buffer
a123c52b
SR
3756 * we use the size that was given, and we can forget about
3757 * expanding it later.
73c5162a 3758 */
55034cd6 3759 ring_buffer_expanded = true;
73c5162a 3760
b382ede6 3761 /* May be called before buffers are initialized */
12883efb 3762 if (!tr->trace_buffer.buffer)
b382ede6
SR
3763 return 0;
3764
12883efb 3765 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
3766 if (ret < 0)
3767 return ret;
3768
12883efb 3769#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3770 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3771 !tr->current_trace->use_max_tr)
ef710e10
KM
3772 goto out;
3773
12883efb 3774 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 3775 if (ret < 0) {
12883efb
SRRH
3776 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3777 &tr->trace_buffer, cpu);
73c5162a 3778 if (r < 0) {
a123c52b
SR
3779 /*
3780 * AARGH! We are left with different
3781 * size max buffer!!!!
3782 * The max buffer is our "snapshot" buffer.
3783 * When a tracer needs a snapshot (one of the
3784 * latency tracers), it swaps the max buffer
3785 * with the saved snap shot. We succeeded to
3786 * update the size of the main buffer, but failed to
3787 * update the size of the max buffer. But when we tried
3788 * to reset the main buffer to the original size, we
3789 * failed there too. This is very unlikely to
3790 * happen, but if it does, warn and kill all
3791 * tracing.
3792 */
73c5162a
SR
3793 WARN_ON(1);
3794 tracing_disabled = 1;
3795 }
3796 return ret;
3797 }
3798
438ced17 3799 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3800 set_buffer_entries(&tr->max_buffer, size);
438ced17 3801 else
12883efb 3802 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 3803
ef710e10 3804 out:
12883efb
SRRH
3805#endif /* CONFIG_TRACER_MAX_TRACE */
3806
438ced17 3807 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3808 set_buffer_entries(&tr->trace_buffer, size);
438ced17 3809 else
12883efb 3810 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
3811
3812 return ret;
3813}
3814
2b6080f2
SR
3815static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3816 unsigned long size, int cpu_id)
4f271a2a 3817{
83f40318 3818 int ret = size;
4f271a2a
VN
3819
3820 mutex_lock(&trace_types_lock);
3821
438ced17
VN
3822 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3823 /* make sure, this cpu is enabled in the mask */
3824 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3825 ret = -EINVAL;
3826 goto out;
3827 }
3828 }
4f271a2a 3829
2b6080f2 3830 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
3831 if (ret < 0)
3832 ret = -ENOMEM;
3833
438ced17 3834out:
4f271a2a
VN
3835 mutex_unlock(&trace_types_lock);
3836
3837 return ret;
3838}
3839
ef710e10 3840
1852fcce
SR
3841/**
3842 * tracing_update_buffers - used by tracing facility to expand ring buffers
3843 *
3844 * To save on memory when the tracing is never used on a system with it
3845 * configured in. The ring buffers are set to a minimum size. But once
3846 * a user starts to use the tracing facility, then they need to grow
3847 * to their default size.
3848 *
3849 * This function is to be called when a tracer is about to be used.
3850 */
3851int tracing_update_buffers(void)
3852{
3853 int ret = 0;
3854
1027fcb2 3855 mutex_lock(&trace_types_lock);
1852fcce 3856 if (!ring_buffer_expanded)
2b6080f2 3857 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 3858 RING_BUFFER_ALL_CPUS);
1027fcb2 3859 mutex_unlock(&trace_types_lock);
1852fcce
SR
3860
3861 return ret;
3862}
3863
577b785f
SR
3864struct trace_option_dentry;
3865
3866static struct trace_option_dentry *
2b6080f2 3867create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f
SR
3868
3869static void
3870destroy_trace_option_files(struct trace_option_dentry *topts);
3871
b2821ae6 3872static int tracing_set_tracer(const char *buf)
bc0c38d1 3873{
577b785f 3874 static struct trace_option_dentry *topts;
bc0c38d1
SR
3875 struct trace_array *tr = &global_trace;
3876 struct tracer *t;
12883efb 3877#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 3878 bool had_max_tr;
12883efb 3879#endif
d9e54076 3880 int ret = 0;
bc0c38d1 3881
1027fcb2
SR
3882 mutex_lock(&trace_types_lock);
3883
73c5162a 3884 if (!ring_buffer_expanded) {
2b6080f2 3885 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 3886 RING_BUFFER_ALL_CPUS);
73c5162a 3887 if (ret < 0)
59f586db 3888 goto out;
73c5162a
SR
3889 ret = 0;
3890 }
3891
bc0c38d1
SR
3892 for (t = trace_types; t; t = t->next) {
3893 if (strcmp(t->name, buf) == 0)
3894 break;
3895 }
c2931e05
FW
3896 if (!t) {
3897 ret = -EINVAL;
3898 goto out;
3899 }
2b6080f2 3900 if (t == tr->current_trace)
bc0c38d1
SR
3901 goto out;
3902
9f029e83 3903 trace_branch_disable();
613f04a0 3904
2b6080f2 3905 tr->current_trace->enabled = false;
613f04a0 3906
2b6080f2
SR
3907 if (tr->current_trace->reset)
3908 tr->current_trace->reset(tr);
34600f0e 3909
12883efb 3910 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 3911 tr->current_trace = &nop_trace;
34600f0e 3912
45ad21ca
SRRH
3913#ifdef CONFIG_TRACER_MAX_TRACE
3914 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
3915
3916 if (had_max_tr && !t->use_max_tr) {
3917 /*
3918 * We need to make sure that the update_max_tr sees that
3919 * current_trace changed to nop_trace to keep it from
3920 * swapping the buffers after we resize it.
3921 * The update_max_tr is called from interrupts disabled
3922 * so a synchronized_sched() is sufficient.
3923 */
3924 synchronize_sched();
3209cff4 3925 free_snapshot(tr);
ef710e10 3926 }
12883efb 3927#endif
577b785f
SR
3928 destroy_trace_option_files(topts);
3929
2b6080f2 3930 topts = create_trace_option_files(tr, t);
12883efb
SRRH
3931
3932#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 3933 if (t->use_max_tr && !had_max_tr) {
3209cff4 3934 ret = alloc_snapshot(tr);
d60da506
HT
3935 if (ret < 0)
3936 goto out;
ef710e10 3937 }
12883efb 3938#endif
577b785f 3939
1c80025a 3940 if (t->init) {
b6f11df2 3941 ret = tracer_init(t, tr);
1c80025a
FW
3942 if (ret)
3943 goto out;
3944 }
bc0c38d1 3945
2b6080f2
SR
3946 tr->current_trace = t;
3947 tr->current_trace->enabled = true;
9f029e83 3948 trace_branch_enable(tr);
bc0c38d1
SR
3949 out:
3950 mutex_unlock(&trace_types_lock);
3951
d9e54076
PZ
3952 return ret;
3953}
3954
3955static ssize_t
3956tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3957 size_t cnt, loff_t *ppos)
3958{
ee6c2c1b 3959 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
3960 int i;
3961 size_t ret;
e6e7a65a
FW
3962 int err;
3963
3964 ret = cnt;
d9e54076 3965
ee6c2c1b
LZ
3966 if (cnt > MAX_TRACER_SIZE)
3967 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
3968
3969 if (copy_from_user(&buf, ubuf, cnt))
3970 return -EFAULT;
3971
3972 buf[cnt] = 0;
3973
3974 /* strip ending whitespace. */
3975 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3976 buf[i] = 0;
3977
e6e7a65a
FW
3978 err = tracing_set_tracer(buf);
3979 if (err)
3980 return err;
d9e54076 3981
cf8517cf 3982 *ppos += ret;
bc0c38d1 3983
c2931e05 3984 return ret;
bc0c38d1
SR
3985}
3986
3987static ssize_t
3988tracing_max_lat_read(struct file *filp, char __user *ubuf,
3989 size_t cnt, loff_t *ppos)
3990{
3991 unsigned long *ptr = filp->private_data;
3992 char buf[64];
3993 int r;
3994
cffae437 3995 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 3996 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
3997 if (r > sizeof(buf))
3998 r = sizeof(buf);
4bf39a94 3999 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4000}
4001
4002static ssize_t
4003tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4004 size_t cnt, loff_t *ppos)
4005{
5e39841c 4006 unsigned long *ptr = filp->private_data;
5e39841c 4007 unsigned long val;
c6caeeb1 4008 int ret;
bc0c38d1 4009
22fe9b54
PH
4010 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4011 if (ret)
c6caeeb1 4012 return ret;
bc0c38d1
SR
4013
4014 *ptr = val * 1000;
4015
4016 return cnt;
4017}
4018
b3806b43
SR
4019static int tracing_open_pipe(struct inode *inode, struct file *filp)
4020{
15544209 4021 struct trace_array *tr = inode->i_private;
b3806b43 4022 struct trace_iterator *iter;
b04cc6b1 4023 int ret = 0;
b3806b43
SR
4024
4025 if (tracing_disabled)
4026 return -ENODEV;
4027
7b85af63
SRRH
4028 if (trace_array_get(tr) < 0)
4029 return -ENODEV;
4030
b04cc6b1
FW
4031 mutex_lock(&trace_types_lock);
4032
b3806b43
SR
4033 /* create a buffer to store the information to pass to userspace */
4034 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4035 if (!iter) {
4036 ret = -ENOMEM;
f77d09a3 4037 __trace_array_put(tr);
b04cc6b1
FW
4038 goto out;
4039 }
b3806b43 4040
d7350c3f
FW
4041 /*
4042 * We make a copy of the current tracer to avoid concurrent
4043 * changes on it while we are reading.
4044 */
4045 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4046 if (!iter->trace) {
4047 ret = -ENOMEM;
4048 goto fail;
4049 }
2b6080f2 4050 *iter->trace = *tr->current_trace;
d7350c3f 4051
4462344e 4052 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4053 ret = -ENOMEM;
d7350c3f 4054 goto fail;
4462344e
RR
4055 }
4056
a309720c 4057 /* trace pipe does not show start of buffer */
4462344e 4058 cpumask_setall(iter->started);
a309720c 4059
112f38a7
SR
4060 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4061 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4062
8be0709f 4063 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4064 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4065 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4066
15544209
ON
4067 iter->tr = tr;
4068 iter->trace_buffer = &tr->trace_buffer;
4069 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4070 mutex_init(&iter->mutex);
b3806b43
SR
4071 filp->private_data = iter;
4072
107bad8b
SR
4073 if (iter->trace->pipe_open)
4074 iter->trace->pipe_open(iter);
107bad8b 4075
b444786f 4076 nonseekable_open(inode, filp);
b04cc6b1
FW
4077out:
4078 mutex_unlock(&trace_types_lock);
4079 return ret;
d7350c3f
FW
4080
4081fail:
4082 kfree(iter->trace);
4083 kfree(iter);
7b85af63 4084 __trace_array_put(tr);
d7350c3f
FW
4085 mutex_unlock(&trace_types_lock);
4086 return ret;
b3806b43
SR
4087}
4088
4089static int tracing_release_pipe(struct inode *inode, struct file *file)
4090{
4091 struct trace_iterator *iter = file->private_data;
15544209 4092 struct trace_array *tr = inode->i_private;
b3806b43 4093
b04cc6b1
FW
4094 mutex_lock(&trace_types_lock);
4095
29bf4a5e 4096 if (iter->trace->pipe_close)
c521efd1
SR
4097 iter->trace->pipe_close(iter);
4098
b04cc6b1
FW
4099 mutex_unlock(&trace_types_lock);
4100
4462344e 4101 free_cpumask_var(iter->started);
d7350c3f
FW
4102 mutex_destroy(&iter->mutex);
4103 kfree(iter->trace);
b3806b43 4104 kfree(iter);
b3806b43 4105
7b85af63
SRRH
4106 trace_array_put(tr);
4107
b3806b43
SR
4108 return 0;
4109}
4110
2a2cc8f7 4111static unsigned int
cc60cdc9 4112trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4113{
15693458
SRRH
4114 /* Iterators are static, they should be filled or empty */
4115 if (trace_buffer_iter(iter, iter->cpu_file))
4116 return POLLIN | POLLRDNORM;
2a2cc8f7 4117
15693458 4118 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4119 /*
4120 * Always select as readable when in blocking mode
4121 */
4122 return POLLIN | POLLRDNORM;
15693458 4123 else
12883efb 4124 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4125 filp, poll_table);
2a2cc8f7 4126}
2a2cc8f7 4127
cc60cdc9
SR
4128static unsigned int
4129tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4130{
4131 struct trace_iterator *iter = filp->private_data;
4132
4133 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4134}
4135
6eaaa5d5
FW
4136/*
4137 * This is a make-shift waitqueue.
4138 * A tracer might use this callback on some rare cases:
4139 *
4140 * 1) the current tracer might hold the runqueue lock when it wakes up
4141 * a reader, hence a deadlock (sched, function, and function graph tracers)
4142 * 2) the function tracers, trace all functions, we don't want
4143 * the overhead of calling wake_up and friends
4144 * (and tracing them too)
4145 *
4146 * Anyway, this is really very primitive wakeup.
4147 */
4148void poll_wait_pipe(struct trace_iterator *iter)
4149{
4150 set_current_state(TASK_INTERRUPTIBLE);
4151 /* sleep for 100 msecs, and try again. */
4152 schedule_timeout(HZ / 10);
4153}
4154
ff98781b
EGM
4155/* Must be called with trace_types_lock mutex held. */
4156static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4157{
4158 struct trace_iterator *iter = filp->private_data;
b3806b43 4159
b3806b43 4160 while (trace_empty(iter)) {
2dc8f095 4161
107bad8b 4162 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4163 return -EAGAIN;
107bad8b 4164 }
2dc8f095 4165
d7350c3f 4166 mutex_unlock(&iter->mutex);
107bad8b 4167
6eaaa5d5 4168 iter->trace->wait_pipe(iter);
b3806b43 4169
d7350c3f 4170 mutex_lock(&iter->mutex);
107bad8b 4171
6eaaa5d5 4172 if (signal_pending(current))
ff98781b 4173 return -EINTR;
b3806b43
SR
4174
4175 /*
250bfd3d 4176 * We block until we read something and tracing is disabled.
b3806b43
SR
4177 * We still block if tracing is disabled, but we have never
4178 * read anything. This allows a user to cat this file, and
4179 * then enable tracing. But after we have read something,
4180 * we give an EOF when tracing is again disabled.
4181 *
4182 * iter->pos will be 0 if we haven't read anything.
4183 */
10246fa3 4184 if (!tracing_is_on() && iter->pos)
b3806b43 4185 break;
b3806b43
SR
4186 }
4187
ff98781b
EGM
4188 return 1;
4189}
4190
4191/*
4192 * Consumer reader.
4193 */
4194static ssize_t
4195tracing_read_pipe(struct file *filp, char __user *ubuf,
4196 size_t cnt, loff_t *ppos)
4197{
4198 struct trace_iterator *iter = filp->private_data;
2b6080f2 4199 struct trace_array *tr = iter->tr;
ff98781b
EGM
4200 ssize_t sret;
4201
4202 /* return any leftover data */
4203 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4204 if (sret != -EBUSY)
4205 return sret;
4206
f9520750 4207 trace_seq_init(&iter->seq);
ff98781b 4208
d7350c3f 4209 /* copy the tracer to avoid using a global lock all around */
ff98781b 4210 mutex_lock(&trace_types_lock);
2b6080f2
SR
4211 if (unlikely(iter->trace->name != tr->current_trace->name))
4212 *iter->trace = *tr->current_trace;
d7350c3f
FW
4213 mutex_unlock(&trace_types_lock);
4214
4215 /*
4216 * Avoid more than one consumer on a single file descriptor
4217 * This is just a matter of traces coherency, the ring buffer itself
4218 * is protected.
4219 */
4220 mutex_lock(&iter->mutex);
ff98781b
EGM
4221 if (iter->trace->read) {
4222 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4223 if (sret)
4224 goto out;
4225 }
4226
4227waitagain:
4228 sret = tracing_wait_pipe(filp);
4229 if (sret <= 0)
4230 goto out;
4231
b3806b43 4232 /* stop when tracing is finished */
ff98781b
EGM
4233 if (trace_empty(iter)) {
4234 sret = 0;
107bad8b 4235 goto out;
ff98781b 4236 }
b3806b43
SR
4237
4238 if (cnt >= PAGE_SIZE)
4239 cnt = PAGE_SIZE - 1;
4240
53d0aa77 4241 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4242 memset(&iter->seq, 0,
4243 sizeof(struct trace_iterator) -
4244 offsetof(struct trace_iterator, seq));
ed5467da 4245 cpumask_clear(iter->started);
4823ed7e 4246 iter->pos = -1;
b3806b43 4247
4f535968 4248 trace_event_read_lock();
7e53bd42 4249 trace_access_lock(iter->cpu_file);
955b61e5 4250 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4251 enum print_line_t ret;
088b1e42
SR
4252 int len = iter->seq.len;
4253
f9896bf3 4254 ret = print_trace_line(iter);
2c4f035f 4255 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42
SR
4256 /* don't print partial lines */
4257 iter->seq.len = len;
b3806b43 4258 break;
088b1e42 4259 }
b91facc3
FW
4260 if (ret != TRACE_TYPE_NO_CONSUME)
4261 trace_consume(iter);
b3806b43
SR
4262
4263 if (iter->seq.len >= cnt)
4264 break;
ee5e51f5
JO
4265
4266 /*
4267 * Setting the full flag means we reached the trace_seq buffer
4268 * size and we should leave by partial output condition above.
4269 * One of the trace_seq_* functions is not used properly.
4270 */
4271 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4272 iter->ent->type);
b3806b43 4273 }
7e53bd42 4274 trace_access_unlock(iter->cpu_file);
4f535968 4275 trace_event_read_unlock();
b3806b43 4276
b3806b43 4277 /* Now copy what we have to the user */
6c6c2796
PP
4278 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4279 if (iter->seq.readpos >= iter->seq.len)
f9520750 4280 trace_seq_init(&iter->seq);
9ff4b974
PP
4281
4282 /*
25985edc 4283 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4284 * entries, go back to wait for more entries.
4285 */
6c6c2796 4286 if (sret == -EBUSY)
9ff4b974 4287 goto waitagain;
b3806b43 4288
107bad8b 4289out:
d7350c3f 4290 mutex_unlock(&iter->mutex);
107bad8b 4291
6c6c2796 4292 return sret;
b3806b43
SR
4293}
4294
3c56819b
EGM
4295static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4296 unsigned int idx)
4297{
4298 __free_page(spd->pages[idx]);
4299}
4300
28dfef8f 4301static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998
SR
4302 .can_merge = 0,
4303 .map = generic_pipe_buf_map,
4304 .unmap = generic_pipe_buf_unmap,
4305 .confirm = generic_pipe_buf_confirm,
92fdd98c 4306 .release = generic_pipe_buf_release,
34cd4998
SR
4307 .steal = generic_pipe_buf_steal,
4308 .get = generic_pipe_buf_get,
3c56819b
EGM
4309};
4310
34cd4998 4311static size_t
fa7c7f6e 4312tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4313{
4314 size_t count;
4315 int ret;
4316
4317 /* Seq buffer is page-sized, exactly what we need. */
4318 for (;;) {
4319 count = iter->seq.len;
4320 ret = print_trace_line(iter);
4321 count = iter->seq.len - count;
4322 if (rem < count) {
4323 rem = 0;
4324 iter->seq.len -= count;
4325 break;
4326 }
4327 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4328 iter->seq.len -= count;
4329 break;
4330 }
4331
74e7ff8c
LJ
4332 if (ret != TRACE_TYPE_NO_CONSUME)
4333 trace_consume(iter);
34cd4998 4334 rem -= count;
955b61e5 4335 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4336 rem = 0;
4337 iter->ent = NULL;
4338 break;
4339 }
4340 }
4341
4342 return rem;
4343}
4344
3c56819b
EGM
4345static ssize_t tracing_splice_read_pipe(struct file *filp,
4346 loff_t *ppos,
4347 struct pipe_inode_info *pipe,
4348 size_t len,
4349 unsigned int flags)
4350{
35f3d14d
JA
4351 struct page *pages_def[PIPE_DEF_BUFFERS];
4352 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4353 struct trace_iterator *iter = filp->private_data;
4354 struct splice_pipe_desc spd = {
35f3d14d
JA
4355 .pages = pages_def,
4356 .partial = partial_def,
34cd4998 4357 .nr_pages = 0, /* This gets updated below. */
047fe360 4358 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4359 .flags = flags,
4360 .ops = &tracing_pipe_buf_ops,
4361 .spd_release = tracing_spd_release_pipe,
3c56819b 4362 };
2b6080f2 4363 struct trace_array *tr = iter->tr;
3c56819b 4364 ssize_t ret;
34cd4998 4365 size_t rem;
3c56819b
EGM
4366 unsigned int i;
4367
35f3d14d
JA
4368 if (splice_grow_spd(pipe, &spd))
4369 return -ENOMEM;
4370
d7350c3f 4371 /* copy the tracer to avoid using a global lock all around */
3c56819b 4372 mutex_lock(&trace_types_lock);
2b6080f2
SR
4373 if (unlikely(iter->trace->name != tr->current_trace->name))
4374 *iter->trace = *tr->current_trace;
d7350c3f
FW
4375 mutex_unlock(&trace_types_lock);
4376
4377 mutex_lock(&iter->mutex);
3c56819b
EGM
4378
4379 if (iter->trace->splice_read) {
4380 ret = iter->trace->splice_read(iter, filp,
4381 ppos, pipe, len, flags);
4382 if (ret)
34cd4998 4383 goto out_err;
3c56819b
EGM
4384 }
4385
4386 ret = tracing_wait_pipe(filp);
4387 if (ret <= 0)
34cd4998 4388 goto out_err;
3c56819b 4389
955b61e5 4390 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4391 ret = -EFAULT;
34cd4998 4392 goto out_err;
3c56819b
EGM
4393 }
4394
4f535968 4395 trace_event_read_lock();
7e53bd42 4396 trace_access_lock(iter->cpu_file);
4f535968 4397
3c56819b 4398 /* Fill as many pages as possible. */
35f3d14d
JA
4399 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4400 spd.pages[i] = alloc_page(GFP_KERNEL);
4401 if (!spd.pages[i])
34cd4998 4402 break;
3c56819b 4403
fa7c7f6e 4404 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4405
4406 /* Copy the data into the page, so we can start over. */
4407 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4408 page_address(spd.pages[i]),
3c56819b
EGM
4409 iter->seq.len);
4410 if (ret < 0) {
35f3d14d 4411 __free_page(spd.pages[i]);
3c56819b
EGM
4412 break;
4413 }
35f3d14d
JA
4414 spd.partial[i].offset = 0;
4415 spd.partial[i].len = iter->seq.len;
3c56819b 4416
f9520750 4417 trace_seq_init(&iter->seq);
3c56819b
EGM
4418 }
4419
7e53bd42 4420 trace_access_unlock(iter->cpu_file);
4f535968 4421 trace_event_read_unlock();
d7350c3f 4422 mutex_unlock(&iter->mutex);
3c56819b
EGM
4423
4424 spd.nr_pages = i;
4425
35f3d14d
JA
4426 ret = splice_to_pipe(pipe, &spd);
4427out:
047fe360 4428 splice_shrink_spd(&spd);
35f3d14d 4429 return ret;
3c56819b 4430
34cd4998 4431out_err:
d7350c3f 4432 mutex_unlock(&iter->mutex);
35f3d14d 4433 goto out;
3c56819b
EGM
4434}
4435
a98a3c3f
SR
4436static ssize_t
4437tracing_entries_read(struct file *filp, char __user *ubuf,
4438 size_t cnt, loff_t *ppos)
4439{
0bc392ee
ON
4440 struct inode *inode = file_inode(filp);
4441 struct trace_array *tr = inode->i_private;
4442 int cpu = tracing_get_cpu(inode);
438ced17
VN
4443 char buf[64];
4444 int r = 0;
4445 ssize_t ret;
a98a3c3f 4446
db526ca3 4447 mutex_lock(&trace_types_lock);
438ced17 4448
0bc392ee 4449 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4450 int cpu, buf_size_same;
4451 unsigned long size;
4452
4453 size = 0;
4454 buf_size_same = 1;
4455 /* check if all cpu sizes are same */
4456 for_each_tracing_cpu(cpu) {
4457 /* fill in the size from first enabled cpu */
4458 if (size == 0)
12883efb
SRRH
4459 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4460 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4461 buf_size_same = 0;
4462 break;
4463 }
4464 }
4465
4466 if (buf_size_same) {
4467 if (!ring_buffer_expanded)
4468 r = sprintf(buf, "%lu (expanded: %lu)\n",
4469 size >> 10,
4470 trace_buf_size >> 10);
4471 else
4472 r = sprintf(buf, "%lu\n", size >> 10);
4473 } else
4474 r = sprintf(buf, "X\n");
4475 } else
0bc392ee 4476 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4477
db526ca3
SR
4478 mutex_unlock(&trace_types_lock);
4479
438ced17
VN
4480 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4481 return ret;
a98a3c3f
SR
4482}
4483
4484static ssize_t
4485tracing_entries_write(struct file *filp, const char __user *ubuf,
4486 size_t cnt, loff_t *ppos)
4487{
0bc392ee
ON
4488 struct inode *inode = file_inode(filp);
4489 struct trace_array *tr = inode->i_private;
a98a3c3f 4490 unsigned long val;
4f271a2a 4491 int ret;
a98a3c3f 4492
22fe9b54
PH
4493 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4494 if (ret)
c6caeeb1 4495 return ret;
a98a3c3f
SR
4496
4497 /* must have at least 1 entry */
4498 if (!val)
4499 return -EINVAL;
4500
1696b2b0
SR
4501 /* value is in KB */
4502 val <<= 10;
0bc392ee 4503 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
4504 if (ret < 0)
4505 return ret;
a98a3c3f 4506
cf8517cf 4507 *ppos += cnt;
a98a3c3f 4508
4f271a2a
VN
4509 return cnt;
4510}
bf5e6519 4511
f81ab074
VN
4512static ssize_t
4513tracing_total_entries_read(struct file *filp, char __user *ubuf,
4514 size_t cnt, loff_t *ppos)
4515{
4516 struct trace_array *tr = filp->private_data;
4517 char buf[64];
4518 int r, cpu;
4519 unsigned long size = 0, expanded_size = 0;
4520
4521 mutex_lock(&trace_types_lock);
4522 for_each_tracing_cpu(cpu) {
12883efb 4523 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
4524 if (!ring_buffer_expanded)
4525 expanded_size += trace_buf_size >> 10;
4526 }
4527 if (ring_buffer_expanded)
4528 r = sprintf(buf, "%lu\n", size);
4529 else
4530 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4531 mutex_unlock(&trace_types_lock);
4532
4533 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4534}
4535
4f271a2a
VN
4536static ssize_t
4537tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4538 size_t cnt, loff_t *ppos)
4539{
4540 /*
4541 * There is no need to read what the user has written, this function
4542 * is just to make sure that there is no error when "echo" is used
4543 */
4544
4545 *ppos += cnt;
a98a3c3f
SR
4546
4547 return cnt;
4548}
4549
4f271a2a
VN
4550static int
4551tracing_free_buffer_release(struct inode *inode, struct file *filp)
4552{
2b6080f2
SR
4553 struct trace_array *tr = inode->i_private;
4554
cf30cf67
SR
4555 /* disable tracing ? */
4556 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 4557 tracer_tracing_off(tr);
4f271a2a 4558 /* resize the ring buffer to 0 */
2b6080f2 4559 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 4560
7b85af63
SRRH
4561 trace_array_put(tr);
4562
4f271a2a
VN
4563 return 0;
4564}
4565
5bf9a1ee
PP
4566static ssize_t
4567tracing_mark_write(struct file *filp, const char __user *ubuf,
4568 size_t cnt, loff_t *fpos)
4569{
d696b58c 4570 unsigned long addr = (unsigned long)ubuf;
2d71619c 4571 struct trace_array *tr = filp->private_data;
d696b58c
SR
4572 struct ring_buffer_event *event;
4573 struct ring_buffer *buffer;
4574 struct print_entry *entry;
4575 unsigned long irq_flags;
4576 struct page *pages[2];
6edb2a8a 4577 void *map_page[2];
d696b58c
SR
4578 int nr_pages = 1;
4579 ssize_t written;
d696b58c
SR
4580 int offset;
4581 int size;
4582 int len;
4583 int ret;
6edb2a8a 4584 int i;
5bf9a1ee 4585
c76f0694 4586 if (tracing_disabled)
5bf9a1ee
PP
4587 return -EINVAL;
4588
5224c3a3
MSB
4589 if (!(trace_flags & TRACE_ITER_MARKERS))
4590 return -EINVAL;
4591
5bf9a1ee
PP
4592 if (cnt > TRACE_BUF_SIZE)
4593 cnt = TRACE_BUF_SIZE;
4594
d696b58c
SR
4595 /*
4596 * Userspace is injecting traces into the kernel trace buffer.
4597 * We want to be as non intrusive as possible.
4598 * To do so, we do not want to allocate any special buffers
4599 * or take any locks, but instead write the userspace data
4600 * straight into the ring buffer.
4601 *
4602 * First we need to pin the userspace buffer into memory,
4603 * which, most likely it is, because it just referenced it.
4604 * But there's no guarantee that it is. By using get_user_pages_fast()
4605 * and kmap_atomic/kunmap_atomic() we can get access to the
4606 * pages directly. We then write the data directly into the
4607 * ring buffer.
4608 */
4609 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 4610
d696b58c
SR
4611 /* check if we cross pages */
4612 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4613 nr_pages = 2;
4614
4615 offset = addr & (PAGE_SIZE - 1);
4616 addr &= PAGE_MASK;
4617
4618 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4619 if (ret < nr_pages) {
4620 while (--ret >= 0)
4621 put_page(pages[ret]);
4622 written = -EFAULT;
4623 goto out;
5bf9a1ee 4624 }
d696b58c 4625
6edb2a8a
SR
4626 for (i = 0; i < nr_pages; i++)
4627 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
4628
4629 local_save_flags(irq_flags);
4630 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 4631 buffer = tr->trace_buffer.buffer;
d696b58c
SR
4632 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4633 irq_flags, preempt_count());
4634 if (!event) {
4635 /* Ring buffer disabled, return as if not open for write */
4636 written = -EBADF;
4637 goto out_unlock;
5bf9a1ee 4638 }
d696b58c
SR
4639
4640 entry = ring_buffer_event_data(event);
4641 entry->ip = _THIS_IP_;
4642
4643 if (nr_pages == 2) {
4644 len = PAGE_SIZE - offset;
6edb2a8a
SR
4645 memcpy(&entry->buf, map_page[0] + offset, len);
4646 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 4647 } else
6edb2a8a 4648 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 4649
d696b58c
SR
4650 if (entry->buf[cnt - 1] != '\n') {
4651 entry->buf[cnt] = '\n';
4652 entry->buf[cnt + 1] = '\0';
4653 } else
4654 entry->buf[cnt] = '\0';
4655
7ffbd48d 4656 __buffer_unlock_commit(buffer, event);
5bf9a1ee 4657
d696b58c 4658 written = cnt;
5bf9a1ee 4659
d696b58c 4660 *fpos += written;
1aa54bca 4661
d696b58c 4662 out_unlock:
6edb2a8a
SR
4663 for (i = 0; i < nr_pages; i++){
4664 kunmap_atomic(map_page[i]);
4665 put_page(pages[i]);
4666 }
d696b58c 4667 out:
1aa54bca 4668 return written;
5bf9a1ee
PP
4669}
4670
13f16d20 4671static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 4672{
2b6080f2 4673 struct trace_array *tr = m->private;
5079f326
Z
4674 int i;
4675
4676 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 4677 seq_printf(m,
5079f326 4678 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
4679 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4680 i == tr->clock_id ? "]" : "");
13f16d20 4681 seq_putc(m, '\n');
5079f326 4682
13f16d20 4683 return 0;
5079f326
Z
4684}
4685
4686static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4687 size_t cnt, loff_t *fpos)
4688{
2b6080f2
SR
4689 struct seq_file *m = filp->private_data;
4690 struct trace_array *tr = m->private;
5079f326
Z
4691 char buf[64];
4692 const char *clockstr;
4693 int i;
4694
4695 if (cnt >= sizeof(buf))
4696 return -EINVAL;
4697
4698 if (copy_from_user(&buf, ubuf, cnt))
4699 return -EFAULT;
4700
4701 buf[cnt] = 0;
4702
4703 clockstr = strstrip(buf);
4704
4705 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4706 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4707 break;
4708 }
4709 if (i == ARRAY_SIZE(trace_clocks))
4710 return -EINVAL;
4711
5079f326
Z
4712 mutex_lock(&trace_types_lock);
4713
2b6080f2
SR
4714 tr->clock_id = i;
4715
12883efb 4716 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 4717
60303ed3
DS
4718 /*
4719 * New clock may not be consistent with the previous clock.
4720 * Reset the buffer so that it doesn't have incomparable timestamps.
4721 */
9457158b 4722 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
4723
4724#ifdef CONFIG_TRACER_MAX_TRACE
4725 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4726 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 4727 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 4728#endif
60303ed3 4729
5079f326
Z
4730 mutex_unlock(&trace_types_lock);
4731
4732 *fpos += cnt;
4733
4734 return cnt;
4735}
4736
13f16d20
LZ
4737static int tracing_clock_open(struct inode *inode, struct file *file)
4738{
7b85af63
SRRH
4739 struct trace_array *tr = inode->i_private;
4740 int ret;
4741
13f16d20
LZ
4742 if (tracing_disabled)
4743 return -ENODEV;
2b6080f2 4744
7b85af63
SRRH
4745 if (trace_array_get(tr))
4746 return -ENODEV;
4747
4748 ret = single_open(file, tracing_clock_show, inode->i_private);
4749 if (ret < 0)
4750 trace_array_put(tr);
4751
4752 return ret;
13f16d20
LZ
4753}
4754
6de58e62
SRRH
4755struct ftrace_buffer_info {
4756 struct trace_iterator iter;
4757 void *spare;
4758 unsigned int read;
4759};
4760
debdd57f
HT
4761#ifdef CONFIG_TRACER_SNAPSHOT
4762static int tracing_snapshot_open(struct inode *inode, struct file *file)
4763{
6484c71c 4764 struct trace_array *tr = inode->i_private;
debdd57f 4765 struct trace_iterator *iter;
2b6080f2 4766 struct seq_file *m;
debdd57f
HT
4767 int ret = 0;
4768
ff451961
SRRH
4769 if (trace_array_get(tr) < 0)
4770 return -ENODEV;
4771
debdd57f 4772 if (file->f_mode & FMODE_READ) {
6484c71c 4773 iter = __tracing_open(inode, file, true);
debdd57f
HT
4774 if (IS_ERR(iter))
4775 ret = PTR_ERR(iter);
2b6080f2
SR
4776 } else {
4777 /* Writes still need the seq_file to hold the private data */
f77d09a3 4778 ret = -ENOMEM;
2b6080f2
SR
4779 m = kzalloc(sizeof(*m), GFP_KERNEL);
4780 if (!m)
f77d09a3 4781 goto out;
2b6080f2
SR
4782 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4783 if (!iter) {
4784 kfree(m);
f77d09a3 4785 goto out;
2b6080f2 4786 }
f77d09a3
AL
4787 ret = 0;
4788
ff451961 4789 iter->tr = tr;
6484c71c
ON
4790 iter->trace_buffer = &tr->max_buffer;
4791 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
4792 m->private = iter;
4793 file->private_data = m;
debdd57f 4794 }
f77d09a3 4795out:
ff451961
SRRH
4796 if (ret < 0)
4797 trace_array_put(tr);
4798
debdd57f
HT
4799 return ret;
4800}
4801
4802static ssize_t
4803tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4804 loff_t *ppos)
4805{
2b6080f2
SR
4806 struct seq_file *m = filp->private_data;
4807 struct trace_iterator *iter = m->private;
4808 struct trace_array *tr = iter->tr;
debdd57f
HT
4809 unsigned long val;
4810 int ret;
4811
4812 ret = tracing_update_buffers();
4813 if (ret < 0)
4814 return ret;
4815
4816 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4817 if (ret)
4818 return ret;
4819
4820 mutex_lock(&trace_types_lock);
4821
2b6080f2 4822 if (tr->current_trace->use_max_tr) {
debdd57f
HT
4823 ret = -EBUSY;
4824 goto out;
4825 }
4826
4827 switch (val) {
4828 case 0:
f1affcaa
SRRH
4829 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4830 ret = -EINVAL;
4831 break;
debdd57f 4832 }
3209cff4
SRRH
4833 if (tr->allocated_snapshot)
4834 free_snapshot(tr);
debdd57f
HT
4835 break;
4836 case 1:
f1affcaa
SRRH
4837/* Only allow per-cpu swap if the ring buffer supports it */
4838#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4839 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4840 ret = -EINVAL;
4841 break;
4842 }
4843#endif
45ad21ca 4844 if (!tr->allocated_snapshot) {
3209cff4 4845 ret = alloc_snapshot(tr);
debdd57f
HT
4846 if (ret < 0)
4847 break;
debdd57f 4848 }
debdd57f
HT
4849 local_irq_disable();
4850 /* Now, we're going to swap */
f1affcaa 4851 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 4852 update_max_tr(tr, current, smp_processor_id());
f1affcaa 4853 else
ce9bae55 4854 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
4855 local_irq_enable();
4856 break;
4857 default:
45ad21ca 4858 if (tr->allocated_snapshot) {
f1affcaa
SRRH
4859 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4860 tracing_reset_online_cpus(&tr->max_buffer);
4861 else
4862 tracing_reset(&tr->max_buffer, iter->cpu_file);
4863 }
debdd57f
HT
4864 break;
4865 }
4866
4867 if (ret >= 0) {
4868 *ppos += cnt;
4869 ret = cnt;
4870 }
4871out:
4872 mutex_unlock(&trace_types_lock);
4873 return ret;
4874}
2b6080f2
SR
4875
4876static int tracing_snapshot_release(struct inode *inode, struct file *file)
4877{
4878 struct seq_file *m = file->private_data;
ff451961
SRRH
4879 int ret;
4880
4881 ret = tracing_release(inode, file);
2b6080f2
SR
4882
4883 if (file->f_mode & FMODE_READ)
ff451961 4884 return ret;
2b6080f2
SR
4885
4886 /* If write only, the seq_file is just a stub */
4887 if (m)
4888 kfree(m->private);
4889 kfree(m);
4890
4891 return 0;
4892}
4893
6de58e62
SRRH
4894static int tracing_buffers_open(struct inode *inode, struct file *filp);
4895static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4896 size_t count, loff_t *ppos);
4897static int tracing_buffers_release(struct inode *inode, struct file *file);
4898static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4899 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4900
4901static int snapshot_raw_open(struct inode *inode, struct file *filp)
4902{
4903 struct ftrace_buffer_info *info;
4904 int ret;
4905
4906 ret = tracing_buffers_open(inode, filp);
4907 if (ret < 0)
4908 return ret;
4909
4910 info = filp->private_data;
4911
4912 if (info->iter.trace->use_max_tr) {
4913 tracing_buffers_release(inode, filp);
4914 return -EBUSY;
4915 }
4916
4917 info->iter.snapshot = true;
4918 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4919
4920 return ret;
4921}
4922
debdd57f
HT
4923#endif /* CONFIG_TRACER_SNAPSHOT */
4924
4925
5e2336a0 4926static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
4927 .open = tracing_open_generic,
4928 .read = tracing_max_lat_read,
4929 .write = tracing_max_lat_write,
b444786f 4930 .llseek = generic_file_llseek,
bc0c38d1
SR
4931};
4932
5e2336a0 4933static const struct file_operations set_tracer_fops = {
4bf39a94
IM
4934 .open = tracing_open_generic,
4935 .read = tracing_set_trace_read,
4936 .write = tracing_set_trace_write,
b444786f 4937 .llseek = generic_file_llseek,
bc0c38d1
SR
4938};
4939
5e2336a0 4940static const struct file_operations tracing_pipe_fops = {
4bf39a94 4941 .open = tracing_open_pipe,
2a2cc8f7 4942 .poll = tracing_poll_pipe,
4bf39a94 4943 .read = tracing_read_pipe,
3c56819b 4944 .splice_read = tracing_splice_read_pipe,
4bf39a94 4945 .release = tracing_release_pipe,
b444786f 4946 .llseek = no_llseek,
b3806b43
SR
4947};
4948
5e2336a0 4949static const struct file_operations tracing_entries_fops = {
0bc392ee 4950 .open = tracing_open_generic_tr,
a98a3c3f
SR
4951 .read = tracing_entries_read,
4952 .write = tracing_entries_write,
b444786f 4953 .llseek = generic_file_llseek,
0bc392ee 4954 .release = tracing_release_generic_tr,
a98a3c3f
SR
4955};
4956
f81ab074 4957static const struct file_operations tracing_total_entries_fops = {
7b85af63 4958 .open = tracing_open_generic_tr,
f81ab074
VN
4959 .read = tracing_total_entries_read,
4960 .llseek = generic_file_llseek,
7b85af63 4961 .release = tracing_release_generic_tr,
f81ab074
VN
4962};
4963
4f271a2a 4964static const struct file_operations tracing_free_buffer_fops = {
7b85af63 4965 .open = tracing_open_generic_tr,
4f271a2a
VN
4966 .write = tracing_free_buffer_write,
4967 .release = tracing_free_buffer_release,
4968};
4969
5e2336a0 4970static const struct file_operations tracing_mark_fops = {
7b85af63 4971 .open = tracing_open_generic_tr,
5bf9a1ee 4972 .write = tracing_mark_write,
b444786f 4973 .llseek = generic_file_llseek,
7b85af63 4974 .release = tracing_release_generic_tr,
5bf9a1ee
PP
4975};
4976
5079f326 4977static const struct file_operations trace_clock_fops = {
13f16d20
LZ
4978 .open = tracing_clock_open,
4979 .read = seq_read,
4980 .llseek = seq_lseek,
7b85af63 4981 .release = tracing_single_release_tr,
5079f326
Z
4982 .write = tracing_clock_write,
4983};
4984
debdd57f
HT
4985#ifdef CONFIG_TRACER_SNAPSHOT
4986static const struct file_operations snapshot_fops = {
4987 .open = tracing_snapshot_open,
4988 .read = seq_read,
4989 .write = tracing_snapshot_write,
098c879e 4990 .llseek = tracing_lseek,
2b6080f2 4991 .release = tracing_snapshot_release,
debdd57f 4992};
debdd57f 4993
6de58e62
SRRH
4994static const struct file_operations snapshot_raw_fops = {
4995 .open = snapshot_raw_open,
4996 .read = tracing_buffers_read,
4997 .release = tracing_buffers_release,
4998 .splice_read = tracing_buffers_splice_read,
4999 .llseek = no_llseek,
2cadf913
SR
5000};
5001
6de58e62
SRRH
5002#endif /* CONFIG_TRACER_SNAPSHOT */
5003
2cadf913
SR
5004static int tracing_buffers_open(struct inode *inode, struct file *filp)
5005{
46ef2be0 5006 struct trace_array *tr = inode->i_private;
2cadf913 5007 struct ftrace_buffer_info *info;
7b85af63 5008 int ret;
2cadf913
SR
5009
5010 if (tracing_disabled)
5011 return -ENODEV;
5012
7b85af63
SRRH
5013 if (trace_array_get(tr) < 0)
5014 return -ENODEV;
5015
2cadf913 5016 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5017 if (!info) {
5018 trace_array_put(tr);
2cadf913 5019 return -ENOMEM;
7b85af63 5020 }
2cadf913 5021
a695cb58
SRRH
5022 mutex_lock(&trace_types_lock);
5023
cc60cdc9 5024 info->iter.tr = tr;
46ef2be0 5025 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5026 info->iter.trace = tr->current_trace;
12883efb 5027 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5028 info->spare = NULL;
2cadf913 5029 /* Force reading ring buffer for first read */
cc60cdc9 5030 info->read = (unsigned int)-1;
2cadf913
SR
5031
5032 filp->private_data = info;
5033
a695cb58
SRRH
5034 mutex_unlock(&trace_types_lock);
5035
7b85af63
SRRH
5036 ret = nonseekable_open(inode, filp);
5037 if (ret < 0)
5038 trace_array_put(tr);
5039
5040 return ret;
2cadf913
SR
5041}
5042
cc60cdc9
SR
5043static unsigned int
5044tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5045{
5046 struct ftrace_buffer_info *info = filp->private_data;
5047 struct trace_iterator *iter = &info->iter;
5048
5049 return trace_poll(iter, filp, poll_table);
5050}
5051
2cadf913
SR
5052static ssize_t
5053tracing_buffers_read(struct file *filp, char __user *ubuf,
5054 size_t count, loff_t *ppos)
5055{
5056 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5057 struct trace_iterator *iter = &info->iter;
2cadf913 5058 ssize_t ret;
6de58e62 5059 ssize_t size;
2cadf913 5060
2dc5d12b
SR
5061 if (!count)
5062 return 0;
5063
6de58e62
SRRH
5064 mutex_lock(&trace_types_lock);
5065
5066#ifdef CONFIG_TRACER_MAX_TRACE
5067 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5068 size = -EBUSY;
5069 goto out_unlock;
5070 }
5071#endif
5072
ddd538f3 5073 if (!info->spare)
12883efb
SRRH
5074 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5075 iter->cpu_file);
6de58e62 5076 size = -ENOMEM;
ddd538f3 5077 if (!info->spare)
6de58e62 5078 goto out_unlock;
ddd538f3 5079
2cadf913
SR
5080 /* Do we have previous read data to read? */
5081 if (info->read < PAGE_SIZE)
5082 goto read;
5083
b627344f 5084 again:
cc60cdc9 5085 trace_access_lock(iter->cpu_file);
12883efb 5086 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5087 &info->spare,
5088 count,
cc60cdc9
SR
5089 iter->cpu_file, 0);
5090 trace_access_unlock(iter->cpu_file);
2cadf913 5091
b627344f
SR
5092 if (ret < 0) {
5093 if (trace_empty(iter)) {
6de58e62
SRRH
5094 if ((filp->f_flags & O_NONBLOCK)) {
5095 size = -EAGAIN;
5096 goto out_unlock;
5097 }
5098 mutex_unlock(&trace_types_lock);
b627344f 5099 iter->trace->wait_pipe(iter);
6de58e62
SRRH
5100 mutex_lock(&trace_types_lock);
5101 if (signal_pending(current)) {
5102 size = -EINTR;
5103 goto out_unlock;
5104 }
b627344f
SR
5105 goto again;
5106 }
6de58e62
SRRH
5107 size = 0;
5108 goto out_unlock;
b627344f 5109 }
436fc280 5110
436fc280 5111 info->read = 0;
b627344f 5112 read:
2cadf913
SR
5113 size = PAGE_SIZE - info->read;
5114 if (size > count)
5115 size = count;
5116
5117 ret = copy_to_user(ubuf, info->spare + info->read, size);
6de58e62
SRRH
5118 if (ret == size) {
5119 size = -EFAULT;
5120 goto out_unlock;
5121 }
2dc5d12b
SR
5122 size -= ret;
5123
2cadf913
SR
5124 *ppos += size;
5125 info->read += size;
5126
6de58e62
SRRH
5127 out_unlock:
5128 mutex_unlock(&trace_types_lock);
5129
2cadf913
SR
5130 return size;
5131}
5132
5133static int tracing_buffers_release(struct inode *inode, struct file *file)
5134{
5135 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5136 struct trace_iterator *iter = &info->iter;
2cadf913 5137
a695cb58
SRRH
5138 mutex_lock(&trace_types_lock);
5139
ff451961 5140 __trace_array_put(iter->tr);
2cadf913 5141
ddd538f3 5142 if (info->spare)
12883efb 5143 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5144 kfree(info);
5145
a695cb58
SRRH
5146 mutex_unlock(&trace_types_lock);
5147
2cadf913
SR
5148 return 0;
5149}
5150
5151struct buffer_ref {
5152 struct ring_buffer *buffer;
5153 void *page;
5154 int ref;
5155};
5156
5157static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5158 struct pipe_buffer *buf)
5159{
5160 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5161
5162 if (--ref->ref)
5163 return;
5164
5165 ring_buffer_free_read_page(ref->buffer, ref->page);
5166 kfree(ref);
5167 buf->private = 0;
5168}
5169
2cadf913
SR
5170static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5171 struct pipe_buffer *buf)
5172{
5173 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5174
5175 ref->ref++;
5176}
5177
5178/* Pipe buffer operations for a buffer. */
28dfef8f 5179static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913
SR
5180 .can_merge = 0,
5181 .map = generic_pipe_buf_map,
5182 .unmap = generic_pipe_buf_unmap,
5183 .confirm = generic_pipe_buf_confirm,
5184 .release = buffer_pipe_buf_release,
d55cb6cf 5185 .steal = generic_pipe_buf_steal,
2cadf913
SR
5186 .get = buffer_pipe_buf_get,
5187};
5188
5189/*
5190 * Callback from splice_to_pipe(), if we need to release some pages
5191 * at the end of the spd in case we error'ed out in filling the pipe.
5192 */
5193static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5194{
5195 struct buffer_ref *ref =
5196 (struct buffer_ref *)spd->partial[i].private;
5197
5198 if (--ref->ref)
5199 return;
5200
5201 ring_buffer_free_read_page(ref->buffer, ref->page);
5202 kfree(ref);
5203 spd->partial[i].private = 0;
5204}
5205
5206static ssize_t
5207tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5208 struct pipe_inode_info *pipe, size_t len,
5209 unsigned int flags)
5210{
5211 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5212 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5213 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5214 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5215 struct splice_pipe_desc spd = {
35f3d14d
JA
5216 .pages = pages_def,
5217 .partial = partial_def,
047fe360 5218 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5219 .flags = flags,
5220 .ops = &buffer_pipe_buf_ops,
5221 .spd_release = buffer_spd_release,
5222 };
5223 struct buffer_ref *ref;
93459c6c 5224 int entries, size, i;
6de58e62 5225 ssize_t ret;
2cadf913 5226
6de58e62
SRRH
5227 mutex_lock(&trace_types_lock);
5228
5229#ifdef CONFIG_TRACER_MAX_TRACE
5230 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5231 ret = -EBUSY;
5232 goto out;
5233 }
5234#endif
5235
5236 if (splice_grow_spd(pipe, &spd)) {
5237 ret = -ENOMEM;
5238 goto out;
5239 }
35f3d14d 5240
93cfb3c9 5241 if (*ppos & (PAGE_SIZE - 1)) {
35f3d14d
JA
5242 ret = -EINVAL;
5243 goto out;
93cfb3c9
LJ
5244 }
5245
5246 if (len & (PAGE_SIZE - 1)) {
35f3d14d
JA
5247 if (len < PAGE_SIZE) {
5248 ret = -EINVAL;
5249 goto out;
5250 }
93cfb3c9
LJ
5251 len &= PAGE_MASK;
5252 }
5253
cc60cdc9
SR
5254 again:
5255 trace_access_lock(iter->cpu_file);
12883efb 5256 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5257
35f3d14d 5258 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5259 struct page *page;
5260 int r;
5261
5262 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5263 if (!ref)
5264 break;
5265
7267fa68 5266 ref->ref = 1;
12883efb 5267 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5268 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913
SR
5269 if (!ref->page) {
5270 kfree(ref);
5271 break;
5272 }
5273
5274 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5275 len, iter->cpu_file, 1);
2cadf913 5276 if (r < 0) {
7ea59064 5277 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5278 kfree(ref);
5279 break;
5280 }
5281
5282 /*
5283 * zero out any left over data, this is going to
5284 * user land.
5285 */
5286 size = ring_buffer_page_len(ref->page);
5287 if (size < PAGE_SIZE)
5288 memset(ref->page + size, 0, PAGE_SIZE - size);
5289
5290 page = virt_to_page(ref->page);
5291
5292 spd.pages[i] = page;
5293 spd.partial[i].len = PAGE_SIZE;
5294 spd.partial[i].offset = 0;
5295 spd.partial[i].private = (unsigned long)ref;
5296 spd.nr_pages++;
93cfb3c9 5297 *ppos += PAGE_SIZE;
93459c6c 5298
12883efb 5299 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5300 }
5301
cc60cdc9 5302 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5303 spd.nr_pages = i;
5304
5305 /* did we read anything? */
5306 if (!spd.nr_pages) {
cc60cdc9 5307 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
2cadf913 5308 ret = -EAGAIN;
cc60cdc9
SR
5309 goto out;
5310 }
6de58e62 5311 mutex_unlock(&trace_types_lock);
b627344f 5312 iter->trace->wait_pipe(iter);
6de58e62 5313 mutex_lock(&trace_types_lock);
cc60cdc9
SR
5314 if (signal_pending(current)) {
5315 ret = -EINTR;
5316 goto out;
5317 }
5318 goto again;
2cadf913
SR
5319 }
5320
5321 ret = splice_to_pipe(pipe, &spd);
047fe360 5322 splice_shrink_spd(&spd);
35f3d14d 5323out:
6de58e62
SRRH
5324 mutex_unlock(&trace_types_lock);
5325
2cadf913
SR
5326 return ret;
5327}
5328
5329static const struct file_operations tracing_buffers_fops = {
5330 .open = tracing_buffers_open,
5331 .read = tracing_buffers_read,
cc60cdc9 5332 .poll = tracing_buffers_poll,
2cadf913
SR
5333 .release = tracing_buffers_release,
5334 .splice_read = tracing_buffers_splice_read,
5335 .llseek = no_llseek,
5336};
5337
c8d77183
SR
5338static ssize_t
5339tracing_stats_read(struct file *filp, char __user *ubuf,
5340 size_t count, loff_t *ppos)
5341{
4d3435b8
ON
5342 struct inode *inode = file_inode(filp);
5343 struct trace_array *tr = inode->i_private;
12883efb 5344 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5345 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5346 struct trace_seq *s;
5347 unsigned long cnt;
c64e148a
VN
5348 unsigned long long t;
5349 unsigned long usec_rem;
c8d77183 5350
e4f2d10f 5351 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5352 if (!s)
a646365c 5353 return -ENOMEM;
c8d77183
SR
5354
5355 trace_seq_init(s);
5356
12883efb 5357 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5358 trace_seq_printf(s, "entries: %ld\n", cnt);
5359
12883efb 5360 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5361 trace_seq_printf(s, "overrun: %ld\n", cnt);
5362
12883efb 5363 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5364 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5365
12883efb 5366 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5367 trace_seq_printf(s, "bytes: %ld\n", cnt);
5368
58e8eedf 5369 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5370 /* local or global for trace_clock */
12883efb 5371 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5372 usec_rem = do_div(t, USEC_PER_SEC);
5373 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5374 t, usec_rem);
5375
12883efb 5376 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5377 usec_rem = do_div(t, USEC_PER_SEC);
5378 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5379 } else {
5380 /* counter or tsc mode for trace_clock */
5381 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5382 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5383
11043d8b 5384 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5385 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5386 }
c64e148a 5387
12883efb 5388 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5389 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5390
12883efb 5391 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5392 trace_seq_printf(s, "read events: %ld\n", cnt);
5393
c8d77183
SR
5394 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5395
5396 kfree(s);
5397
5398 return count;
5399}
5400
5401static const struct file_operations tracing_stats_fops = {
4d3435b8 5402 .open = tracing_open_generic_tr,
c8d77183 5403 .read = tracing_stats_read,
b444786f 5404 .llseek = generic_file_llseek,
4d3435b8 5405 .release = tracing_release_generic_tr,
c8d77183
SR
5406};
5407
bc0c38d1
SR
5408#ifdef CONFIG_DYNAMIC_FTRACE
5409
b807c3d0
SR
5410int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5411{
5412 return 0;
5413}
5414
bc0c38d1 5415static ssize_t
b807c3d0 5416tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5417 size_t cnt, loff_t *ppos)
5418{
a26a2a27
SR
5419 static char ftrace_dyn_info_buffer[1024];
5420 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5421 unsigned long *p = filp->private_data;
b807c3d0 5422 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5423 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5424 int r;
5425
b807c3d0
SR
5426 mutex_lock(&dyn_info_mutex);
5427 r = sprintf(buf, "%ld ", *p);
4bf39a94 5428
a26a2a27 5429 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5430 buf[r++] = '\n';
5431
5432 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5433
5434 mutex_unlock(&dyn_info_mutex);
5435
5436 return r;
bc0c38d1
SR
5437}
5438
5e2336a0 5439static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5440 .open = tracing_open_generic,
b807c3d0 5441 .read = tracing_read_dyn_info,
b444786f 5442 .llseek = generic_file_llseek,
bc0c38d1 5443};
77fd5c15 5444#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5445
77fd5c15
SRRH
5446#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5447static void
5448ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5449{
5450 tracing_snapshot();
5451}
bc0c38d1 5452
77fd5c15
SRRH
5453static void
5454ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5455{
77fd5c15
SRRH
5456 unsigned long *count = (long *)data;
5457
5458 if (!*count)
5459 return;
bc0c38d1 5460
77fd5c15
SRRH
5461 if (*count != -1)
5462 (*count)--;
5463
5464 tracing_snapshot();
5465}
5466
5467static int
5468ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5469 struct ftrace_probe_ops *ops, void *data)
5470{
5471 long count = (long)data;
5472
5473 seq_printf(m, "%ps:", (void *)ip);
5474
5475 seq_printf(m, "snapshot");
5476
5477 if (count == -1)
5478 seq_printf(m, ":unlimited\n");
5479 else
5480 seq_printf(m, ":count=%ld\n", count);
5481
5482 return 0;
5483}
5484
5485static struct ftrace_probe_ops snapshot_probe_ops = {
5486 .func = ftrace_snapshot,
5487 .print = ftrace_snapshot_print,
5488};
5489
5490static struct ftrace_probe_ops snapshot_count_probe_ops = {
5491 .func = ftrace_count_snapshot,
5492 .print = ftrace_snapshot_print,
5493};
5494
5495static int
5496ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5497 char *glob, char *cmd, char *param, int enable)
5498{
5499 struct ftrace_probe_ops *ops;
5500 void *count = (void *)-1;
5501 char *number;
5502 int ret;
5503
5504 /* hash funcs only work with set_ftrace_filter */
5505 if (!enable)
5506 return -EINVAL;
5507
5508 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5509
5510 if (glob[0] == '!') {
5511 unregister_ftrace_function_probe_func(glob+1, ops);
5512 return 0;
5513 }
5514
5515 if (!param)
5516 goto out_reg;
5517
5518 number = strsep(&param, ":");
5519
5520 if (!strlen(number))
5521 goto out_reg;
5522
5523 /*
5524 * We use the callback data field (which is a pointer)
5525 * as our counter.
5526 */
5527 ret = kstrtoul(number, 0, (unsigned long *)&count);
5528 if (ret)
5529 return ret;
5530
5531 out_reg:
5532 ret = register_ftrace_function_probe(glob, ops, count);
5533
5534 if (ret >= 0)
5535 alloc_snapshot(&global_trace);
5536
5537 return ret < 0 ? ret : 0;
5538}
5539
5540static struct ftrace_func_command ftrace_snapshot_cmd = {
5541 .name = "snapshot",
5542 .func = ftrace_trace_snapshot_callback,
5543};
5544
38de93ab 5545static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
5546{
5547 return register_ftrace_command(&ftrace_snapshot_cmd);
5548}
5549#else
38de93ab 5550static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 5551#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 5552
2b6080f2 5553struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
bc0c38d1 5554{
2b6080f2
SR
5555 if (tr->dir)
5556 return tr->dir;
bc0c38d1 5557
3e1f60b8
FW
5558 if (!debugfs_initialized())
5559 return NULL;
5560
2b6080f2
SR
5561 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5562 tr->dir = debugfs_create_dir("tracing", NULL);
bc0c38d1 5563
687c878a
J
5564 if (!tr->dir)
5565 pr_warn_once("Could not create debugfs directory 'tracing'\n");
bc0c38d1 5566
2b6080f2 5567 return tr->dir;
bc0c38d1
SR
5568}
5569
2b6080f2
SR
5570struct dentry *tracing_init_dentry(void)
5571{
5572 return tracing_init_dentry_tr(&global_trace);
5573}
b04cc6b1 5574
2b6080f2 5575static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 5576{
b04cc6b1
FW
5577 struct dentry *d_tracer;
5578
2b6080f2
SR
5579 if (tr->percpu_dir)
5580 return tr->percpu_dir;
b04cc6b1 5581
2b6080f2 5582 d_tracer = tracing_init_dentry_tr(tr);
b04cc6b1
FW
5583 if (!d_tracer)
5584 return NULL;
5585
2b6080f2 5586 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
b04cc6b1 5587
2b6080f2
SR
5588 WARN_ONCE(!tr->percpu_dir,
5589 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 5590
2b6080f2 5591 return tr->percpu_dir;
b04cc6b1
FW
5592}
5593
649e9c70
ON
5594static struct dentry *
5595trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5596 void *data, long cpu, const struct file_operations *fops)
5597{
5598 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5599
5600 if (ret) /* See tracing_get_cpu() */
5601 ret->d_inode->i_cdev = (void *)(cpu + 1);
5602 return ret;
5603}
5604
2b6080f2
SR
5605static void
5606tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 5607{
2b6080f2 5608 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 5609 struct dentry *d_cpu;
dd49a38c 5610 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 5611
0a3d7ce7
NK
5612 if (!d_percpu)
5613 return;
5614
dd49a38c 5615 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8656e7a2
FW
5616 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5617 if (!d_cpu) {
5618 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5619 return;
5620 }
b04cc6b1 5621
8656e7a2 5622 /* per cpu trace_pipe */
649e9c70 5623 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 5624 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
5625
5626 /* per cpu trace */
649e9c70 5627 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 5628 tr, cpu, &tracing_fops);
7f96f93f 5629
649e9c70 5630 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 5631 tr, cpu, &tracing_buffers_fops);
7f96f93f 5632
649e9c70 5633 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 5634 tr, cpu, &tracing_stats_fops);
438ced17 5635
649e9c70 5636 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 5637 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
5638
5639#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 5640 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 5641 tr, cpu, &snapshot_fops);
6de58e62 5642
649e9c70 5643 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 5644 tr, cpu, &snapshot_raw_fops);
f1affcaa 5645#endif
b04cc6b1
FW
5646}
5647
60a11774
SR
5648#ifdef CONFIG_FTRACE_SELFTEST
5649/* Let selftest have access to static functions in this file */
5650#include "trace_selftest.c"
5651#endif
5652
577b785f
SR
5653struct trace_option_dentry {
5654 struct tracer_opt *opt;
5655 struct tracer_flags *flags;
2b6080f2 5656 struct trace_array *tr;
577b785f
SR
5657 struct dentry *entry;
5658};
5659
5660static ssize_t
5661trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5662 loff_t *ppos)
5663{
5664 struct trace_option_dentry *topt = filp->private_data;
5665 char *buf;
5666
5667 if (topt->flags->val & topt->opt->bit)
5668 buf = "1\n";
5669 else
5670 buf = "0\n";
5671
5672 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5673}
5674
5675static ssize_t
5676trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5677 loff_t *ppos)
5678{
5679 struct trace_option_dentry *topt = filp->private_data;
5680 unsigned long val;
577b785f
SR
5681 int ret;
5682
22fe9b54
PH
5683 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5684 if (ret)
577b785f
SR
5685 return ret;
5686
8d18eaaf
LZ
5687 if (val != 0 && val != 1)
5688 return -EINVAL;
577b785f 5689
8d18eaaf 5690 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 5691 mutex_lock(&trace_types_lock);
2b6080f2 5692 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
c757bea9 5693 topt->opt, !val);
577b785f
SR
5694 mutex_unlock(&trace_types_lock);
5695 if (ret)
5696 return ret;
577b785f
SR
5697 }
5698
5699 *ppos += cnt;
5700
5701 return cnt;
5702}
5703
5704
5705static const struct file_operations trace_options_fops = {
5706 .open = tracing_open_generic,
5707 .read = trace_options_read,
5708 .write = trace_options_write,
b444786f 5709 .llseek = generic_file_llseek,
577b785f
SR
5710};
5711
a8259075
SR
5712static ssize_t
5713trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5714 loff_t *ppos)
5715{
5716 long index = (long)filp->private_data;
5717 char *buf;
5718
5719 if (trace_flags & (1 << index))
5720 buf = "1\n";
5721 else
5722 buf = "0\n";
5723
5724 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5725}
5726
5727static ssize_t
5728trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5729 loff_t *ppos)
5730{
2b6080f2 5731 struct trace_array *tr = &global_trace;
a8259075 5732 long index = (long)filp->private_data;
a8259075
SR
5733 unsigned long val;
5734 int ret;
5735
22fe9b54
PH
5736 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5737 if (ret)
a8259075
SR
5738 return ret;
5739
f2d84b65 5740 if (val != 0 && val != 1)
a8259075 5741 return -EINVAL;
69d34da2
SRRH
5742
5743 mutex_lock(&trace_types_lock);
2b6080f2 5744 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 5745 mutex_unlock(&trace_types_lock);
a8259075 5746
613f04a0
SRRH
5747 if (ret < 0)
5748 return ret;
5749
a8259075
SR
5750 *ppos += cnt;
5751
5752 return cnt;
5753}
5754
a8259075
SR
5755static const struct file_operations trace_options_core_fops = {
5756 .open = tracing_open_generic,
5757 .read = trace_options_core_read,
5758 .write = trace_options_core_write,
b444786f 5759 .llseek = generic_file_llseek,
a8259075
SR
5760};
5761
5452af66 5762struct dentry *trace_create_file(const char *name,
f4ae40a6 5763 umode_t mode,
5452af66
FW
5764 struct dentry *parent,
5765 void *data,
5766 const struct file_operations *fops)
5767{
5768 struct dentry *ret;
5769
5770 ret = debugfs_create_file(name, mode, parent, data, fops);
5771 if (!ret)
5772 pr_warning("Could not create debugfs '%s' entry\n", name);
5773
5774 return ret;
5775}
5776
5777
2b6080f2 5778static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
5779{
5780 struct dentry *d_tracer;
a8259075 5781
2b6080f2
SR
5782 if (tr->options)
5783 return tr->options;
a8259075 5784
2b6080f2 5785 d_tracer = tracing_init_dentry_tr(tr);
a8259075
SR
5786 if (!d_tracer)
5787 return NULL;
5788
2b6080f2
SR
5789 tr->options = debugfs_create_dir("options", d_tracer);
5790 if (!tr->options) {
a8259075
SR
5791 pr_warning("Could not create debugfs directory 'options'\n");
5792 return NULL;
5793 }
5794
2b6080f2 5795 return tr->options;
a8259075
SR
5796}
5797
577b785f 5798static void
2b6080f2
SR
5799create_trace_option_file(struct trace_array *tr,
5800 struct trace_option_dentry *topt,
577b785f
SR
5801 struct tracer_flags *flags,
5802 struct tracer_opt *opt)
5803{
5804 struct dentry *t_options;
577b785f 5805
2b6080f2 5806 t_options = trace_options_init_dentry(tr);
577b785f
SR
5807 if (!t_options)
5808 return;
5809
5810 topt->flags = flags;
5811 topt->opt = opt;
2b6080f2 5812 topt->tr = tr;
577b785f 5813
5452af66 5814 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
5815 &trace_options_fops);
5816
577b785f
SR
5817}
5818
5819static struct trace_option_dentry *
2b6080f2 5820create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
5821{
5822 struct trace_option_dentry *topts;
5823 struct tracer_flags *flags;
5824 struct tracer_opt *opts;
5825 int cnt;
5826
5827 if (!tracer)
5828 return NULL;
5829
5830 flags = tracer->flags;
5831
5832 if (!flags || !flags->opts)
5833 return NULL;
5834
5835 opts = flags->opts;
5836
5837 for (cnt = 0; opts[cnt].name; cnt++)
5838 ;
5839
0cfe8245 5840 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
5841 if (!topts)
5842 return NULL;
5843
5844 for (cnt = 0; opts[cnt].name; cnt++)
2b6080f2 5845 create_trace_option_file(tr, &topts[cnt], flags,
577b785f
SR
5846 &opts[cnt]);
5847
5848 return topts;
5849}
5850
5851static void
5852destroy_trace_option_files(struct trace_option_dentry *topts)
5853{
5854 int cnt;
5855
5856 if (!topts)
5857 return;
5858
5859 for (cnt = 0; topts[cnt].opt; cnt++) {
5860 if (topts[cnt].entry)
5861 debugfs_remove(topts[cnt].entry);
5862 }
5863
5864 kfree(topts);
5865}
5866
a8259075 5867static struct dentry *
2b6080f2
SR
5868create_trace_option_core_file(struct trace_array *tr,
5869 const char *option, long index)
a8259075
SR
5870{
5871 struct dentry *t_options;
a8259075 5872
2b6080f2 5873 t_options = trace_options_init_dentry(tr);
a8259075
SR
5874 if (!t_options)
5875 return NULL;
5876
5452af66 5877 return trace_create_file(option, 0644, t_options, (void *)index,
a8259075 5878 &trace_options_core_fops);
a8259075
SR
5879}
5880
2b6080f2 5881static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
5882{
5883 struct dentry *t_options;
a8259075
SR
5884 int i;
5885
2b6080f2 5886 t_options = trace_options_init_dentry(tr);
a8259075
SR
5887 if (!t_options)
5888 return;
5889
5452af66 5890 for (i = 0; trace_options[i]; i++)
2b6080f2 5891 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
5892}
5893
499e5470
SR
5894static ssize_t
5895rb_simple_read(struct file *filp, char __user *ubuf,
5896 size_t cnt, loff_t *ppos)
5897{
348f0fc2 5898 struct trace_array *tr = filp->private_data;
499e5470
SR
5899 char buf[64];
5900 int r;
5901
10246fa3 5902 r = tracer_tracing_is_on(tr);
499e5470
SR
5903 r = sprintf(buf, "%d\n", r);
5904
5905 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5906}
5907
5908static ssize_t
5909rb_simple_write(struct file *filp, const char __user *ubuf,
5910 size_t cnt, loff_t *ppos)
5911{
348f0fc2 5912 struct trace_array *tr = filp->private_data;
12883efb 5913 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
5914 unsigned long val;
5915 int ret;
5916
5917 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5918 if (ret)
5919 return ret;
5920
5921 if (buffer) {
2df8f8a6
SR
5922 mutex_lock(&trace_types_lock);
5923 if (val) {
10246fa3 5924 tracer_tracing_on(tr);
2b6080f2
SR
5925 if (tr->current_trace->start)
5926 tr->current_trace->start(tr);
2df8f8a6 5927 } else {
10246fa3 5928 tracer_tracing_off(tr);
2b6080f2
SR
5929 if (tr->current_trace->stop)
5930 tr->current_trace->stop(tr);
2df8f8a6
SR
5931 }
5932 mutex_unlock(&trace_types_lock);
499e5470
SR
5933 }
5934
5935 (*ppos)++;
5936
5937 return cnt;
5938}
5939
5940static const struct file_operations rb_simple_fops = {
7b85af63 5941 .open = tracing_open_generic_tr,
499e5470
SR
5942 .read = rb_simple_read,
5943 .write = rb_simple_write,
7b85af63 5944 .release = tracing_release_generic_tr,
499e5470
SR
5945 .llseek = default_llseek,
5946};
5947
277ba044
SR
5948struct dentry *trace_instance_dir;
5949
5950static void
5951init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5952
55034cd6
SRRH
5953static int
5954allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
5955{
5956 enum ring_buffer_flags rb_flags;
737223fb
SRRH
5957
5958 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5959
dced341b
SRRH
5960 buf->tr = tr;
5961
55034cd6
SRRH
5962 buf->buffer = ring_buffer_alloc(size, rb_flags);
5963 if (!buf->buffer)
5964 return -ENOMEM;
737223fb 5965
55034cd6
SRRH
5966 buf->data = alloc_percpu(struct trace_array_cpu);
5967 if (!buf->data) {
5968 ring_buffer_free(buf->buffer);
5969 return -ENOMEM;
5970 }
737223fb 5971
737223fb
SRRH
5972 /* Allocate the first page for all buffers */
5973 set_buffer_entries(&tr->trace_buffer,
5974 ring_buffer_size(tr->trace_buffer.buffer, 0));
5975
55034cd6
SRRH
5976 return 0;
5977}
737223fb 5978
55034cd6
SRRH
5979static int allocate_trace_buffers(struct trace_array *tr, int size)
5980{
5981 int ret;
737223fb 5982
55034cd6
SRRH
5983 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5984 if (ret)
5985 return ret;
737223fb 5986
55034cd6
SRRH
5987#ifdef CONFIG_TRACER_MAX_TRACE
5988 ret = allocate_trace_buffer(tr, &tr->max_buffer,
5989 allocate_snapshot ? size : 1);
5990 if (WARN_ON(ret)) {
737223fb 5991 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
5992 free_percpu(tr->trace_buffer.data);
5993 return -ENOMEM;
5994 }
5995 tr->allocated_snapshot = allocate_snapshot;
737223fb 5996
55034cd6
SRRH
5997 /*
5998 * Only the top level trace array gets its snapshot allocated
5999 * from the kernel command line.
6000 */
6001 allocate_snapshot = false;
737223fb 6002#endif
55034cd6 6003 return 0;
737223fb
SRRH
6004}
6005
6006static int new_instance_create(const char *name)
6007{
277ba044
SR
6008 struct trace_array *tr;
6009 int ret;
277ba044
SR
6010
6011 mutex_lock(&trace_types_lock);
6012
6013 ret = -EEXIST;
6014 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6015 if (tr->name && strcmp(tr->name, name) == 0)
6016 goto out_unlock;
6017 }
6018
6019 ret = -ENOMEM;
6020 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6021 if (!tr)
6022 goto out_unlock;
6023
6024 tr->name = kstrdup(name, GFP_KERNEL);
6025 if (!tr->name)
6026 goto out_free_tr;
6027
ccfe9e42
AL
6028 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6029 goto out_free_tr;
6030
6031 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6032
277ba044
SR
6033 raw_spin_lock_init(&tr->start_lock);
6034
6035 tr->current_trace = &nop_trace;
6036
6037 INIT_LIST_HEAD(&tr->systems);
6038 INIT_LIST_HEAD(&tr->events);
6039
737223fb 6040 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6041 goto out_free_tr;
6042
277ba044
SR
6043 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6044 if (!tr->dir)
6045 goto out_free_tr;
6046
6047 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7
AL
6048 if (ret) {
6049 debugfs_remove_recursive(tr->dir);
277ba044 6050 goto out_free_tr;
609e85a7 6051 }
277ba044
SR
6052
6053 init_tracer_debugfs(tr, tr->dir);
6054
6055 list_add(&tr->list, &ftrace_trace_arrays);
6056
6057 mutex_unlock(&trace_types_lock);
6058
6059 return 0;
6060
6061 out_free_tr:
12883efb
SRRH
6062 if (tr->trace_buffer.buffer)
6063 ring_buffer_free(tr->trace_buffer.buffer);
ccfe9e42 6064 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6065 kfree(tr->name);
6066 kfree(tr);
6067
6068 out_unlock:
6069 mutex_unlock(&trace_types_lock);
6070
6071 return ret;
6072
6073}
6074
0c8916c3
SR
6075static int instance_delete(const char *name)
6076{
6077 struct trace_array *tr;
6078 int found = 0;
6079 int ret;
6080
6081 mutex_lock(&trace_types_lock);
6082
6083 ret = -ENODEV;
6084 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6085 if (tr->name && strcmp(tr->name, name) == 0) {
6086 found = 1;
6087 break;
6088 }
6089 }
6090 if (!found)
6091 goto out_unlock;
6092
a695cb58
SRRH
6093 ret = -EBUSY;
6094 if (tr->ref)
6095 goto out_unlock;
6096
0c8916c3
SR
6097 list_del(&tr->list);
6098
6099 event_trace_del_tracer(tr);
6100 debugfs_remove_recursive(tr->dir);
12883efb
SRRH
6101 free_percpu(tr->trace_buffer.data);
6102 ring_buffer_free(tr->trace_buffer.buffer);
0c8916c3
SR
6103
6104 kfree(tr->name);
6105 kfree(tr);
6106
6107 ret = 0;
6108
6109 out_unlock:
6110 mutex_unlock(&trace_types_lock);
6111
6112 return ret;
6113}
6114
277ba044
SR
6115static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6116{
6117 struct dentry *parent;
6118 int ret;
6119
6120 /* Paranoid: Make sure the parent is the "instances" directory */
6121 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6122 if (WARN_ON_ONCE(parent != trace_instance_dir))
6123 return -ENOENT;
6124
6125 /*
6126 * The inode mutex is locked, but debugfs_create_dir() will also
6127 * take the mutex. As the instances directory can not be destroyed
6128 * or changed in any other way, it is safe to unlock it, and
6129 * let the dentry try. If two users try to make the same dir at
6130 * the same time, then the new_instance_create() will determine the
6131 * winner.
6132 */
6133 mutex_unlock(&inode->i_mutex);
6134
6135 ret = new_instance_create(dentry->d_iname);
6136
6137 mutex_lock(&inode->i_mutex);
6138
6139 return ret;
6140}
6141
0c8916c3
SR
6142static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6143{
6144 struct dentry *parent;
6145 int ret;
6146
6147 /* Paranoid: Make sure the parent is the "instances" directory */
6148 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6149 if (WARN_ON_ONCE(parent != trace_instance_dir))
6150 return -ENOENT;
6151
6152 /* The caller did a dget() on dentry */
6153 mutex_unlock(&dentry->d_inode->i_mutex);
6154
6155 /*
6156 * The inode mutex is locked, but debugfs_create_dir() will also
6157 * take the mutex. As the instances directory can not be destroyed
6158 * or changed in any other way, it is safe to unlock it, and
6159 * let the dentry try. If two users try to make the same dir at
6160 * the same time, then the instance_delete() will determine the
6161 * winner.
6162 */
6163 mutex_unlock(&inode->i_mutex);
6164
6165 ret = instance_delete(dentry->d_iname);
6166
6167 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6168 mutex_lock(&dentry->d_inode->i_mutex);
6169
6170 return ret;
6171}
6172
277ba044
SR
6173static const struct inode_operations instance_dir_inode_operations = {
6174 .lookup = simple_lookup,
6175 .mkdir = instance_mkdir,
0c8916c3 6176 .rmdir = instance_rmdir,
277ba044
SR
6177};
6178
6179static __init void create_trace_instances(struct dentry *d_tracer)
6180{
6181 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6182 if (WARN_ON(!trace_instance_dir))
6183 return;
6184
6185 /* Hijack the dir inode operations, to allow mkdir */
6186 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6187}
6188
2b6080f2
SR
6189static void
6190init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6191{
121aaee7 6192 int cpu;
2b6080f2 6193
ccfe9e42
AL
6194 trace_create_file("tracing_cpumask", 0644, d_tracer,
6195 tr, &tracing_cpumask_fops);
6196
2b6080f2
SR
6197 trace_create_file("trace_options", 0644, d_tracer,
6198 tr, &tracing_iter_fops);
6199
6200 trace_create_file("trace", 0644, d_tracer,
6484c71c 6201 tr, &tracing_fops);
2b6080f2
SR
6202
6203 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6204 tr, &tracing_pipe_fops);
2b6080f2
SR
6205
6206 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6207 tr, &tracing_entries_fops);
2b6080f2
SR
6208
6209 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6210 tr, &tracing_total_entries_fops);
6211
238ae93d 6212 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6213 tr, &tracing_free_buffer_fops);
6214
6215 trace_create_file("trace_marker", 0220, d_tracer,
6216 tr, &tracing_mark_fops);
6217
6218 trace_create_file("trace_clock", 0644, d_tracer, tr,
6219 &trace_clock_fops);
6220
6221 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6222 tr, &rb_simple_fops);
ce9bae55
SRRH
6223
6224#ifdef CONFIG_TRACER_SNAPSHOT
6225 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6226 tr, &snapshot_fops);
ce9bae55 6227#endif
121aaee7
SRRH
6228
6229 for_each_tracing_cpu(cpu)
6230 tracing_init_debugfs_percpu(tr, cpu);
6231
2b6080f2
SR
6232}
6233
b5ad384e 6234static __init int tracer_init_debugfs(void)
bc0c38d1
SR
6235{
6236 struct dentry *d_tracer;
bc0c38d1 6237
7e53bd42
LJ
6238 trace_access_lock_init();
6239
bc0c38d1 6240 d_tracer = tracing_init_dentry();
ed6f1c99
NK
6241 if (!d_tracer)
6242 return 0;
bc0c38d1 6243
2b6080f2 6244 init_tracer_debugfs(&global_trace, d_tracer);
bc0c38d1 6245
5452af66
FW
6246 trace_create_file("available_tracers", 0444, d_tracer,
6247 &global_trace, &show_traces_fops);
6248
339ae5d3 6249 trace_create_file("current_tracer", 0644, d_tracer,
5452af66
FW
6250 &global_trace, &set_tracer_fops);
6251
5d4a9dba 6252#ifdef CONFIG_TRACER_MAX_TRACE
5452af66
FW
6253 trace_create_file("tracing_max_latency", 0644, d_tracer,
6254 &tracing_max_latency, &tracing_max_lat_fops);
0e950173 6255#endif
5452af66
FW
6256
6257 trace_create_file("tracing_thresh", 0644, d_tracer,
6258 &tracing_thresh, &tracing_max_lat_fops);
a8259075 6259
339ae5d3 6260 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6261 NULL, &tracing_readme_fops);
6262
69abe6a5
AP
6263 trace_create_file("saved_cmdlines", 0444, d_tracer,
6264 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6265
bc0c38d1 6266#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6267 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6268 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6269#endif
b04cc6b1 6270
277ba044 6271 create_trace_instances(d_tracer);
5452af66 6272
2b6080f2 6273 create_trace_options_dir(&global_trace);
b04cc6b1 6274
b5ad384e 6275 return 0;
bc0c38d1
SR
6276}
6277
3f5a54e3
SR
6278static int trace_panic_handler(struct notifier_block *this,
6279 unsigned long event, void *unused)
6280{
944ac425 6281 if (ftrace_dump_on_oops)
cecbca96 6282 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6283 return NOTIFY_OK;
6284}
6285
6286static struct notifier_block trace_panic_notifier = {
6287 .notifier_call = trace_panic_handler,
6288 .next = NULL,
6289 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6290};
6291
6292static int trace_die_handler(struct notifier_block *self,
6293 unsigned long val,
6294 void *data)
6295{
6296 switch (val) {
6297 case DIE_OOPS:
944ac425 6298 if (ftrace_dump_on_oops)
cecbca96 6299 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6300 break;
6301 default:
6302 break;
6303 }
6304 return NOTIFY_OK;
6305}
6306
6307static struct notifier_block trace_die_notifier = {
6308 .notifier_call = trace_die_handler,
6309 .priority = 200
6310};
6311
6312/*
6313 * printk is set to max of 1024, we really don't need it that big.
6314 * Nothing should be printing 1000 characters anyway.
6315 */
6316#define TRACE_MAX_PRINT 1000
6317
6318/*
6319 * Define here KERN_TRACE so that we have one place to modify
6320 * it if we decide to change what log level the ftrace dump
6321 * should be at.
6322 */
428aee14 6323#define KERN_TRACE KERN_EMERG
3f5a54e3 6324
955b61e5 6325void
3f5a54e3
SR
6326trace_printk_seq(struct trace_seq *s)
6327{
6328 /* Probably should print a warning here. */
bd6df187
J
6329 if (s->len >= TRACE_MAX_PRINT)
6330 s->len = TRACE_MAX_PRINT;
3f5a54e3
SR
6331
6332 /* should be zero ended, but we are paranoid. */
6333 s->buffer[s->len] = 0;
6334
6335 printk(KERN_TRACE "%s", s->buffer);
6336
f9520750 6337 trace_seq_init(s);
3f5a54e3
SR
6338}
6339
955b61e5
JW
6340void trace_init_global_iter(struct trace_iterator *iter)
6341{
6342 iter->tr = &global_trace;
2b6080f2 6343 iter->trace = iter->tr->current_trace;
ae3b5093 6344 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6345 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6346
6347 if (iter->trace && iter->trace->open)
6348 iter->trace->open(iter);
6349
6350 /* Annotate start of buffers if we had overruns */
6351 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6352 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6353
6354 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6355 if (trace_clocks[iter->tr->clock_id].in_ns)
6356 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
6357}
6358
7fe70b57 6359void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 6360{
3f5a54e3
SR
6361 /* use static because iter can be a bit big for the stack */
6362 static struct trace_iterator iter;
7fe70b57 6363 static atomic_t dump_running;
cf586b61 6364 unsigned int old_userobj;
d769041f
SR
6365 unsigned long flags;
6366 int cnt = 0, cpu;
3f5a54e3 6367
7fe70b57
SRRH
6368 /* Only allow one dump user at a time. */
6369 if (atomic_inc_return(&dump_running) != 1) {
6370 atomic_dec(&dump_running);
6371 return;
6372 }
3f5a54e3 6373
7fe70b57
SRRH
6374 /*
6375 * Always turn off tracing when we dump.
6376 * We don't need to show trace output of what happens
6377 * between multiple crashes.
6378 *
6379 * If the user does a sysrq-z, then they can re-enable
6380 * tracing with echo 1 > tracing_on.
6381 */
0ee6b6cf 6382 tracing_off();
cf586b61 6383
7fe70b57 6384 local_irq_save(flags);
3f5a54e3 6385
38dbe0b1 6386 /* Simulate the iterator */
955b61e5
JW
6387 trace_init_global_iter(&iter);
6388
d769041f 6389 for_each_tracing_cpu(cpu) {
12883efb 6390 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
d769041f
SR
6391 }
6392
cf586b61
FW
6393 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6394
b54d3de9
TE
6395 /* don't look at user memory in panic mode */
6396 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6397
cecbca96
FW
6398 switch (oops_dump_mode) {
6399 case DUMP_ALL:
ae3b5093 6400 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6401 break;
6402 case DUMP_ORIG:
6403 iter.cpu_file = raw_smp_processor_id();
6404 break;
6405 case DUMP_NONE:
6406 goto out_enable;
6407 default:
6408 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 6409 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6410 }
6411
6412 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 6413
7fe70b57
SRRH
6414 /* Did function tracer already get disabled? */
6415 if (ftrace_is_dead()) {
6416 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6417 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6418 }
6419
3f5a54e3
SR
6420 /*
6421 * We need to stop all tracing on all CPUS to read the
6422 * the next buffer. This is a bit expensive, but is
6423 * not done often. We fill all what we can read,
6424 * and then release the locks again.
6425 */
6426
3f5a54e3
SR
6427 while (!trace_empty(&iter)) {
6428
6429 if (!cnt)
6430 printk(KERN_TRACE "---------------------------------\n");
6431
6432 cnt++;
6433
6434 /* reset all but tr, trace, and overruns */
6435 memset(&iter.seq, 0,
6436 sizeof(struct trace_iterator) -
6437 offsetof(struct trace_iterator, seq));
6438 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6439 iter.pos = -1;
6440
955b61e5 6441 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
6442 int ret;
6443
6444 ret = print_trace_line(&iter);
6445 if (ret != TRACE_TYPE_NO_CONSUME)
6446 trace_consume(&iter);
3f5a54e3 6447 }
b892e5c8 6448 touch_nmi_watchdog();
3f5a54e3
SR
6449
6450 trace_printk_seq(&iter.seq);
6451 }
6452
6453 if (!cnt)
6454 printk(KERN_TRACE " (ftrace buffer empty)\n");
6455 else
6456 printk(KERN_TRACE "---------------------------------\n");
6457
cecbca96 6458 out_enable:
7fe70b57 6459 trace_flags |= old_userobj;
cf586b61 6460
7fe70b57
SRRH
6461 for_each_tracing_cpu(cpu) {
6462 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 6463 }
7fe70b57 6464 atomic_dec(&dump_running);
cd891ae0 6465 local_irq_restore(flags);
3f5a54e3 6466}
a8eecf22 6467EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 6468
3928a8a2 6469__init static int tracer_alloc_buffers(void)
bc0c38d1 6470{
73c5162a 6471 int ring_buf_size;
9e01c1b7 6472 int ret = -ENOMEM;
4c11d7ae 6473
750912fa 6474
9e01c1b7
RR
6475 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6476 goto out;
6477
ccfe9e42 6478 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 6479 goto out_free_buffer_mask;
4c11d7ae 6480
07d777fe
SR
6481 /* Only allocate trace_printk buffers if a trace_printk exists */
6482 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 6483 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
6484 trace_printk_init_buffers();
6485
73c5162a
SR
6486 /* To save memory, keep the ring buffer size to its minimum */
6487 if (ring_buffer_expanded)
6488 ring_buf_size = trace_buf_size;
6489 else
6490 ring_buf_size = 1;
6491
9e01c1b7 6492 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 6493 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 6494
2b6080f2
SR
6495 raw_spin_lock_init(&global_trace.start_lock);
6496
9e01c1b7 6497 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 6498 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
6499 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6500 WARN_ON(1);
9e01c1b7 6501 goto out_free_cpumask;
4c11d7ae 6502 }
a7603ff4 6503
499e5470
SR
6504 if (global_trace.buffer_disabled)
6505 tracing_off();
4c11d7ae 6506
bc0c38d1
SR
6507 trace_init_cmdlines();
6508
ca164318
SRRH
6509 /*
6510 * register_tracer() might reference current_trace, so it
6511 * needs to be set before we register anything. This is
6512 * just a bootstrap of current_trace anyway.
6513 */
2b6080f2
SR
6514 global_trace.current_trace = &nop_trace;
6515
ca164318
SRRH
6516 register_tracer(&nop_trace);
6517
60a11774
SR
6518 /* All seems OK, enable tracing */
6519 tracing_disabled = 0;
3928a8a2 6520
3f5a54e3
SR
6521 atomic_notifier_chain_register(&panic_notifier_list,
6522 &trace_panic_notifier);
6523
6524 register_die_notifier(&trace_die_notifier);
2fc1dfbe 6525
ae63b31e
SR
6526 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6527
6528 INIT_LIST_HEAD(&global_trace.systems);
6529 INIT_LIST_HEAD(&global_trace.events);
6530 list_add(&global_trace.list, &ftrace_trace_arrays);
6531
7bcfaf54
SR
6532 while (trace_boot_options) {
6533 char *option;
6534
6535 option = strsep(&trace_boot_options, ",");
2b6080f2 6536 trace_set_options(&global_trace, option);
7bcfaf54
SR
6537 }
6538
77fd5c15
SRRH
6539 register_snapshot_cmd();
6540
2fc1dfbe 6541 return 0;
3f5a54e3 6542
9e01c1b7 6543out_free_cpumask:
12883efb
SRRH
6544 free_percpu(global_trace.trace_buffer.data);
6545#ifdef CONFIG_TRACER_MAX_TRACE
6546 free_percpu(global_trace.max_buffer.data);
6547#endif
ccfe9e42 6548 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
6549out_free_buffer_mask:
6550 free_cpumask_var(tracing_buffer_mask);
6551out:
6552 return ret;
bc0c38d1 6553}
b2821ae6
SR
6554
6555__init static int clear_boot_tracer(void)
6556{
6557 /*
6558 * The default tracer at boot buffer is an init section.
6559 * This function is called in lateinit. If we did not
6560 * find the boot tracer, then clear it out, to prevent
6561 * later registration from accessing the buffer that is
6562 * about to be freed.
6563 */
6564 if (!default_bootup_tracer)
6565 return 0;
6566
6567 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6568 default_bootup_tracer);
6569 default_bootup_tracer = NULL;
6570
6571 return 0;
6572}
6573
b5ad384e
FW
6574early_initcall(tracer_alloc_buffers);
6575fs_initcall(tracer_init_debugfs);
b2821ae6 6576late_initcall(clear_boot_tracer);
This page took 0.91076 seconds and 5 git commands to generate.