kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring-buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
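/*
 * For example (illustrative note, derived from the parsing above): booting
 * with "ftrace_dump_on_oops" dumps every CPU's buffer on an oops, while
 * "ftrace_dump_on_oops=orig_cpu" dumps only the buffer of the CPU that
 * triggered it.
 */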

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
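/*
 * Note (illustrative): the "+ 500" above rounds to the nearest microsecond
 * rather than truncating, e.g. 1499 ns becomes 1 us while 1500 ns becomes
 * 2 us.
 */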

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
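/*
 * Illustrative usage (not from the original file): code that holds on to a
 * trace_array outside of trace_types_lock typically brackets the access with
 *
 *	if (trace_array_get(tr) < 0)
 *		return;
 *	...use tr...
 *	trace_array_put(tr);
 *
 * so the array cannot go away while it is referenced.
 */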

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
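/*
 * Illustrative pairing (not from the original file): a reader of cpu N's
 * buffer brackets its access with
 *
 *	trace_access_lock(cpu);
 *	...consume events from that cpu's ring buffer...
 *	trace_access_unlock(cpu);
 *
 * and passes RING_BUFFER_ALL_CPUS to take exclusive access to all buffers.
 */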

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
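/*
 * Note (illustrative, behaviour defined outside this file): callers normally
 * do not use __trace_puts() directly; the trace_puts("...") helper picks
 * between __trace_puts() and __trace_bputs() depending on whether the string
 * is a compile-time constant.
 */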

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
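/*
 * For example (illustrative): booting with "trace_buf_size=4M" asks for
 * roughly a 4 MiB ring buffer per CPU, since memparse() accepts K/M/G
 * suffixes and the size is later rounded to page size.
 */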

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
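/*
 * Note (illustrative): the boot parameter takes microseconds, e.g.
 * "tracing_thresh=100" stores 100000 in tracing_thresh, which is kept in
 * nanoseconds internally (see the "* 1000" above and nsecs_to_usecs()).
 */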

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};
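/*
 * Illustrative note: the names above are what appear in the tracefs
 * "trace_clock" file, so e.g. "echo global > trace_clock" switches the
 * ring buffer timestamps to trace_clock_global().
 */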

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
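/*
 * Illustrative usage (not from the original file): a write handler that
 * wants one whitespace-separated token per call might do
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		handle_token(parser.buffer);	// handle_token() is hypothetical
 *	trace_parser_put(&parser);
 */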

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
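/*
 * Illustrative sketch (not from the original file): a minimal tracer plugin
 * registers itself from an initcall roughly like
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,		// hypothetical callback
 *		.reset	= example_tracer_reset,		// hypothetical callback
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */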

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
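/*
 * Illustrative example: if pid 1234 ("bash") is saved into slot 7 above,
 * then map_pid_to_cmdline[1234] == 7, map_cmdline_to_pid[7] == 1234 and
 * saved_cmdlines[7 * TASK_COMM_LEN] holds "bash", so later lookups by pid
 * can recover the comm without touching the task struct.
 */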

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

c0a0d0d3 1746#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1747
1748#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749struct ftrace_stack {
1750 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1751};
1752
1753static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755
e77405ad 1756static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1757 unsigned long flags,
1fd8df2c 1758 int skip, int pc, struct pt_regs *regs)
86387f7e 1759{
e1112b4d 1760 struct ftrace_event_call *call = &event_kernel_stack;
3928a8a2 1761 struct ring_buffer_event *event;
777e208d 1762 struct stack_entry *entry;
86387f7e 1763 struct stack_trace trace;
4a9bd3f1
SR
1764 int use_stack;
1765 int size = FTRACE_STACK_ENTRIES;
1766
1767 trace.nr_entries = 0;
1768 trace.skip = skip;
1769
1770 /*
1771 * Since events can happen in NMIs there's no safe way to
1772 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1773 * or NMI comes in, it will just have to use the default
1774 * FTRACE_STACK_SIZE.
1775 */
1776 preempt_disable_notrace();
1777
82146529 1778 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1779 /*
1780 * We don't need any atomic variables, just a barrier.
1781 * If an interrupt comes in, we don't care, because it would
1782 * have exited and put the counter back to what we want.
1783 * We just need a barrier to keep gcc from moving things
1784 * around.
1785 */
1786 barrier();
1787 if (use_stack == 1) {
bdffd893 1788 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1789 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1790
1791 if (regs)
1792 save_stack_trace_regs(regs, &trace);
1793 else
1794 save_stack_trace(&trace);
1795
1796 if (trace.nr_entries > size)
1797 size = trace.nr_entries;
1798 } else
1799 /* From now on, use_stack is a boolean */
1800 use_stack = 0;
1801
1802 size *= sizeof(unsigned long);
86387f7e 1803
e77405ad 1804 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1805 sizeof(*entry) + size, flags, pc);
3928a8a2 1806 if (!event)
4a9bd3f1
SR
1807 goto out;
1808 entry = ring_buffer_event_data(event);
86387f7e 1809
4a9bd3f1
SR
1810 memset(&entry->caller, 0, size);
1811
1812 if (use_stack)
1813 memcpy(&entry->caller, trace.entries,
1814 trace.nr_entries * sizeof(unsigned long));
1815 else {
1816 trace.max_entries = FTRACE_STACK_ENTRIES;
1817 trace.entries = entry->caller;
1818 if (regs)
1819 save_stack_trace_regs(regs, &trace);
1820 else
1821 save_stack_trace(&trace);
1822 }
1823
1824 entry->size = trace.nr_entries;
86387f7e 1825
f306cc82 1826 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1827 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1828
1829 out:
1830 /* Again, don't let gcc optimize things here */
1831 barrier();
82146529 1832 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1833 preempt_enable_notrace();
1834
f0a920d5
IM
1835}
1836
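/*
 * [Editorial illustration, not part of trace.c] The reservation scheme used by
 * __ftrace_trace_stack() above, modelled in plain C so the idea is easier to
 * follow: a nesting counter is bumped on entry, and only the outermost user
 * (counter == 1) may borrow the large shared scratch array; anything that
 * nests on top of it (an interrupt or NMI arriving mid-capture) falls back to
 * a small local buffer.  The real code additionally needs
 * preempt_disable_notrace() and barrier(); this sketch, with invented names,
 * uses a thread-local variable to stand in for the per-CPU counter.
 */
#include <stdio.h>

#define BIG_DEPTH	128
#define SMALL_DEPTH	8

static _Thread_local int scratch_reserve;	/* models ftrace_stack_reserve */
static _Thread_local unsigned long big_scratch[BIG_DEPTH]; /* models ftrace_stack */

static void record_stack(void)
{
	unsigned long small[SMALL_DEPTH];
	unsigned long *buf;
	int depth;

	if (++scratch_reserve == 1) {		/* outermost caller: big buffer */
		buf = big_scratch;
		depth = BIG_DEPTH;
	} else {				/* nested caller: small fallback */
		buf = small;
		depth = SMALL_DEPTH;
	}

	/* ... capture up to 'depth' return addresses into 'buf' ... */
	printf("capturing %d entries into the %s buffer\n", depth,
	       buf == big_scratch ? "shared" : "local");

	--scratch_reserve;			/* release the reservation */
}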
1fd8df2c
MH
1837void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838 int skip, int pc, struct pt_regs *regs)
1839{
1840 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841 return;
1842
1843 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844}
1845
e77405ad
SR
1846void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847 int skip, int pc)
53614991
SR
1848{
1849 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850 return;
1851
1fd8df2c 1852 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
53614991
SR
1853}
1854
c0a0d0d3
FW
1855void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1856 int pc)
38697053 1857{
12883efb 1858 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1859}
1860
03889384
SR
1861/**
1862 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1863 * @skip: Number of functions to skip (helper handlers)
03889384 1864 */
c142be8e 1865void trace_dump_stack(int skip)
03889384
SR
1866{
1867 unsigned long flags;
1868
1869 if (tracing_disabled || tracing_selftest_running)
e36c5458 1870 return;
03889384
SR
1871
1872 local_save_flags(flags);
1873
c142be8e
SRRH
1874 /*
 1875	 * Skip 3 more frames; that seems to get us to the caller of
1876 * this function.
1877 */
1878 skip += 3;
1879 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880 flags, skip, preempt_count(), NULL);
03889384
SR
1881}
1882
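/*
 * [Editorial example, not part of trace.c] Typical use of trace_dump_stack()
 * from other kernel code: drop a one-line call into a path being debugged and
 * the backtrace is recorded into the ftrace ring buffer instead of the printk
 * log.  The surrounding function and struct below are hypothetical; only
 * trace_dump_stack() itself is the interface defined above.
 */
static void handle_widget(struct widget *w)	/* hypothetical driver helper */
{
	if (WARN_ON(!w))
		return;

	/* Record how we got here; 0 = skip no extra frames beyond the helpers. */
	trace_dump_stack(0);
}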
91e86e56
SR
1883static DEFINE_PER_CPU(int, user_stack_count);
1884
e77405ad
SR
1885void
1886ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1887{
e1112b4d 1888 struct ftrace_event_call *call = &event_user_stack;
8d7c6a96 1889 struct ring_buffer_event *event;
02b67518
TE
1890 struct userstack_entry *entry;
1891 struct stack_trace trace;
02b67518
TE
1892
1893 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894 return;
1895
b6345879
SR
1896 /*
 1897	 * NMIs cannot handle page faults, even with fixups.
 1898	 * Saving the user stack can (and often does) fault.
1899 */
1900 if (unlikely(in_nmi()))
1901 return;
02b67518 1902
91e86e56
SR
1903 /*
1904 * prevent recursion, since the user stack tracing may
1905 * trigger other kernel events.
1906 */
1907 preempt_disable();
1908 if (__this_cpu_read(user_stack_count))
1909 goto out;
1910
1911 __this_cpu_inc(user_stack_count);
1912
e77405ad 1913 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1914 sizeof(*entry), flags, pc);
02b67518 1915 if (!event)
1dbd1951 1916 goto out_drop_count;
02b67518 1917 entry = ring_buffer_event_data(event);
02b67518 1918
48659d31 1919 entry->tgid = current->tgid;
02b67518
TE
1920 memset(&entry->caller, 0, sizeof(entry->caller));
1921
1922 trace.nr_entries = 0;
1923 trace.max_entries = FTRACE_STACK_ENTRIES;
1924 trace.skip = 0;
1925 trace.entries = entry->caller;
1926
1927 save_stack_trace_user(&trace);
f306cc82 1928 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1929 __buffer_unlock_commit(buffer, event);
91e86e56 1930
1dbd1951 1931 out_drop_count:
91e86e56 1932 __this_cpu_dec(user_stack_count);
91e86e56
SR
1933 out:
1934 preempt_enable();
02b67518
TE
1935}
1936
4fd27358
HE
1937#ifdef UNUSED
1938static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1939{
7be42151 1940 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1941}
4fd27358 1942#endif /* UNUSED */
02b67518 1943
c0a0d0d3
FW
1944#endif /* CONFIG_STACKTRACE */
1945
07d777fe
SR
1946/* created for use with alloc_percpu */
1947struct trace_buffer_struct {
1948 char buffer[TRACE_BUF_SIZE];
1949};
1950
1951static struct trace_buffer_struct *trace_percpu_buffer;
1952static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955
1956/*
1957 * The buffer used is dependent on the context. There is a per cpu
 1958	 * buffer for normal context, softirq context, hard irq context and
 1959	 * for NMI context. This allows for lockless recording.
1960 *
1961 * Note, if the buffers failed to be allocated, then this returns NULL
1962 */
1963static char *get_trace_buf(void)
1964{
1965 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1966
1967 /*
1968 * If we have allocated per cpu buffers, then we do not
1969 * need to do any locking.
1970 */
1971 if (in_nmi())
1972 percpu_buffer = trace_percpu_nmi_buffer;
1973 else if (in_irq())
1974 percpu_buffer = trace_percpu_irq_buffer;
1975 else if (in_softirq())
1976 percpu_buffer = trace_percpu_sirq_buffer;
1977 else
1978 percpu_buffer = trace_percpu_buffer;
1979
1980 if (!percpu_buffer)
1981 return NULL;
1982
d8a0349c 1983 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
1984}
1985
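/*
 * [Editorial sketch, not part of trace.c] The idea behind get_trace_buf()
 * above in miniature: one scratch buffer exists per (context, cpu) pair, so a
 * trace_printk() arriving in NMI or IRQ context never overwrites the buffer
 * that task context on the same CPU is still formatting into, and no locking
 * is needed.  The real code classifies the context with in_nmi()/in_irq()/
 * in_softirq() and allocates the buffers with alloc_percpu(); the enum, array
 * and function names below are invented for illustration.
 */
enum trace_ctx { CTX_TASK, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI, CTX_MAX };

#define NR_CPUS_MODEL	4
#define SCRATCH_SZ	1024

static char scratch[CTX_MAX][NR_CPUS_MODEL][SCRATCH_SZ];

static char *pick_scratch(enum trace_ctx ctx, int cpu)
{
	/* Deeper contexts get their own buffer; nothing is shared or locked. */
	return scratch[ctx][cpu];
}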
1986static int alloc_percpu_trace_buffer(void)
1987{
1988 struct trace_buffer_struct *buffers;
1989 struct trace_buffer_struct *sirq_buffers;
1990 struct trace_buffer_struct *irq_buffers;
1991 struct trace_buffer_struct *nmi_buffers;
1992
1993 buffers = alloc_percpu(struct trace_buffer_struct);
1994 if (!buffers)
1995 goto err_warn;
1996
1997 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998 if (!sirq_buffers)
1999 goto err_sirq;
2000
2001 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002 if (!irq_buffers)
2003 goto err_irq;
2004
2005 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006 if (!nmi_buffers)
2007 goto err_nmi;
2008
2009 trace_percpu_buffer = buffers;
2010 trace_percpu_sirq_buffer = sirq_buffers;
2011 trace_percpu_irq_buffer = irq_buffers;
2012 trace_percpu_nmi_buffer = nmi_buffers;
2013
2014 return 0;
2015
2016 err_nmi:
2017 free_percpu(irq_buffers);
2018 err_irq:
2019 free_percpu(sirq_buffers);
2020 err_sirq:
2021 free_percpu(buffers);
2022 err_warn:
2023 WARN(1, "Could not allocate percpu trace_printk buffer");
2024 return -ENOMEM;
2025}
2026
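/*
 * [Editorial sketch, not part of trace.c] The error-unwinding shape of
 * alloc_percpu_trace_buffer() above, shown with plain malloc(): each failure
 * label releases exactly the allocations that already succeeded, in reverse
 * order, giving one success path and no leaks.  Names are invented.
 */
#include <stdlib.h>

static int alloc_four(void **a, void **b, void **c, void **d)
{
	*a = malloc(64);
	if (!*a)
		goto err_a;
	*b = malloc(64);
	if (!*b)
		goto err_b;
	*c = malloc(64);
	if (!*c)
		goto err_c;
	*d = malloc(64);
	if (!*d)
		goto err_d;
	return 0;

 err_d:
	free(*c);
 err_c:
	free(*b);
 err_b:
	free(*a);
 err_a:
	return -1;
}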
81698831
SR
2027static int buffers_allocated;
2028
07d777fe
SR
2029void trace_printk_init_buffers(void)
2030{
07d777fe
SR
2031 if (buffers_allocated)
2032 return;
2033
2034 if (alloc_percpu_trace_buffer())
2035 return;
2036
2184db46
SR
2037 /* trace_printk() is for debug use only. Don't use it in production. */
2038
2039 pr_warning("\n**********************************************************\n");
2040 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2041 pr_warning("** **\n");
2042 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2045 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2046 pr_warning("** **\n");
2047 pr_warning("** If you see this message and you are not debugging **\n");
2048 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2049 pr_warning("** **\n");
2050 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2051 pr_warning("**********************************************************\n");
07d777fe 2052
b382ede6
SR
2053 /* Expand the buffers to set size */
2054 tracing_update_buffers();
2055
07d777fe 2056 buffers_allocated = 1;
81698831
SR
2057
2058 /*
2059 * trace_printk_init_buffers() can be called by modules.
2060 * If that happens, then we need to start cmdline recording
 2061	 * directly here. If global_trace.trace_buffer.buffer is already
 2062	 * allocated at this point, then this was called by module code.
2063 */
12883efb 2064 if (global_trace.trace_buffer.buffer)
81698831
SR
2065 tracing_start_cmdline_record();
2066}
2067
2068void trace_printk_start_comm(void)
2069{
2070 /* Start tracing comms if trace printk is set */
2071 if (!buffers_allocated)
2072 return;
2073 tracing_start_cmdline_record();
2074}
2075
2076static void trace_printk_start_stop_comm(int enabled)
2077{
2078 if (!buffers_allocated)
2079 return;
2080
2081 if (enabled)
2082 tracing_start_cmdline_record();
2083 else
2084 tracing_stop_cmdline_record();
07d777fe
SR
2085}
2086
769b0441 2087/**
48ead020 2088 * trace_vbprintk - write a binary message to the tracing buffer
769b0441
FW
2089 *
2090 */
40ce74f1 2091int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2092{
e1112b4d 2093 struct ftrace_event_call *call = &event_bprint;
769b0441 2094 struct ring_buffer_event *event;
e77405ad 2095 struct ring_buffer *buffer;
769b0441 2096 struct trace_array *tr = &global_trace;
48ead020 2097 struct bprint_entry *entry;
769b0441 2098 unsigned long flags;
07d777fe
SR
2099 char *tbuffer;
2100 int len = 0, size, pc;
769b0441
FW
2101
2102 if (unlikely(tracing_selftest_running || tracing_disabled))
2103 return 0;
2104
2105 /* Don't pollute graph traces with trace_vprintk internals */
2106 pause_graph_tracing();
2107
2108 pc = preempt_count();
5168ae50 2109 preempt_disable_notrace();
769b0441 2110
07d777fe
SR
2111 tbuffer = get_trace_buf();
2112 if (!tbuffer) {
2113 len = 0;
769b0441 2114 goto out;
07d777fe 2115 }
769b0441 2116
07d777fe 2117 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2118
07d777fe
SR
2119 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2120 goto out;
769b0441 2121
07d777fe 2122 local_save_flags(flags);
769b0441 2123 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2124 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2125 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2126 flags, pc);
769b0441 2127 if (!event)
07d777fe 2128 goto out;
769b0441
FW
2129 entry = ring_buffer_event_data(event);
2130 entry->ip = ip;
769b0441
FW
2131 entry->fmt = fmt;
2132
07d777fe 2133 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2134 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2135 __buffer_unlock_commit(buffer, event);
d931369b
SR
2136 ftrace_trace_stack(buffer, flags, 6, pc);
2137 }
769b0441 2138
769b0441 2139out:
5168ae50 2140 preempt_enable_notrace();
769b0441
FW
2141 unpause_graph_tracing();
2142
2143 return len;
2144}
48ead020
FW
2145EXPORT_SYMBOL_GPL(trace_vbprintk);
2146
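/*
 * [Editorial example, not part of trace.c] How the path above is normally
 * reached: kernel code calls trace_printk(), and a call with format arguments
 * is typically serviced by the binary trace_vbprintk() route.  The output is
 * read back through the tracefs "trace" / "trace_pipe" files described later
 * in this file.  The function below is invented for illustration;
 * trace_printk() is the real, debug-only interface, as the boot-time banner
 * above warns.
 */
static void my_driver_work(int unit, size_t len)	/* hypothetical */
{
	trace_printk("unit %d: handling %zu bytes\n", unit, len);
	/* ... do the actual work ... */
}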
12883efb
SRRH
2147static int
2148__trace_array_vprintk(struct ring_buffer *buffer,
2149 unsigned long ip, const char *fmt, va_list args)
48ead020 2150{
e1112b4d 2151 struct ftrace_event_call *call = &event_print;
48ead020 2152 struct ring_buffer_event *event;
07d777fe 2153 int len = 0, size, pc;
48ead020 2154 struct print_entry *entry;
07d777fe
SR
2155 unsigned long flags;
2156 char *tbuffer;
48ead020
FW
2157
2158 if (tracing_disabled || tracing_selftest_running)
2159 return 0;
2160
07d777fe
SR
2161 /* Don't pollute graph traces with trace_vprintk internals */
2162 pause_graph_tracing();
2163
48ead020
FW
2164 pc = preempt_count();
2165 preempt_disable_notrace();
48ead020 2166
07d777fe
SR
2167
2168 tbuffer = get_trace_buf();
2169 if (!tbuffer) {
2170 len = 0;
48ead020 2171 goto out;
07d777fe 2172 }
48ead020 2173
3558a5ac 2174 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2175
07d777fe 2176 local_save_flags(flags);
48ead020 2177 size = sizeof(*entry) + len + 1;
e77405ad 2178 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2179 flags, pc);
48ead020 2180 if (!event)
07d777fe 2181 goto out;
48ead020 2182 entry = ring_buffer_event_data(event);
c13d2f7c 2183 entry->ip = ip;
48ead020 2184
3558a5ac 2185 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2186 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2187 __buffer_unlock_commit(buffer, event);
07d777fe 2188 ftrace_trace_stack(buffer, flags, 6, pc);
d931369b 2189 }
48ead020
FW
2190 out:
2191 preempt_enable_notrace();
07d777fe 2192 unpause_graph_tracing();
48ead020
FW
2193
2194 return len;
2195}
659372d3 2196
12883efb
SRRH
2197int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2199{
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2201}
2202
2203int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2205{
2206 int ret;
2207 va_list ap;
2208
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2210 return 0;
2211
2212 va_start(ap, fmt);
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2214 va_end(ap);
2215 return ret;
2216}
2217
2218int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2220{
2221 int ret;
2222 va_list ap;
2223
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2225 return 0;
2226
2227 va_start(ap, fmt);
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2229 va_end(ap);
2230 return ret;
2231}
2232
659372d3
SR
2233int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2234{
a813a159 2235 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2236}
769b0441
FW
2237EXPORT_SYMBOL_GPL(trace_vprintk);
2238
e2ac8ef5 2239static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2240{
6d158a81
SR
2241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2242
5a90f577 2243 iter->idx++;
6d158a81
SR
2244 if (buf_iter)
2245 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2246}
2247
e309b41d 2248static struct trace_entry *
bc21b478
SR
2249peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
dd0e545f 2251{
3928a8a2 2252 struct ring_buffer_event *event;
6d158a81 2253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2254
d769041f
SR
2255 if (buf_iter)
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2257 else
12883efb 2258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2259 lost_events);
d769041f 2260
4a9bd3f1
SR
2261 if (event) {
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2264 }
2265 iter->ent_size = 0;
2266 return NULL;
dd0e545f 2267}
d769041f 2268
dd0e545f 2269static struct trace_entry *
bc21b478
SR
2270__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2272{
12883efb 2273 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2274 struct trace_entry *ent, *next = NULL;
aa27497c 2275 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2276 int cpu_file = iter->cpu_file;
3928a8a2 2277 u64 next_ts = 0, ts;
bc0c38d1 2278 int next_cpu = -1;
12b5da34 2279 int next_size = 0;
bc0c38d1
SR
2280 int cpu;
2281
b04cc6b1
FW
2282 /*
 2283	 * If we are in a per_cpu trace file, don't bother iterating over
 2284	 * all CPUs; peek at that one CPU directly.
2285 */
ae3b5093 2286 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2287 if (ring_buffer_empty_cpu(buffer, cpu_file))
2288 return NULL;
bc21b478 2289 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2290 if (ent_cpu)
2291 *ent_cpu = cpu_file;
2292
2293 return ent;
2294 }
2295
ab46428c 2296 for_each_tracing_cpu(cpu) {
dd0e545f 2297
3928a8a2
SR
2298 if (ring_buffer_empty_cpu(buffer, cpu))
2299 continue;
dd0e545f 2300
bc21b478 2301 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2302
cdd31cd2
IM
2303 /*
2304 * Pick the entry with the smallest timestamp:
2305 */
3928a8a2 2306 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2307 next = ent;
2308 next_cpu = cpu;
3928a8a2 2309 next_ts = ts;
bc21b478 2310 next_lost = lost_events;
12b5da34 2311 next_size = iter->ent_size;
bc0c38d1
SR
2312 }
2313 }
2314
12b5da34
SR
2315 iter->ent_size = next_size;
2316
bc0c38d1
SR
2317 if (ent_cpu)
2318 *ent_cpu = next_cpu;
2319
3928a8a2
SR
2320 if (ent_ts)
2321 *ent_ts = next_ts;
2322
bc21b478
SR
2323 if (missing_events)
2324 *missing_events = next_lost;
2325
bc0c38d1
SR
2326 return next;
2327}
2328
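/*
 * [Editorial sketch, not part of trace.c] The selection loop in
 * __find_next_entry() above is a k-way merge: every per-CPU buffer is an
 * independent stream already ordered by timestamp, and the next entry to
 * report is simply the unconsumed head with the smallest timestamp.  Plain-C
 * model with invented names.
 */
struct ts_stream {
	const unsigned long long *ts;	/* timestamps, ascending */
	int len;
	int pos;			/* next unconsumed index */
};

/* Return the index of the stream holding the oldest entry, or -1 if empty. */
static int pick_oldest(struct ts_stream *s, int nr_streams)
{
	unsigned long long best_ts = 0;
	int best = -1;
	int i;

	for (i = 0; i < nr_streams; i++) {
		if (s[i].pos >= s[i].len)
			continue;
		if (best < 0 || s[i].ts[s[i].pos] < best_ts) {
			best = i;
			best_ts = s[i].ts[s[i].pos];
		}
	}
	return best;
}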
dd0e545f 2329/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2330struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2332{
bc21b478 2333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2334}
2335
2336/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2337void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2338{
bc21b478
SR
2339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
dd0e545f 2341
3928a8a2 2342 if (iter->ent)
e2ac8ef5 2343 trace_iterator_increment(iter);
dd0e545f 2344
3928a8a2 2345 return iter->ent ? iter : NULL;
b3806b43 2346}
bc0c38d1 2347
e309b41d 2348static void trace_consume(struct trace_iterator *iter)
b3806b43 2349{
12883efb 2350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2351 &iter->lost_events);
bc0c38d1
SR
2352}
2353
e309b41d 2354static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2355{
2356 struct trace_iterator *iter = m->private;
bc0c38d1 2357 int i = (int)*pos;
4e3c3333 2358 void *ent;
bc0c38d1 2359
a63ce5b3
SR
2360 WARN_ON_ONCE(iter->leftover);
2361
bc0c38d1
SR
2362 (*pos)++;
2363
2364 /* can't go backwards */
2365 if (iter->idx > i)
2366 return NULL;
2367
2368 if (iter->idx < 0)
955b61e5 2369 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2370 else
2371 ent = iter;
2372
2373 while (ent && iter->idx < i)
955b61e5 2374 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2375
2376 iter->pos = *pos;
2377
bc0c38d1
SR
2378 return ent;
2379}
2380
955b61e5 2381void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2382{
2f26ebd5
SR
2383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2386 u64 ts;
2387
12883efb 2388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2389
6d158a81
SR
2390 buf_iter = trace_buffer_iter(iter, cpu);
2391 if (!buf_iter)
2f26ebd5
SR
2392 return;
2393
2f26ebd5
SR
2394 ring_buffer_iter_reset(buf_iter);
2395
2396 /*
 2397	 * With the max latency tracers, it is possible that a reset
 2398	 * never took place on a cpu. This is evident from the timestamp
 2399	 * being before the start of the buffer.
2400 */
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2402 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2403 break;
2404 entries++;
2405 ring_buffer_read(buf_iter, NULL);
2406 }
2407
12883efb 2408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2409}
2410
d7350c3f 2411/*
d7350c3f
FW
 2412 * The current tracer is copied to avoid taking a global lock
 2413 * all around.
2414 */
bc0c38d1
SR
2415static void *s_start(struct seq_file *m, loff_t *pos)
2416{
2417 struct trace_iterator *iter = m->private;
2b6080f2 2418 struct trace_array *tr = iter->tr;
b04cc6b1 2419 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2420 void *p = NULL;
2421 loff_t l = 0;
3928a8a2 2422 int cpu;
bc0c38d1 2423
2fd196ec
HT
2424 /*
2425 * copy the tracer to avoid using a global lock all around.
2426 * iter->trace is a copy of current_trace, the pointer to the
2427 * name may be used instead of a strcmp(), as iter->trace->name
2428 * will point to the same string as current_trace->name.
2429 */
bc0c38d1 2430 mutex_lock(&trace_types_lock);
2b6080f2
SR
2431 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432 *iter->trace = *tr->current_trace;
d7350c3f 2433 mutex_unlock(&trace_types_lock);
bc0c38d1 2434
12883efb 2435#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2436 if (iter->snapshot && iter->trace->use_max_tr)
2437 return ERR_PTR(-EBUSY);
12883efb 2438#endif
debdd57f
HT
2439
2440 if (!iter->snapshot)
2441 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2442
bc0c38d1
SR
2443 if (*pos != iter->pos) {
2444 iter->ent = NULL;
2445 iter->cpu = 0;
2446 iter->idx = -1;
2447
ae3b5093 2448 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2449 for_each_tracing_cpu(cpu)
2f26ebd5 2450 tracing_iter_reset(iter, cpu);
b04cc6b1 2451 } else
2f26ebd5 2452 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2453
ac91d854 2454 iter->leftover = 0;
bc0c38d1
SR
2455 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2456 ;
2457
2458 } else {
a63ce5b3
SR
2459 /*
2460 * If we overflowed the seq_file before, then we want
2461 * to just reuse the trace_seq buffer again.
2462 */
2463 if (iter->leftover)
2464 p = iter;
2465 else {
2466 l = *pos - 1;
2467 p = s_next(m, p, &l);
2468 }
bc0c38d1
SR
2469 }
2470
4f535968 2471 trace_event_read_lock();
7e53bd42 2472 trace_access_lock(cpu_file);
bc0c38d1
SR
2473 return p;
2474}
2475
2476static void s_stop(struct seq_file *m, void *p)
2477{
7e53bd42
LJ
2478 struct trace_iterator *iter = m->private;
2479
12883efb 2480#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2481 if (iter->snapshot && iter->trace->use_max_tr)
2482 return;
12883efb 2483#endif
debdd57f
HT
2484
2485 if (!iter->snapshot)
2486 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2487
7e53bd42 2488 trace_access_unlock(iter->cpu_file);
4f535968 2489 trace_event_read_unlock();
bc0c38d1
SR
2490}
2491
39eaf7ef 2492static void
12883efb
SRRH
2493get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2495{
2496 unsigned long count;
2497 int cpu;
2498
2499 *total = 0;
2500 *entries = 0;
2501
2502 for_each_tracing_cpu(cpu) {
12883efb 2503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2504 /*
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2508 */
12883efb
SRRH
2509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2511 /* total is the same as the entries */
2512 *total += count;
2513 } else
2514 *total += count +
12883efb 2515 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2516 *entries += count;
2517 }
2518}
2519
e309b41d 2520static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2521{
d79ac28f
RV
2522 seq_puts(m, "# _------=> CPU# \n"
2523 "# / _-----=> irqs-off \n"
2524 "# | / _----=> need-resched \n"
2525 "# || / _---=> hardirq/softirq \n"
2526 "# ||| / _--=> preempt-depth \n"
2527 "# |||| / delay \n"
2528 "# cmd pid ||||| time | caller \n"
2529 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2530}
2531
12883efb 2532static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2533{
39eaf7ef
SR
2534 unsigned long total;
2535 unsigned long entries;
2536
12883efb 2537 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2538 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2539 entries, total, num_online_cpus());
2540 seq_puts(m, "#\n");
2541}
2542
12883efb 2543static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2544{
12883efb 2545 print_event_info(buf, m);
d79ac28f
RV
2546 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2547 "# | | | | |\n");
bc0c38d1
SR
2548}
2549
12883efb 2550static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2551{
12883efb 2552 print_event_info(buf, m);
d79ac28f
RV
2553 seq_puts(m, "# _-----=> irqs-off\n"
2554 "# / _----=> need-resched\n"
2555 "# | / _---=> hardirq/softirq\n"
2556 "# || / _--=> preempt-depth\n"
2557 "# ||| / delay\n"
2558 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2559 "# | | | |||| | |\n");
77271ce4 2560}
bc0c38d1 2561
62b915f1 2562void
bc0c38d1
SR
2563print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2564{
2565 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2566 struct trace_buffer *buf = iter->trace_buffer;
2567 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2568 struct tracer *type = iter->trace;
39eaf7ef
SR
2569 unsigned long entries;
2570 unsigned long total;
bc0c38d1
SR
2571 const char *name = "preemption";
2572
d840f718 2573 name = type->name;
bc0c38d1 2574
12883efb 2575 get_total_entries(buf, &total, &entries);
bc0c38d1 2576
888b55dc 2577 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2578 name, UTS_RELEASE);
888b55dc 2579 seq_puts(m, "# -----------------------------------"
bc0c38d1 2580 "---------------------------------\n");
888b55dc 2581 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2582 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2583 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2584 entries,
4c11d7ae 2585 total,
12883efb 2586 buf->cpu,
bc0c38d1
SR
2587#if defined(CONFIG_PREEMPT_NONE)
2588 "server",
2589#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2590 "desktop",
b5c21b45 2591#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2592 "preempt",
2593#else
2594 "unknown",
2595#endif
2596 /* These are reserved for later use */
2597 0, 0, 0, 0);
2598#ifdef CONFIG_SMP
2599 seq_printf(m, " #P:%d)\n", num_online_cpus());
2600#else
2601 seq_puts(m, ")\n");
2602#endif
888b55dc
KM
2603 seq_puts(m, "# -----------------\n");
2604 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2605 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2606 data->comm, data->pid,
2607 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2608 data->policy, data->rt_priority);
888b55dc 2609 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2610
2611 if (data->critical_start) {
888b55dc 2612 seq_puts(m, "# => started at: ");
214023c3
SR
2613 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2614 trace_print_seq(m, &iter->seq);
888b55dc 2615 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2616 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2617 trace_print_seq(m, &iter->seq);
8248ac05 2618 seq_puts(m, "\n#\n");
bc0c38d1
SR
2619 }
2620
888b55dc 2621 seq_puts(m, "#\n");
bc0c38d1
SR
2622}
2623
a309720c
SR
2624static void test_cpu_buff_start(struct trace_iterator *iter)
2625{
2626 struct trace_seq *s = &iter->seq;
2627
12ef7d44
SR
2628 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2629 return;
2630
2631 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2632 return;
2633
4462344e 2634 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2635 return;
2636
12883efb 2637 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2638 return;
2639
4462344e 2640 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2641
2642 /* Don't print started cpu buffer for the first entry of the trace */
2643 if (iter->idx > 1)
2644 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2645 iter->cpu);
a309720c
SR
2646}
2647
2c4f035f 2648static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2649{
214023c3 2650 struct trace_seq *s = &iter->seq;
bc0c38d1 2651 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2652 struct trace_entry *entry;
f633cef0 2653 struct trace_event *event;
bc0c38d1 2654
4e3c3333 2655 entry = iter->ent;
dd0e545f 2656
a309720c
SR
2657 test_cpu_buff_start(iter);
2658
c4a8e8be 2659 event = ftrace_find_event(entry->type);
bc0c38d1 2660
c4a8e8be 2661 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2662 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2663 trace_print_lat_context(iter);
2664 else
2665 trace_print_context(iter);
c4a8e8be 2666 }
bc0c38d1 2667
19a7fe20
SRRH
2668 if (trace_seq_has_overflowed(s))
2669 return TRACE_TYPE_PARTIAL_LINE;
2670
268ccda0 2671 if (event)
a9a57763 2672 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2673
19a7fe20 2674 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2675
19a7fe20 2676 return trace_handle_return(s);
bc0c38d1
SR
2677}
2678
2c4f035f 2679static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2680{
2681 struct trace_seq *s = &iter->seq;
2682 struct trace_entry *entry;
f633cef0 2683 struct trace_event *event;
f9896bf3
IM
2684
2685 entry = iter->ent;
dd0e545f 2686
19a7fe20
SRRH
2687 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2688 trace_seq_printf(s, "%d %d %llu ",
2689 entry->pid, iter->cpu, iter->ts);
2690
2691 if (trace_seq_has_overflowed(s))
2692 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2693
f633cef0 2694 event = ftrace_find_event(entry->type);
268ccda0 2695 if (event)
a9a57763 2696 return event->funcs->raw(iter, 0, event);
d9793bd8 2697
19a7fe20 2698 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2699
19a7fe20 2700 return trace_handle_return(s);
f9896bf3
IM
2701}
2702
2c4f035f 2703static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2704{
2705 struct trace_seq *s = &iter->seq;
2706 unsigned char newline = '\n';
2707 struct trace_entry *entry;
f633cef0 2708 struct trace_event *event;
5e3ca0ec
IM
2709
2710 entry = iter->ent;
dd0e545f 2711
c4a8e8be 2712 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2713 SEQ_PUT_HEX_FIELD(s, entry->pid);
2714 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2715 SEQ_PUT_HEX_FIELD(s, iter->ts);
2716 if (trace_seq_has_overflowed(s))
2717 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2718 }
5e3ca0ec 2719
f633cef0 2720 event = ftrace_find_event(entry->type);
268ccda0 2721 if (event) {
a9a57763 2722 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2723 if (ret != TRACE_TYPE_HANDLED)
2724 return ret;
2725 }
7104f300 2726
19a7fe20 2727 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2728
19a7fe20 2729 return trace_handle_return(s);
5e3ca0ec
IM
2730}
2731
2c4f035f 2732static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2733{
2734 struct trace_seq *s = &iter->seq;
2735 struct trace_entry *entry;
f633cef0 2736 struct trace_event *event;
cb0f12aa
IM
2737
2738 entry = iter->ent;
dd0e545f 2739
c4a8e8be 2740 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2741 SEQ_PUT_FIELD(s, entry->pid);
2742 SEQ_PUT_FIELD(s, iter->cpu);
2743 SEQ_PUT_FIELD(s, iter->ts);
2744 if (trace_seq_has_overflowed(s))
2745 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2746 }
cb0f12aa 2747
f633cef0 2748 event = ftrace_find_event(entry->type);
a9a57763
SR
2749 return event ? event->funcs->binary(iter, 0, event) :
2750 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2751}
2752
62b915f1 2753int trace_empty(struct trace_iterator *iter)
bc0c38d1 2754{
6d158a81 2755 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2756 int cpu;
2757
9aba60fe 2758 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2759 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2760 cpu = iter->cpu_file;
6d158a81
SR
2761 buf_iter = trace_buffer_iter(iter, cpu);
2762 if (buf_iter) {
2763 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2764 return 0;
2765 } else {
12883efb 2766 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2767 return 0;
2768 }
2769 return 1;
2770 }
2771
ab46428c 2772 for_each_tracing_cpu(cpu) {
6d158a81
SR
2773 buf_iter = trace_buffer_iter(iter, cpu);
2774 if (buf_iter) {
2775 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2776 return 0;
2777 } else {
12883efb 2778 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2779 return 0;
2780 }
bc0c38d1 2781 }
d769041f 2782
797d3712 2783 return 1;
bc0c38d1
SR
2784}
2785
4f535968 2786/* Called with trace_event_read_lock() held. */
955b61e5 2787enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2788{
2c4f035f
FW
2789 enum print_line_t ret;
2790
19a7fe20
SRRH
2791 if (iter->lost_events) {
2792 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2793 iter->cpu, iter->lost_events);
2794 if (trace_seq_has_overflowed(&iter->seq))
2795 return TRACE_TYPE_PARTIAL_LINE;
2796 }
bc21b478 2797
2c4f035f
FW
2798 if (iter->trace && iter->trace->print_line) {
2799 ret = iter->trace->print_line(iter);
2800 if (ret != TRACE_TYPE_UNHANDLED)
2801 return ret;
2802 }
72829bc3 2803
09ae7234
SRRH
2804 if (iter->ent->type == TRACE_BPUTS &&
2805 trace_flags & TRACE_ITER_PRINTK &&
2806 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2807 return trace_print_bputs_msg_only(iter);
2808
48ead020
FW
2809 if (iter->ent->type == TRACE_BPRINT &&
2810 trace_flags & TRACE_ITER_PRINTK &&
2811 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2812 return trace_print_bprintk_msg_only(iter);
48ead020 2813
66896a85
FW
2814 if (iter->ent->type == TRACE_PRINT &&
2815 trace_flags & TRACE_ITER_PRINTK &&
2816 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2817 return trace_print_printk_msg_only(iter);
66896a85 2818
cb0f12aa
IM
2819 if (trace_flags & TRACE_ITER_BIN)
2820 return print_bin_fmt(iter);
2821
5e3ca0ec
IM
2822 if (trace_flags & TRACE_ITER_HEX)
2823 return print_hex_fmt(iter);
2824
f9896bf3
IM
2825 if (trace_flags & TRACE_ITER_RAW)
2826 return print_raw_fmt(iter);
2827
f9896bf3
IM
2828 return print_trace_fmt(iter);
2829}
2830
7e9a49ef
JO
2831void trace_latency_header(struct seq_file *m)
2832{
2833 struct trace_iterator *iter = m->private;
2834
2835 /* print nothing if the buffers are empty */
2836 if (trace_empty(iter))
2837 return;
2838
2839 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2840 print_trace_header(m, iter);
2841
2842 if (!(trace_flags & TRACE_ITER_VERBOSE))
2843 print_lat_help_header(m);
2844}
2845
62b915f1
JO
2846void trace_default_header(struct seq_file *m)
2847{
2848 struct trace_iterator *iter = m->private;
2849
f56e7f8e
JO
2850 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2851 return;
2852
62b915f1
JO
2853 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2854 /* print nothing if the buffers are empty */
2855 if (trace_empty(iter))
2856 return;
2857 print_trace_header(m, iter);
2858 if (!(trace_flags & TRACE_ITER_VERBOSE))
2859 print_lat_help_header(m);
2860 } else {
77271ce4
SR
2861 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2862 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2863 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2864 else
12883efb 2865 print_func_help_header(iter->trace_buffer, m);
77271ce4 2866 }
62b915f1
JO
2867 }
2868}
2869
e0a413f6
SR
2870static void test_ftrace_alive(struct seq_file *m)
2871{
2872 if (!ftrace_is_dead())
2873 return;
d79ac28f
RV
2874 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2875 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2876}
2877
d8741e2e 2878#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2879static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2880{
d79ac28f
RV
2881 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2882 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2883 "# Takes a snapshot of the main buffer.\n"
2884 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2885 "# (Doesn't have to be '2' works with any number that\n"
2886 "# is not a '0' or '1')\n");
d8741e2e 2887}
f1affcaa
SRRH
2888
2889static void show_snapshot_percpu_help(struct seq_file *m)
2890{
fa6f0cc7 2891 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2892#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2893 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2894 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2895#else
d79ac28f
RV
2896 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2897 "# Must use main snapshot file to allocate.\n");
f1affcaa 2898#endif
d79ac28f
RV
2899 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2900 "# (Doesn't have to be '2' works with any number that\n"
2901 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2902}
2903
d8741e2e
SRRH
2904static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2905{
45ad21ca 2906 if (iter->tr->allocated_snapshot)
fa6f0cc7 2907 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2908 else
fa6f0cc7 2909 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2910
fa6f0cc7 2911 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2912 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2913 show_snapshot_main_help(m);
2914 else
2915 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2916}
2917#else
2918/* Should never be called */
2919static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2920#endif
2921
bc0c38d1
SR
2922static int s_show(struct seq_file *m, void *v)
2923{
2924 struct trace_iterator *iter = v;
a63ce5b3 2925 int ret;
bc0c38d1
SR
2926
2927 if (iter->ent == NULL) {
2928 if (iter->tr) {
2929 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2930 seq_puts(m, "#\n");
e0a413f6 2931 test_ftrace_alive(m);
bc0c38d1 2932 }
d8741e2e
SRRH
2933 if (iter->snapshot && trace_empty(iter))
2934 print_snapshot_help(m, iter);
2935 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2936 iter->trace->print_header(m);
62b915f1
JO
2937 else
2938 trace_default_header(m);
2939
a63ce5b3
SR
2940 } else if (iter->leftover) {
2941 /*
2942 * If we filled the seq_file buffer earlier, we
2943 * want to just show it now.
2944 */
2945 ret = trace_print_seq(m, &iter->seq);
2946
2947 /* ret should this time be zero, but you never know */
2948 iter->leftover = ret;
2949
bc0c38d1 2950 } else {
f9896bf3 2951 print_trace_line(iter);
a63ce5b3
SR
2952 ret = trace_print_seq(m, &iter->seq);
2953 /*
2954 * If we overflow the seq_file buffer, then it will
2955 * ask us for this data again at start up.
2956 * Use that instead.
2957 * ret is 0 if seq_file write succeeded.
2958 * -1 otherwise.
2959 */
2960 iter->leftover = ret;
bc0c38d1
SR
2961 }
2962
2963 return 0;
2964}
2965
649e9c70
ON
2966/*
2967 * Should be used after trace_array_get(), trace_types_lock
2968 * ensures that i_cdev was already initialized.
2969 */
2970static inline int tracing_get_cpu(struct inode *inode)
2971{
2972 if (inode->i_cdev) /* See trace_create_cpu_file() */
2973 return (long)inode->i_cdev - 1;
2974 return RING_BUFFER_ALL_CPUS;
2975}
2976
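/*
 * [Editorial sketch, not part of trace.c] The encoding that tracing_get_cpu()
 * above relies on: a small integer is stashed in an otherwise unused pointer
 * field as (value + 1), so that a NULL field unambiguously means "no specific
 * CPU" and decodes to RING_BUFFER_ALL_CPUS.  Names below are invented.
 */
#define ALL_CPUS_MODEL	(-1)

static void *encode_cpu(int cpu)
{
	return (void *)(long)(cpu + 1);		/* cpu 0 encodes as 1, never NULL */
}

static int decode_cpu(void *cookie)
{
	if (cookie)
		return (int)((long)cookie - 1);
	return ALL_CPUS_MODEL;			/* unset field means "all CPUs" */
}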
88e9d34c 2977static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
2978 .start = s_start,
2979 .next = s_next,
2980 .stop = s_stop,
2981 .show = s_show,
bc0c38d1
SR
2982};
2983
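/*
 * [Editorial example, not part of trace.c] The seq_file contract that
 * tracer_seq_ops above implements: start() positions the iterator for a given
 * offset, next() advances it, show() formats one record, and stop() undoes
 * whatever start() set up (locks, references).  A minimal sketch of the same
 * four callbacks walking a fixed array; it assumes only <linux/seq_file.h>
 * and is not wired to any file here.
 */
static const char *example_names[] = { "alpha", "beta", "gamma" };

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	return *pos < ARRAY_SIZE(example_names) ?
		(void *)&example_names[*pos] : NULL;
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return ex_start(m, pos);
}

static void ex_stop(struct seq_file *m, void *v)
{
	/* nothing was locked in ex_start(), so nothing to release */
}

static int ex_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char **)v);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= ex_start,
	.next	= ex_next,
	.stop	= ex_stop,
	.show	= ex_show,
};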
e309b41d 2984static struct trace_iterator *
6484c71c 2985__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 2986{
6484c71c 2987 struct trace_array *tr = inode->i_private;
bc0c38d1 2988 struct trace_iterator *iter;
50e18b94 2989 int cpu;
bc0c38d1 2990
85a2f9b4
SR
2991 if (tracing_disabled)
2992 return ERR_PTR(-ENODEV);
60a11774 2993
50e18b94 2994 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
2995 if (!iter)
2996 return ERR_PTR(-ENOMEM);
bc0c38d1 2997
6d158a81
SR
2998 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2999 GFP_KERNEL);
93574fcc
DC
3000 if (!iter->buffer_iter)
3001 goto release;
3002
d7350c3f
FW
3003 /*
3004 * We make a copy of the current tracer to avoid concurrent
3005 * changes on it while we are reading.
3006 */
bc0c38d1 3007 mutex_lock(&trace_types_lock);
d7350c3f 3008 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3009 if (!iter->trace)
d7350c3f 3010 goto fail;
85a2f9b4 3011
2b6080f2 3012 *iter->trace = *tr->current_trace;
d7350c3f 3013
79f55997 3014 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3015 goto fail;
3016
12883efb
SRRH
3017 iter->tr = tr;
3018
3019#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3020 /* Currently only the top directory has a snapshot */
3021 if (tr->current_trace->print_max || snapshot)
12883efb 3022 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3023 else
12883efb
SRRH
3024#endif
3025 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3026 iter->snapshot = snapshot;
bc0c38d1 3027 iter->pos = -1;
6484c71c 3028 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3029 mutex_init(&iter->mutex);
bc0c38d1 3030
8bba1bf5
MM
3031 /* Notify the tracer early; before we stop tracing. */
3032 if (iter->trace && iter->trace->open)
a93751ca 3033 iter->trace->open(iter);
8bba1bf5 3034
12ef7d44 3035 /* Annotate start of buffers if we had overruns */
12883efb 3036 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3037 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3038
8be0709f 3039 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3040 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3041 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3042
debdd57f
HT
3043 /* stop the trace while dumping if we are not opening "snapshot" */
3044 if (!iter->snapshot)
2b6080f2 3045 tracing_stop_tr(tr);
2f26ebd5 3046
ae3b5093 3047 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3048 for_each_tracing_cpu(cpu) {
b04cc6b1 3049 iter->buffer_iter[cpu] =
12883efb 3050 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3051 }
3052 ring_buffer_read_prepare_sync();
3053 for_each_tracing_cpu(cpu) {
3054 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3055 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3056 }
3057 } else {
3058 cpu = iter->cpu_file;
3928a8a2 3059 iter->buffer_iter[cpu] =
12883efb 3060 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3061 ring_buffer_read_prepare_sync();
3062 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3063 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3064 }
3065
bc0c38d1
SR
3066 mutex_unlock(&trace_types_lock);
3067
bc0c38d1 3068 return iter;
3928a8a2 3069
d7350c3f 3070 fail:
3928a8a2 3071 mutex_unlock(&trace_types_lock);
d7350c3f 3072 kfree(iter->trace);
6d158a81 3073 kfree(iter->buffer_iter);
93574fcc 3074release:
50e18b94
JO
3075 seq_release_private(inode, file);
3076 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3077}
3078
3079int tracing_open_generic(struct inode *inode, struct file *filp)
3080{
60a11774
SR
3081 if (tracing_disabled)
3082 return -ENODEV;
3083
bc0c38d1
SR
3084 filp->private_data = inode->i_private;
3085 return 0;
3086}
3087
2e86421d
GB
3088bool tracing_is_disabled(void)
3089{
3090 return (tracing_disabled) ? true: false;
3091}
3092
7b85af63
SRRH
3093/*
3094 * Open and update trace_array ref count.
3095 * Must have the current trace_array passed to it.
3096 */
dcc30223 3097static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3098{
3099 struct trace_array *tr = inode->i_private;
3100
3101 if (tracing_disabled)
3102 return -ENODEV;
3103
3104 if (trace_array_get(tr) < 0)
3105 return -ENODEV;
3106
3107 filp->private_data = inode->i_private;
3108
3109 return 0;
7b85af63
SRRH
3110}
3111
4fd27358 3112static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3113{
6484c71c 3114 struct trace_array *tr = inode->i_private;
907f2784 3115 struct seq_file *m = file->private_data;
4acd4d00 3116 struct trace_iterator *iter;
3928a8a2 3117 int cpu;
bc0c38d1 3118
ff451961 3119 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3120 trace_array_put(tr);
4acd4d00 3121 return 0;
ff451961 3122 }
4acd4d00 3123
6484c71c 3124 /* Writes do not use seq_file */
4acd4d00 3125 iter = m->private;
bc0c38d1 3126 mutex_lock(&trace_types_lock);
a695cb58 3127
3928a8a2
SR
3128 for_each_tracing_cpu(cpu) {
3129 if (iter->buffer_iter[cpu])
3130 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3131 }
3132
bc0c38d1
SR
3133 if (iter->trace && iter->trace->close)
3134 iter->trace->close(iter);
3135
debdd57f
HT
3136 if (!iter->snapshot)
3137 /* reenable tracing if it was previously enabled */
2b6080f2 3138 tracing_start_tr(tr);
f77d09a3
AL
3139
3140 __trace_array_put(tr);
3141
bc0c38d1
SR
3142 mutex_unlock(&trace_types_lock);
3143
d7350c3f 3144 mutex_destroy(&iter->mutex);
b0dfa978 3145 free_cpumask_var(iter->started);
d7350c3f 3146 kfree(iter->trace);
6d158a81 3147 kfree(iter->buffer_iter);
50e18b94 3148 seq_release_private(inode, file);
ff451961 3149
bc0c38d1
SR
3150 return 0;
3151}
3152
7b85af63
SRRH
3153static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3154{
3155 struct trace_array *tr = inode->i_private;
3156
3157 trace_array_put(tr);
bc0c38d1
SR
3158 return 0;
3159}
3160
7b85af63
SRRH
3161static int tracing_single_release_tr(struct inode *inode, struct file *file)
3162{
3163 struct trace_array *tr = inode->i_private;
3164
3165 trace_array_put(tr);
3166
3167 return single_release(inode, file);
3168}
3169
bc0c38d1
SR
3170static int tracing_open(struct inode *inode, struct file *file)
3171{
6484c71c 3172 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3173 struct trace_iterator *iter;
3174 int ret = 0;
bc0c38d1 3175
ff451961
SRRH
3176 if (trace_array_get(tr) < 0)
3177 return -ENODEV;
3178
4acd4d00 3179 /* If this file was open for write, then erase contents */
6484c71c
ON
3180 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3181 int cpu = tracing_get_cpu(inode);
3182
3183 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3184 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3185 else
6484c71c 3186 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3187 }
bc0c38d1 3188
4acd4d00 3189 if (file->f_mode & FMODE_READ) {
6484c71c 3190 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3191 if (IS_ERR(iter))
3192 ret = PTR_ERR(iter);
3193 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3194 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3195 }
ff451961
SRRH
3196
3197 if (ret < 0)
3198 trace_array_put(tr);
3199
bc0c38d1
SR
3200 return ret;
3201}
3202
607e2ea1
SRRH
3203/*
3204 * Some tracers are not suitable for instance buffers.
3205 * A tracer is always available for the global array (toplevel)
3206 * or if it explicitly states that it is.
3207 */
3208static bool
3209trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3210{
3211 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3212}
3213
3214/* Find the next tracer that this trace array may use */
3215static struct tracer *
3216get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3217{
3218 while (t && !trace_ok_for_array(t, tr))
3219 t = t->next;
3220
3221 return t;
3222}
3223
e309b41d 3224static void *
bc0c38d1
SR
3225t_next(struct seq_file *m, void *v, loff_t *pos)
3226{
607e2ea1 3227 struct trace_array *tr = m->private;
f129e965 3228 struct tracer *t = v;
bc0c38d1
SR
3229
3230 (*pos)++;
3231
3232 if (t)
607e2ea1 3233 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3234
bc0c38d1
SR
3235 return t;
3236}
3237
3238static void *t_start(struct seq_file *m, loff_t *pos)
3239{
607e2ea1 3240 struct trace_array *tr = m->private;
f129e965 3241 struct tracer *t;
bc0c38d1
SR
3242 loff_t l = 0;
3243
3244 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3245
3246 t = get_tracer_for_array(tr, trace_types);
3247 for (; t && l < *pos; t = t_next(m, t, &l))
3248 ;
bc0c38d1
SR
3249
3250 return t;
3251}
3252
3253static void t_stop(struct seq_file *m, void *p)
3254{
3255 mutex_unlock(&trace_types_lock);
3256}
3257
3258static int t_show(struct seq_file *m, void *v)
3259{
3260 struct tracer *t = v;
3261
3262 if (!t)
3263 return 0;
3264
fa6f0cc7 3265 seq_puts(m, t->name);
bc0c38d1
SR
3266 if (t->next)
3267 seq_putc(m, ' ');
3268 else
3269 seq_putc(m, '\n');
3270
3271 return 0;
3272}
3273
88e9d34c 3274static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3275 .start = t_start,
3276 .next = t_next,
3277 .stop = t_stop,
3278 .show = t_show,
bc0c38d1
SR
3279};
3280
3281static int show_traces_open(struct inode *inode, struct file *file)
3282{
607e2ea1
SRRH
3283 struct trace_array *tr = inode->i_private;
3284 struct seq_file *m;
3285 int ret;
3286
60a11774
SR
3287 if (tracing_disabled)
3288 return -ENODEV;
3289
607e2ea1
SRRH
3290 ret = seq_open(file, &show_traces_seq_ops);
3291 if (ret)
3292 return ret;
3293
3294 m = file->private_data;
3295 m->private = tr;
3296
3297 return 0;
bc0c38d1
SR
3298}
3299
4acd4d00
SR
3300static ssize_t
3301tracing_write_stub(struct file *filp, const char __user *ubuf,
3302 size_t count, loff_t *ppos)
3303{
3304 return count;
3305}
3306
098c879e 3307loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3308{
098c879e
SRRH
3309 int ret;
3310
364829b1 3311 if (file->f_mode & FMODE_READ)
098c879e 3312 ret = seq_lseek(file, offset, whence);
364829b1 3313 else
098c879e
SRRH
3314 file->f_pos = ret = 0;
3315
3316 return ret;
364829b1
SP
3317}
3318
5e2336a0 3319static const struct file_operations tracing_fops = {
4bf39a94
IM
3320 .open = tracing_open,
3321 .read = seq_read,
4acd4d00 3322 .write = tracing_write_stub,
098c879e 3323 .llseek = tracing_lseek,
4bf39a94 3324 .release = tracing_release,
bc0c38d1
SR
3325};
3326
5e2336a0 3327static const struct file_operations show_traces_fops = {
c7078de1
IM
3328 .open = show_traces_open,
3329 .read = seq_read,
3330 .release = seq_release,
b444786f 3331 .llseek = seq_lseek,
c7078de1
IM
3332};
3333
36dfe925
IM
3334/*
3335 * The tracer itself will not take this lock, but still we want
3336 * to provide a consistent cpumask to user-space:
3337 */
3338static DEFINE_MUTEX(tracing_cpumask_update_lock);
3339
3340/*
3341 * Temporary storage for the character representation of the
3342 * CPU bitmask (and one more byte for the newline):
3343 */
3344static char mask_str[NR_CPUS + 1];
3345
c7078de1
IM
3346static ssize_t
3347tracing_cpumask_read(struct file *filp, char __user *ubuf,
3348 size_t count, loff_t *ppos)
3349{
ccfe9e42 3350 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3351 int len;
c7078de1
IM
3352
3353 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3354
ccfe9e42 3355 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
36dfe925
IM
3356 if (count - len < 2) {
3357 count = -EINVAL;
3358 goto out_err;
3359 }
3360 len += sprintf(mask_str + len, "\n");
3361 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3362
3363out_err:
c7078de1
IM
3364 mutex_unlock(&tracing_cpumask_update_lock);
3365
3366 return count;
3367}
3368
3369static ssize_t
3370tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3371 size_t count, loff_t *ppos)
3372{
ccfe9e42 3373 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3374 cpumask_var_t tracing_cpumask_new;
2b6080f2 3375 int err, cpu;
9e01c1b7
RR
3376
3377 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3378 return -ENOMEM;
c7078de1 3379
9e01c1b7 3380 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3381 if (err)
36dfe925
IM
3382 goto err_unlock;
3383
215368e8
LZ
3384 mutex_lock(&tracing_cpumask_update_lock);
3385
a5e25883 3386 local_irq_disable();
0b9b12c1 3387 arch_spin_lock(&tr->max_lock);
ab46428c 3388 for_each_tracing_cpu(cpu) {
36dfe925
IM
3389 /*
3390 * Increase/decrease the disabled counter if we are
3391 * about to flip a bit in the cpumask:
3392 */
ccfe9e42 3393 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3394 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3395 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3396 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3397 }
ccfe9e42 3398 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3399 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3400 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3401 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3402 }
3403 }
0b9b12c1 3404 arch_spin_unlock(&tr->max_lock);
a5e25883 3405 local_irq_enable();
36dfe925 3406
ccfe9e42 3407 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3408
3409 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3410 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3411
3412 return count;
36dfe925
IM
3413
3414err_unlock:
215368e8 3415 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3416
3417 return err;
c7078de1
IM
3418}
3419
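/*
 * [Editorial sketch, not part of trace.c] The bit-diff walk performed by
 * tracing_cpumask_write() above, modelled with plain bitmasks: only CPUs
 * whose bit actually flips between the old and the new mask have their
 * recording state toggled, everything else is left untouched.  Names and
 * callbacks below are invented.
 */
static void apply_cpumask(unsigned long old_mask, unsigned long new_mask,
			  int nr_cpus,
			  void (*disable)(int cpu), void (*enable)(int cpu))
{
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		unsigned long bit = 1UL << cpu;

		if ((old_mask & bit) && !(new_mask & bit))
			disable(cpu);		/* was traced, now excluded */
		if (!(old_mask & bit) && (new_mask & bit))
			enable(cpu);		/* was excluded, now traced */
	}
}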
5e2336a0 3420static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3421 .open = tracing_open_generic_tr,
c7078de1
IM
3422 .read = tracing_cpumask_read,
3423 .write = tracing_cpumask_write,
ccfe9e42 3424 .release = tracing_release_generic_tr,
b444786f 3425 .llseek = generic_file_llseek,
bc0c38d1
SR
3426};
3427
fdb372ed 3428static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3429{
d8e83d26 3430 struct tracer_opt *trace_opts;
2b6080f2 3431 struct trace_array *tr = m->private;
d8e83d26 3432 u32 tracer_flags;
d8e83d26 3433 int i;
adf9f195 3434
d8e83d26 3435 mutex_lock(&trace_types_lock);
2b6080f2
SR
3436 tracer_flags = tr->current_trace->flags->val;
3437 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3438
bc0c38d1
SR
3439 for (i = 0; trace_options[i]; i++) {
3440 if (trace_flags & (1 << i))
fdb372ed 3441 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3442 else
fdb372ed 3443 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3444 }
3445
adf9f195
FW
3446 for (i = 0; trace_opts[i].name; i++) {
3447 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3448 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3449 else
fdb372ed 3450 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3451 }
d8e83d26 3452 mutex_unlock(&trace_types_lock);
adf9f195 3453
fdb372ed 3454 return 0;
bc0c38d1 3455}
bc0c38d1 3456
8c1a49ae 3457static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3458 struct tracer_flags *tracer_flags,
3459 struct tracer_opt *opts, int neg)
3460{
8c1a49ae 3461 struct tracer *trace = tr->current_trace;
8d18eaaf 3462 int ret;
bc0c38d1 3463
8c1a49ae 3464 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3465 if (ret)
3466 return ret;
3467
3468 if (neg)
3469 tracer_flags->val &= ~opts->bit;
3470 else
3471 tracer_flags->val |= opts->bit;
3472 return 0;
bc0c38d1
SR
3473}
3474
adf9f195 3475/* Try to assign a tracer specific option */
8c1a49ae 3476static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3477{
8c1a49ae 3478 struct tracer *trace = tr->current_trace;
7770841e 3479 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3480 struct tracer_opt *opts = NULL;
8d18eaaf 3481 int i;
adf9f195 3482
7770841e
Z
3483 for (i = 0; tracer_flags->opts[i].name; i++) {
3484 opts = &tracer_flags->opts[i];
adf9f195 3485
8d18eaaf 3486 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3487 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3488 }
adf9f195 3489
8d18eaaf 3490 return -EINVAL;
adf9f195
FW
3491}
3492
613f04a0
SRRH
3493/* Some tracers require overwrite to stay enabled */
3494int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3495{
3496 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3497 return -1;
3498
3499 return 0;
3500}
3501
2b6080f2 3502int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3503{
3504 /* do nothing if flag is already set */
3505 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3506 return 0;
3507
3508 /* Give the tracer a chance to approve the change */
2b6080f2 3509 if (tr->current_trace->flag_changed)
bf6065b5 3510 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3511 return -EINVAL;
af4617bd
SR
3512
3513 if (enabled)
3514 trace_flags |= mask;
3515 else
3516 trace_flags &= ~mask;
e870e9a1
LZ
3517
3518 if (mask == TRACE_ITER_RECORD_CMD)
3519 trace_event_enable_cmd_record(enabled);
750912fa 3520
80902822 3521 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3522 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3523#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3524 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3525#endif
3526 }
81698831
SR
3527
3528 if (mask == TRACE_ITER_PRINTK)
3529 trace_printk_start_stop_comm(enabled);
613f04a0
SRRH
3530
3531 return 0;
af4617bd
SR
3532}
3533
2b6080f2 3534static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3535{
8d18eaaf 3536 char *cmp;
bc0c38d1 3537 int neg = 0;
613f04a0 3538 int ret = -ENODEV;
bc0c38d1
SR
3539 int i;
3540
7bcfaf54 3541 cmp = strstrip(option);
bc0c38d1 3542
8d18eaaf 3543 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3544 neg = 1;
3545 cmp += 2;
3546 }
3547
69d34da2
SRRH
3548 mutex_lock(&trace_types_lock);
3549
bc0c38d1 3550 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3551 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3552 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3553 break;
3554 }
3555 }
adf9f195
FW
3556
3557 /* If no option could be set, test the specific tracer options */
69d34da2 3558 if (!trace_options[i])
8c1a49ae 3559 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3560
3561 mutex_unlock(&trace_types_lock);
bc0c38d1 3562
7bcfaf54
SR
3563 return ret;
3564}
3565
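/*
 * Editorial sketch: how trace_set_options() consumes its string.  Writable
 * buffers are used because strstrip() modifies its argument; "overwrite" is
 * assumed to be the trace_options name backing TRACE_ITER_OVERWRITE.
 */
static void __maybe_unused example_set_options(struct trace_array *tr)
{
	char opt_on[]  = "overwrite";	/* sets TRACE_ITER_OVERWRITE */
	char opt_off[] = "nooverwrite";	/* the "no" prefix clears it */

	trace_set_options(tr, opt_on);
	trace_set_options(tr, opt_off);
}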
3566static ssize_t
3567tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3568 size_t cnt, loff_t *ppos)
3569{
2b6080f2
SR
3570 struct seq_file *m = filp->private_data;
3571 struct trace_array *tr = m->private;
7bcfaf54 3572 char buf[64];
613f04a0 3573 int ret;
7bcfaf54
SR
3574
3575 if (cnt >= sizeof(buf))
3576 return -EINVAL;
3577
3578 if (copy_from_user(&buf, ubuf, cnt))
3579 return -EFAULT;
3580
a8dd2176
SR
3581 buf[cnt] = 0;
3582
2b6080f2 3583 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3584 if (ret < 0)
3585 return ret;
7bcfaf54 3586
cf8517cf 3587 *ppos += cnt;
bc0c38d1
SR
3588
3589 return cnt;
3590}
3591
fdb372ed
LZ
3592static int tracing_trace_options_open(struct inode *inode, struct file *file)
3593{
7b85af63 3594 struct trace_array *tr = inode->i_private;
f77d09a3 3595 int ret;
7b85af63 3596
fdb372ed
LZ
3597 if (tracing_disabled)
3598 return -ENODEV;
2b6080f2 3599
7b85af63
SRRH
3600 if (trace_array_get(tr) < 0)
3601 return -ENODEV;
3602
f77d09a3
AL
3603 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3604 if (ret < 0)
3605 trace_array_put(tr);
3606
3607 return ret;
fdb372ed
LZ
3608}
3609
5e2336a0 3610static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3611 .open = tracing_trace_options_open,
3612 .read = seq_read,
3613 .llseek = seq_lseek,
7b85af63 3614 .release = tracing_single_release_tr,
ee6bce52 3615 .write = tracing_trace_options_write,
bc0c38d1
SR
3616};
3617
7bd2f24c
IM
3618static const char readme_msg[] =
3619 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3620 "# echo 0 > tracing_on : quick way to disable tracing\n"
3621 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3622 " Important files:\n"
3623 " trace\t\t\t- The static contents of the buffer\n"
3624 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3625 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3626 " current_tracer\t- function and latency tracers\n"
3627 " available_tracers\t- list of configured tracers for current_tracer\n"
3628 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3629 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3630 " trace_clock\t\t-change the clock used to order events\n"
3631 " local: Per cpu clock but may not be synced across CPUs\n"
3632 " global: Synced across CPUs but slows tracing down.\n"
3633 " counter: Not a clock, but just an increment\n"
3634 " uptime: Jiffy counter from time of boot\n"
3635 " perf: Same clock that perf events use\n"
3636#ifdef CONFIG_X86_64
3637 " x86-tsc: TSC cycle counter\n"
3638#endif
3639 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3640 " tracing_cpumask\t- Limit which CPUs to trace\n"
3641 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3642 "\t\t\t Remove sub-buffer with rmdir\n"
3643 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3644 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3645 "\t\t\t option name\n"
939c7a4f 3646 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3647#ifdef CONFIG_DYNAMIC_FTRACE
3648 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3649 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3650 "\t\t\t functions\n"
3651 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3652 "\t modules: Can select a group via module\n"
3653 "\t Format: :mod:<module-name>\n"
3654 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3655 "\t triggers: a command to perform when function is hit\n"
3656 "\t Format: <function>:<trigger>[:count]\n"
3657 "\t trigger: traceon, traceoff\n"
3658 "\t\t enable_event:<system>:<event>\n"
3659 "\t\t disable_event:<system>:<event>\n"
22f45649 3660#ifdef CONFIG_STACKTRACE
71485c45 3661 "\t\t stacktrace\n"
22f45649
SRRH
3662#endif
3663#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3664 "\t\t snapshot\n"
22f45649 3665#endif
17a280ea
SRRH
3666 "\t\t dump\n"
3667 "\t\t cpudump\n"
71485c45
SRRH
3668 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3669 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3670 "\t The first one will disable tracing every time do_fault is hit\n"
3671 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3672 "\t The first time do trap is hit and it disables tracing, the\n"
3673 "\t counter will decrement to 2. If tracing is already disabled,\n"
3674 "\t the counter will not decrement. It only decrements when the\n"
3675 "\t trigger did work\n"
3676 "\t To remove trigger without count:\n"
3677 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3678 "\t To remove trigger with a count:\n"
3679 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3680 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3681 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3682 "\t modules: Can select a group via module command :mod:\n"
3683 "\t Does not accept triggers\n"
22f45649
SRRH
3684#endif /* CONFIG_DYNAMIC_FTRACE */
3685#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3686 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3687 "\t\t (function)\n"
22f45649
SRRH
3688#endif
3689#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3690 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3691 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3692 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3693#endif
3694#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3695 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3696 "\t\t\t snapshot buffer. Read the contents for more\n"
3697 "\t\t\t information\n"
22f45649 3698#endif
991821c8 3699#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3700 " stack_trace\t\t- Shows the max stack trace when active\n"
3701 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3702 "\t\t\t Write into this file to reset the max size (trigger a\n"
3703 "\t\t\t new trace)\n"
22f45649 3704#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3705 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3706 "\t\t\t traces\n"
22f45649 3707#endif
991821c8 3708#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3709 " events/\t\t- Directory containing all trace event subsystems:\n"
3710 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3711 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3712 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3713 "\t\t\t events\n"
26f25564 3714 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3715 " events/<system>/<event>/\t- Directory containing control files for\n"
3716 "\t\t\t <event>:\n"
26f25564
TZ
3717 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3718 " filter\t\t- If set, only events passing filter are traced\n"
3719 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3720 "\t Format: <trigger>[:count][if <filter>]\n"
3721 "\t trigger: traceon, traceoff\n"
3722 "\t enable_event:<system>:<event>\n"
3723 "\t disable_event:<system>:<event>\n"
26f25564 3724#ifdef CONFIG_STACKTRACE
71485c45 3725 "\t\t stacktrace\n"
26f25564
TZ
3726#endif
3727#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3728 "\t\t snapshot\n"
26f25564 3729#endif
71485c45
SRRH
3730 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3731 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3732 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3733 "\t events/block/block_unplug/trigger\n"
3734 "\t The first disables tracing every time block_unplug is hit.\n"
3735 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3736 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3737 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3738 "\t Like function triggers, the counter is only decremented if it\n"
3739 "\t enabled or disabled tracing.\n"
3740 "\t To remove a trigger without a count:\n"
3741 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3742 "\t To remove a trigger with a count:\n"
3743 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3744 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3745;
3746
3747static ssize_t
3748tracing_readme_read(struct file *filp, char __user *ubuf,
3749 size_t cnt, loff_t *ppos)
3750{
3751 return simple_read_from_buffer(ubuf, cnt, ppos,
3752 readme_msg, strlen(readme_msg));
3753}
3754
5e2336a0 3755static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3756 .open = tracing_open_generic,
3757 .read = tracing_readme_read,
b444786f 3758 .llseek = generic_file_llseek,
7bd2f24c
IM
3759};
3760
42584c81
YY
3761static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3762{
3763 unsigned int *ptr = v;
69abe6a5 3764
42584c81
YY
3765 if (*pos || m->count)
3766 ptr++;
69abe6a5 3767
42584c81 3768 (*pos)++;
69abe6a5 3769
939c7a4f
YY
3770 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3771 ptr++) {
42584c81
YY
3772 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3773 continue;
69abe6a5 3774
42584c81
YY
3775 return ptr;
3776 }
69abe6a5 3777
42584c81
YY
3778 return NULL;
3779}
3780
3781static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3782{
3783 void *v;
3784 loff_t l = 0;
69abe6a5 3785
4c27e756
SRRH
3786 preempt_disable();
3787 arch_spin_lock(&trace_cmdline_lock);
3788
939c7a4f 3789 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3790 while (l <= *pos) {
3791 v = saved_cmdlines_next(m, v, &l);
3792 if (!v)
3793 return NULL;
69abe6a5
AP
3794 }
3795
42584c81
YY
3796 return v;
3797}
3798
3799static void saved_cmdlines_stop(struct seq_file *m, void *v)
3800{
4c27e756
SRRH
3801 arch_spin_unlock(&trace_cmdline_lock);
3802 preempt_enable();
42584c81 3803}
69abe6a5 3804
42584c81
YY
3805static int saved_cmdlines_show(struct seq_file *m, void *v)
3806{
3807 char buf[TASK_COMM_LEN];
3808 unsigned int *pid = v;
69abe6a5 3809
4c27e756 3810 __trace_find_cmdline(*pid, buf);
42584c81
YY
3811 seq_printf(m, "%d %s\n", *pid, buf);
3812 return 0;
3813}
3814
3815static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3816 .start = saved_cmdlines_start,
3817 .next = saved_cmdlines_next,
3818 .stop = saved_cmdlines_stop,
3819 .show = saved_cmdlines_show,
3820};
3821
3822static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3823{
3824 if (tracing_disabled)
3825 return -ENODEV;
3826
3827 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3828}
3829
3830static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3831 .open = tracing_saved_cmdlines_open,
3832 .read = seq_read,
3833 .llseek = seq_lseek,
3834 .release = seq_release,
69abe6a5
AP
3835};
3836
939c7a4f
YY
3837static ssize_t
3838tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3839 size_t cnt, loff_t *ppos)
3840{
3841 char buf[64];
3842 int r;
3843
3844 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3845 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3846 arch_spin_unlock(&trace_cmdline_lock);
3847
3848 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3849}
3850
3851static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3852{
3853 kfree(s->saved_cmdlines);
3854 kfree(s->map_cmdline_to_pid);
3855 kfree(s);
3856}
3857
3858static int tracing_resize_saved_cmdlines(unsigned int val)
3859{
3860 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3861
a6af8fbf 3862 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3863 if (!s)
3864 return -ENOMEM;
3865
3866 if (allocate_cmdlines_buffer(val, s) < 0) {
3867 kfree(s);
3868 return -ENOMEM;
3869 }
3870
3871 arch_spin_lock(&trace_cmdline_lock);
3872 savedcmd_temp = savedcmd;
3873 savedcmd = s;
3874 arch_spin_unlock(&trace_cmdline_lock);
3875 free_saved_cmdlines_buffer(savedcmd_temp);
3876
3877 return 0;
3878}
3879
3880static ssize_t
3881tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3882 size_t cnt, loff_t *ppos)
3883{
3884 unsigned long val;
3885 int ret;
3886
3887 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3888 if (ret)
3889 return ret;
3890
3891 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
3892 if (!val || val > PID_MAX_DEFAULT)
3893 return -EINVAL;
3894
3895 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3896 if (ret < 0)
3897 return ret;
3898
3899 *ppos += cnt;
3900
3901 return cnt;
3902}
3903
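/*
 * Editorial sketch: tracing_resize_saved_cmdlines() is what a write to
 * saved_cmdlines_size ends up calling; any value from 1 up to
 * PID_MAX_DEFAULT is accepted.  The 1024 below is an arbitrary illustration.
 */
static int __maybe_unused example_grow_saved_cmdlines(void)
{
	return tracing_resize_saved_cmdlines(1024);
}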
3904static const struct file_operations tracing_saved_cmdlines_size_fops = {
3905 .open = tracing_open_generic,
3906 .read = tracing_saved_cmdlines_size_read,
3907 .write = tracing_saved_cmdlines_size_write,
3908};
3909
bc0c38d1
SR
3910static ssize_t
3911tracing_set_trace_read(struct file *filp, char __user *ubuf,
3912 size_t cnt, loff_t *ppos)
3913{
2b6080f2 3914 struct trace_array *tr = filp->private_data;
ee6c2c1b 3915 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
3916 int r;
3917
3918 mutex_lock(&trace_types_lock);
2b6080f2 3919 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
3920 mutex_unlock(&trace_types_lock);
3921
4bf39a94 3922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
3923}
3924
b6f11df2
ACM
3925int tracer_init(struct tracer *t, struct trace_array *tr)
3926{
12883efb 3927 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
3928 return t->init(tr);
3929}
3930
12883efb 3931static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
3932{
3933 int cpu;
737223fb 3934
438ced17 3935 for_each_tracing_cpu(cpu)
12883efb 3936 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
3937}
3938
12883efb 3939#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 3940/* resize @trace_buf's buffer to the size of @size_buf's entries */
12883efb
SRRH
3941static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3942 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
3943{
3944 int cpu, ret = 0;
3945
3946 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3947 for_each_tracing_cpu(cpu) {
12883efb
SRRH
3948 ret = ring_buffer_resize(trace_buf->buffer,
3949 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
3950 if (ret < 0)
3951 break;
12883efb
SRRH
3952 per_cpu_ptr(trace_buf->data, cpu)->entries =
3953 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
3954 }
3955 } else {
12883efb
SRRH
3956 ret = ring_buffer_resize(trace_buf->buffer,
3957 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 3958 if (ret == 0)
12883efb
SRRH
3959 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3960 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
3961 }
3962
3963 return ret;
3964}
12883efb 3965#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 3966
2b6080f2
SR
3967static int __tracing_resize_ring_buffer(struct trace_array *tr,
3968 unsigned long size, int cpu)
73c5162a
SR
3969{
3970 int ret;
3971
3972 /*
3973 * If kernel or user changes the size of the ring buffer
a123c52b
SR
3974 * we use the size that was given, and we can forget about
3975 * expanding it later.
73c5162a 3976 */
55034cd6 3977 ring_buffer_expanded = true;
73c5162a 3978
b382ede6 3979 /* May be called before buffers are initialized */
12883efb 3980 if (!tr->trace_buffer.buffer)
b382ede6
SR
3981 return 0;
3982
12883efb 3983 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
3984 if (ret < 0)
3985 return ret;
3986
12883efb 3987#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3988 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3989 !tr->current_trace->use_max_tr)
ef710e10
KM
3990 goto out;
3991
12883efb 3992 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 3993 if (ret < 0) {
12883efb
SRRH
3994 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3995 &tr->trace_buffer, cpu);
73c5162a 3996 if (r < 0) {
a123c52b
SR
3997 /*
3998 * AARGH! We are left with different
3999 * size max buffer!!!!
4000 * The max buffer is our "snapshot" buffer.
4001 * When a tracer needs a snapshot (one of the
4002 * latency tracers), it swaps the max buffer
4003 * with the saved snapshot. We succeeded in
4004 * updating the size of the main buffer, but failed to
4005 * update the size of the max buffer. But when we tried
4006 * to reset the main buffer to the original size, we
4007 * failed there too. This is very unlikely to
4008 * happen, but if it does, warn and kill all
4009 * tracing.
4010 */
73c5162a
SR
4011 WARN_ON(1);
4012 tracing_disabled = 1;
4013 }
4014 return ret;
4015 }
4016
438ced17 4017 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4018 set_buffer_entries(&tr->max_buffer, size);
438ced17 4019 else
12883efb 4020 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4021
ef710e10 4022 out:
12883efb
SRRH
4023#endif /* CONFIG_TRACER_MAX_TRACE */
4024
438ced17 4025 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4026 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4027 else
12883efb 4028 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4029
4030 return ret;
4031}
4032
2b6080f2
SR
4033static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4034 unsigned long size, int cpu_id)
4f271a2a 4035{
83f40318 4036 int ret = size;
4f271a2a
VN
4037
4038 mutex_lock(&trace_types_lock);
4039
438ced17
VN
4040 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4041 /* make sure this cpu is enabled in the mask */
4042 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4043 ret = -EINVAL;
4044 goto out;
4045 }
4046 }
4f271a2a 4047
2b6080f2 4048 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4049 if (ret < 0)
4050 ret = -ENOMEM;
4051
438ced17 4052out:
4f271a2a
VN
4053 mutex_unlock(&trace_types_lock);
4054
4055 return ret;
4056}
4057
ef710e10 4058
1852fcce
SR
4059/**
4060 * tracing_update_buffers - used by tracing facility to expand ring buffers
4061 *
4062 * To save memory on systems where tracing is configured in but never
4063 * used, the ring buffers start out at a minimum size. Once a user
4064 * starts to use the tracing facility, the buffers need to grow to
4065 * their default size.
4066 *
4067 * This function is to be called when a tracer is about to be used.
4068 */
4069int tracing_update_buffers(void)
4070{
4071 int ret = 0;
4072
1027fcb2 4073 mutex_lock(&trace_types_lock);
1852fcce 4074 if (!ring_buffer_expanded)
2b6080f2 4075 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4076 RING_BUFFER_ALL_CPUS);
1027fcb2 4077 mutex_unlock(&trace_types_lock);
1852fcce
SR
4078
4079 return ret;
4080}
4081
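/*
 * Editorial sketch: per the comment above, anything about to generate events
 * should expand the buffers first.  The enable path below is hypothetical and
 * guards the optional ->init() the same way tracing_set_tracer() below does.
 */
static int __maybe_unused example_prepare_tracing(struct trace_array *tr)
{
	int ret;

	ret = tracing_update_buffers();		/* grow from the boot-time minimum */
	if (ret < 0)
		return ret;

	if (tr->current_trace->init)
		ret = tracer_init(tr->current_trace, tr);

	return ret;
}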
577b785f
SR
4082struct trace_option_dentry;
4083
4084static struct trace_option_dentry *
2b6080f2 4085create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f
SR
4086
4087static void
4088destroy_trace_option_files(struct trace_option_dentry *topts);
4089
6b450d25
SRRH
4090/*
4091 * Used to clear out the tracer before deletion of an instance.
4092 * Must have trace_types_lock held.
4093 */
4094static void tracing_set_nop(struct trace_array *tr)
4095{
4096 if (tr->current_trace == &nop_trace)
4097 return;
4098
50512ab5 4099 tr->current_trace->enabled--;
6b450d25
SRRH
4100
4101 if (tr->current_trace->reset)
4102 tr->current_trace->reset(tr);
4103
4104 tr->current_trace = &nop_trace;
4105}
4106
607e2ea1 4107static int tracing_set_tracer(struct trace_array *tr, const char *buf)
bc0c38d1 4108{
577b785f 4109 static struct trace_option_dentry *topts;
bc0c38d1 4110 struct tracer *t;
12883efb 4111#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4112 bool had_max_tr;
12883efb 4113#endif
d9e54076 4114 int ret = 0;
bc0c38d1 4115
1027fcb2
SR
4116 mutex_lock(&trace_types_lock);
4117
73c5162a 4118 if (!ring_buffer_expanded) {
2b6080f2 4119 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4120 RING_BUFFER_ALL_CPUS);
73c5162a 4121 if (ret < 0)
59f586db 4122 goto out;
73c5162a
SR
4123 ret = 0;
4124 }
4125
bc0c38d1
SR
4126 for (t = trace_types; t; t = t->next) {
4127 if (strcmp(t->name, buf) == 0)
4128 break;
4129 }
c2931e05
FW
4130 if (!t) {
4131 ret = -EINVAL;
4132 goto out;
4133 }
2b6080f2 4134 if (t == tr->current_trace)
bc0c38d1
SR
4135 goto out;
4136
607e2ea1
SRRH
4137 /* Some tracers are only allowed for the top level buffer */
4138 if (!trace_ok_for_array(t, tr)) {
4139 ret = -EINVAL;
4140 goto out;
4141 }
4142
9f029e83 4143 trace_branch_disable();
613f04a0 4144
50512ab5 4145 tr->current_trace->enabled--;
613f04a0 4146
2b6080f2
SR
4147 if (tr->current_trace->reset)
4148 tr->current_trace->reset(tr);
34600f0e 4149
12883efb 4150 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4151 tr->current_trace = &nop_trace;
34600f0e 4152
45ad21ca
SRRH
4153#ifdef CONFIG_TRACER_MAX_TRACE
4154 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4155
4156 if (had_max_tr && !t->use_max_tr) {
4157 /*
4158 * We need to make sure that the update_max_tr sees that
4159 * current_trace changed to nop_trace to keep it from
4160 * swapping the buffers after we resize it.
4161 * update_max_tr() is called with interrupts disabled,
4162 * so a synchronize_sched() is sufficient.
4163 */
4164 synchronize_sched();
3209cff4 4165 free_snapshot(tr);
ef710e10 4166 }
12883efb 4167#endif
f1b21c9a
SRRH
4168 /* Currently, only the top instance has options */
4169 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4170 destroy_trace_option_files(topts);
4171 topts = create_trace_option_files(tr, t);
4172 }
12883efb
SRRH
4173
4174#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4175 if (t->use_max_tr && !had_max_tr) {
3209cff4 4176 ret = alloc_snapshot(tr);
d60da506
HT
4177 if (ret < 0)
4178 goto out;
ef710e10 4179 }
12883efb 4180#endif
577b785f 4181
1c80025a 4182 if (t->init) {
b6f11df2 4183 ret = tracer_init(t, tr);
1c80025a
FW
4184 if (ret)
4185 goto out;
4186 }
bc0c38d1 4187
2b6080f2 4188 tr->current_trace = t;
50512ab5 4189 tr->current_trace->enabled++;
9f029e83 4190 trace_branch_enable(tr);
bc0c38d1
SR
4191 out:
4192 mutex_unlock(&trace_types_lock);
4193
d9e54076
PZ
4194 return ret;
4195}
4196
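/*
 * Editorial sketch: tracing_set_tracer() is the worker behind writes to the
 * current_tracer file.  Switching an instance back to the no-op tracer looks
 * like this (the caller is hypothetical):
 */
static int __maybe_unused example_switch_to_nop(struct trace_array *tr)
{
	return tracing_set_tracer(tr, "nop");	/* same as: echo nop > current_tracer */
}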
4197static ssize_t
4198tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4199 size_t cnt, loff_t *ppos)
4200{
607e2ea1 4201 struct trace_array *tr = filp->private_data;
ee6c2c1b 4202 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4203 int i;
4204 size_t ret;
e6e7a65a
FW
4205 int err;
4206
4207 ret = cnt;
d9e54076 4208
ee6c2c1b
LZ
4209 if (cnt > MAX_TRACER_SIZE)
4210 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4211
4212 if (copy_from_user(&buf, ubuf, cnt))
4213 return -EFAULT;
4214
4215 buf[cnt] = 0;
4216
4217 /* strip ending whitespace. */
4218 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4219 buf[i] = 0;
4220
607e2ea1 4221 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4222 if (err)
4223 return err;
d9e54076 4224
cf8517cf 4225 *ppos += ret;
bc0c38d1 4226
c2931e05 4227 return ret;
bc0c38d1
SR
4228}
4229
4230static ssize_t
6508fa76
SF
4231tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4232 size_t cnt, loff_t *ppos)
bc0c38d1 4233{
bc0c38d1
SR
4234 char buf[64];
4235 int r;
4236
cffae437 4237 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4238 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4239 if (r > sizeof(buf))
4240 r = sizeof(buf);
4bf39a94 4241 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4242}
4243
4244static ssize_t
6508fa76
SF
4245tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4246 size_t cnt, loff_t *ppos)
bc0c38d1 4247{
5e39841c 4248 unsigned long val;
c6caeeb1 4249 int ret;
bc0c38d1 4250
22fe9b54
PH
4251 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4252 if (ret)
c6caeeb1 4253 return ret;
bc0c38d1
SR
4254
4255 *ptr = val * 1000;
4256
4257 return cnt;
4258}
4259
6508fa76
SF
4260static ssize_t
4261tracing_thresh_read(struct file *filp, char __user *ubuf,
4262 size_t cnt, loff_t *ppos)
4263{
4264 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4265}
4266
4267static ssize_t
4268tracing_thresh_write(struct file *filp, const char __user *ubuf,
4269 size_t cnt, loff_t *ppos)
4270{
4271 struct trace_array *tr = filp->private_data;
4272 int ret;
4273
4274 mutex_lock(&trace_types_lock);
4275 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4276 if (ret < 0)
4277 goto out;
4278
4279 if (tr->current_trace->update_thresh) {
4280 ret = tr->current_trace->update_thresh(tr);
4281 if (ret < 0)
4282 goto out;
4283 }
4284
4285 ret = cnt;
4286out:
4287 mutex_unlock(&trace_types_lock);
4288
4289 return ret;
4290}
4291
4292static ssize_t
4293tracing_max_lat_read(struct file *filp, char __user *ubuf,
4294 size_t cnt, loff_t *ppos)
4295{
4296 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4297}
4298
4299static ssize_t
4300tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4301 size_t cnt, loff_t *ppos)
4302{
4303 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4304}
4305
b3806b43
SR
4306static int tracing_open_pipe(struct inode *inode, struct file *filp)
4307{
15544209 4308 struct trace_array *tr = inode->i_private;
b3806b43 4309 struct trace_iterator *iter;
b04cc6b1 4310 int ret = 0;
b3806b43
SR
4311
4312 if (tracing_disabled)
4313 return -ENODEV;
4314
7b85af63
SRRH
4315 if (trace_array_get(tr) < 0)
4316 return -ENODEV;
4317
b04cc6b1
FW
4318 mutex_lock(&trace_types_lock);
4319
b3806b43
SR
4320 /* create a buffer to store the information to pass to userspace */
4321 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4322 if (!iter) {
4323 ret = -ENOMEM;
f77d09a3 4324 __trace_array_put(tr);
b04cc6b1
FW
4325 goto out;
4326 }
b3806b43 4327
3a161d99
SRRH
4328 trace_seq_init(&iter->seq);
4329
d7350c3f
FW
4330 /*
4331 * We make a copy of the current tracer to avoid concurrent
4332 * changes on it while we are reading.
4333 */
4334 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4335 if (!iter->trace) {
4336 ret = -ENOMEM;
4337 goto fail;
4338 }
2b6080f2 4339 *iter->trace = *tr->current_trace;
d7350c3f 4340
4462344e 4341 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4342 ret = -ENOMEM;
d7350c3f 4343 goto fail;
4462344e
RR
4344 }
4345
a309720c 4346 /* trace pipe does not show start of buffer */
4462344e 4347 cpumask_setall(iter->started);
a309720c 4348
112f38a7
SR
4349 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4350 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4351
8be0709f 4352 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4353 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4354 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4355
15544209
ON
4356 iter->tr = tr;
4357 iter->trace_buffer = &tr->trace_buffer;
4358 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4359 mutex_init(&iter->mutex);
b3806b43
SR
4360 filp->private_data = iter;
4361
107bad8b
SR
4362 if (iter->trace->pipe_open)
4363 iter->trace->pipe_open(iter);
107bad8b 4364
b444786f 4365 nonseekable_open(inode, filp);
b04cc6b1
FW
4366out:
4367 mutex_unlock(&trace_types_lock);
4368 return ret;
d7350c3f
FW
4369
4370fail:
4371 kfree(iter->trace);
4372 kfree(iter);
7b85af63 4373 __trace_array_put(tr);
d7350c3f
FW
4374 mutex_unlock(&trace_types_lock);
4375 return ret;
b3806b43
SR
4376}
4377
4378static int tracing_release_pipe(struct inode *inode, struct file *file)
4379{
4380 struct trace_iterator *iter = file->private_data;
15544209 4381 struct trace_array *tr = inode->i_private;
b3806b43 4382
b04cc6b1
FW
4383 mutex_lock(&trace_types_lock);
4384
29bf4a5e 4385 if (iter->trace->pipe_close)
c521efd1
SR
4386 iter->trace->pipe_close(iter);
4387
b04cc6b1
FW
4388 mutex_unlock(&trace_types_lock);
4389
4462344e 4390 free_cpumask_var(iter->started);
d7350c3f
FW
4391 mutex_destroy(&iter->mutex);
4392 kfree(iter->trace);
b3806b43 4393 kfree(iter);
b3806b43 4394
7b85af63
SRRH
4395 trace_array_put(tr);
4396
b3806b43
SR
4397 return 0;
4398}
4399
2a2cc8f7 4400static unsigned int
cc60cdc9 4401trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4402{
15693458
SRRH
4403 /* Iterators are static, they should be filled or empty */
4404 if (trace_buffer_iter(iter, iter->cpu_file))
4405 return POLLIN | POLLRDNORM;
2a2cc8f7 4406
15693458 4407 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4408 /*
4409 * Always select as readable when in blocking mode
4410 */
4411 return POLLIN | POLLRDNORM;
15693458 4412 else
12883efb 4413 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4414 filp, poll_table);
2a2cc8f7 4415}
2a2cc8f7 4416
cc60cdc9
SR
4417static unsigned int
4418tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4419{
4420 struct trace_iterator *iter = filp->private_data;
4421
4422 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4423}
4424
ff98781b
EGM
4425/* Must be called with trace_types_lock mutex held. */
4426static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4427{
4428 struct trace_iterator *iter = filp->private_data;
8b8b3683 4429 int ret;
b3806b43 4430
b3806b43 4431 while (trace_empty(iter)) {
2dc8f095 4432
107bad8b 4433 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4434 return -EAGAIN;
107bad8b 4435 }
2dc8f095 4436
b3806b43 4437 /*
250bfd3d 4438 * We block until we read something and tracing is disabled.
b3806b43
SR
4439 * We still block if tracing is disabled, but we have never
4440 * read anything. This allows a user to cat this file, and
4441 * then enable tracing. But after we have read something,
4442 * we give an EOF when tracing is again disabled.
4443 *
4444 * iter->pos will be 0 if we haven't read anything.
4445 */
10246fa3 4446 if (!tracing_is_on() && iter->pos)
b3806b43 4447 break;
f4874261
SRRH
4448
4449 mutex_unlock(&iter->mutex);
4450
e30f53aa 4451 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4452
4453 mutex_lock(&iter->mutex);
4454
8b8b3683
SRRH
4455 if (ret)
4456 return ret;
b3806b43
SR
4457 }
4458
ff98781b
EGM
4459 return 1;
4460}
4461
4462/*
4463 * Consumer reader.
4464 */
4465static ssize_t
4466tracing_read_pipe(struct file *filp, char __user *ubuf,
4467 size_t cnt, loff_t *ppos)
4468{
4469 struct trace_iterator *iter = filp->private_data;
2b6080f2 4470 struct trace_array *tr = iter->tr;
ff98781b
EGM
4471 ssize_t sret;
4472
4473 /* return any leftover data */
4474 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4475 if (sret != -EBUSY)
4476 return sret;
4477
f9520750 4478 trace_seq_init(&iter->seq);
ff98781b 4479
d7350c3f 4480 /* copy the tracer to avoid using a global lock all around */
ff98781b 4481 mutex_lock(&trace_types_lock);
2b6080f2
SR
4482 if (unlikely(iter->trace->name != tr->current_trace->name))
4483 *iter->trace = *tr->current_trace;
d7350c3f
FW
4484 mutex_unlock(&trace_types_lock);
4485
4486 /*
4487 * Avoid more than one consumer on a single file descriptor
4488 * This is just a matter of traces coherency, the ring buffer itself
4489 * is protected.
4490 */
4491 mutex_lock(&iter->mutex);
ff98781b
EGM
4492 if (iter->trace->read) {
4493 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4494 if (sret)
4495 goto out;
4496 }
4497
4498waitagain:
4499 sret = tracing_wait_pipe(filp);
4500 if (sret <= 0)
4501 goto out;
4502
b3806b43 4503 /* stop when tracing is finished */
ff98781b
EGM
4504 if (trace_empty(iter)) {
4505 sret = 0;
107bad8b 4506 goto out;
ff98781b 4507 }
b3806b43
SR
4508
4509 if (cnt >= PAGE_SIZE)
4510 cnt = PAGE_SIZE - 1;
4511
53d0aa77 4512 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4513 memset(&iter->seq, 0,
4514 sizeof(struct trace_iterator) -
4515 offsetof(struct trace_iterator, seq));
ed5467da 4516 cpumask_clear(iter->started);
4823ed7e 4517 iter->pos = -1;
b3806b43 4518
4f535968 4519 trace_event_read_lock();
7e53bd42 4520 trace_access_lock(iter->cpu_file);
955b61e5 4521 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4522 enum print_line_t ret;
5ac48378 4523 int save_len = iter->seq.seq.len;
088b1e42 4524
f9896bf3 4525 ret = print_trace_line(iter);
2c4f035f 4526 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4527 /* don't print partial lines */
5ac48378 4528 iter->seq.seq.len = save_len;
b3806b43 4529 break;
088b1e42 4530 }
b91facc3
FW
4531 if (ret != TRACE_TYPE_NO_CONSUME)
4532 trace_consume(iter);
b3806b43 4533
5ac48378 4534 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4535 break;
ee5e51f5
JO
4536
4537 /*
4538 * Setting the full flag means we reached the trace_seq buffer
4539 * size and we should have left via the partial-output condition above.
4540 * One of the trace_seq_* functions is not used properly.
4541 */
4542 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4543 iter->ent->type);
b3806b43 4544 }
7e53bd42 4545 trace_access_unlock(iter->cpu_file);
4f535968 4546 trace_event_read_unlock();
b3806b43 4547
b3806b43 4548 /* Now copy what we have to the user */
6c6c2796 4549 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4550 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4551 trace_seq_init(&iter->seq);
9ff4b974
PP
4552
4553 /*
25985edc 4554 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4555 * entries, go back to wait for more entries.
4556 */
6c6c2796 4557 if (sret == -EBUSY)
9ff4b974 4558 goto waitagain;
b3806b43 4559
107bad8b 4560out:
d7350c3f 4561 mutex_unlock(&iter->mutex);
107bad8b 4562
6c6c2796 4563 return sret;
b3806b43
SR
4564}
4565
3c56819b
EGM
4566static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4567 unsigned int idx)
4568{
4569 __free_page(spd->pages[idx]);
4570}
4571
28dfef8f 4572static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4573 .can_merge = 0,
34cd4998 4574 .confirm = generic_pipe_buf_confirm,
92fdd98c 4575 .release = generic_pipe_buf_release,
34cd4998
SR
4576 .steal = generic_pipe_buf_steal,
4577 .get = generic_pipe_buf_get,
3c56819b
EGM
4578};
4579
34cd4998 4580static size_t
fa7c7f6e 4581tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4582{
4583 size_t count;
74f06bb7 4584 int save_len;
34cd4998
SR
4585 int ret;
4586
4587 /* Seq buffer is page-sized, exactly what we need. */
4588 for (;;) {
74f06bb7 4589 save_len = iter->seq.seq.len;
34cd4998 4590 ret = print_trace_line(iter);
74f06bb7
SRRH
4591
4592 if (trace_seq_has_overflowed(&iter->seq)) {
4593 iter->seq.seq.len = save_len;
34cd4998
SR
4594 break;
4595 }
74f06bb7
SRRH
4596
4597 /*
4598 * This should not be hit, because it should only
4599 * be set if the iter->seq overflowed. But check it
4600 * anyway to be safe.
4601 */
34cd4998 4602 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4603 iter->seq.seq.len = save_len;
4604 break;
4605 }
4606
5ac48378 4607 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4608 if (rem < count) {
4609 rem = 0;
4610 iter->seq.seq.len = save_len;
34cd4998
SR
4611 break;
4612 }
4613
74e7ff8c
LJ
4614 if (ret != TRACE_TYPE_NO_CONSUME)
4615 trace_consume(iter);
34cd4998 4616 rem -= count;
955b61e5 4617 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4618 rem = 0;
4619 iter->ent = NULL;
4620 break;
4621 }
4622 }
4623
4624 return rem;
4625}
4626
3c56819b
EGM
4627static ssize_t tracing_splice_read_pipe(struct file *filp,
4628 loff_t *ppos,
4629 struct pipe_inode_info *pipe,
4630 size_t len,
4631 unsigned int flags)
4632{
35f3d14d
JA
4633 struct page *pages_def[PIPE_DEF_BUFFERS];
4634 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4635 struct trace_iterator *iter = filp->private_data;
4636 struct splice_pipe_desc spd = {
35f3d14d
JA
4637 .pages = pages_def,
4638 .partial = partial_def,
34cd4998 4639 .nr_pages = 0, /* This gets updated below. */
047fe360 4640 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4641 .flags = flags,
4642 .ops = &tracing_pipe_buf_ops,
4643 .spd_release = tracing_spd_release_pipe,
3c56819b 4644 };
2b6080f2 4645 struct trace_array *tr = iter->tr;
3c56819b 4646 ssize_t ret;
34cd4998 4647 size_t rem;
3c56819b
EGM
4648 unsigned int i;
4649
35f3d14d
JA
4650 if (splice_grow_spd(pipe, &spd))
4651 return -ENOMEM;
4652
d7350c3f 4653 /* copy the tracer to avoid using a global lock all around */
3c56819b 4654 mutex_lock(&trace_types_lock);
2b6080f2
SR
4655 if (unlikely(iter->trace->name != tr->current_trace->name))
4656 *iter->trace = *tr->current_trace;
d7350c3f
FW
4657 mutex_unlock(&trace_types_lock);
4658
4659 mutex_lock(&iter->mutex);
3c56819b
EGM
4660
4661 if (iter->trace->splice_read) {
4662 ret = iter->trace->splice_read(iter, filp,
4663 ppos, pipe, len, flags);
4664 if (ret)
34cd4998 4665 goto out_err;
3c56819b
EGM
4666 }
4667
4668 ret = tracing_wait_pipe(filp);
4669 if (ret <= 0)
34cd4998 4670 goto out_err;
3c56819b 4671
955b61e5 4672 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4673 ret = -EFAULT;
34cd4998 4674 goto out_err;
3c56819b
EGM
4675 }
4676
4f535968 4677 trace_event_read_lock();
7e53bd42 4678 trace_access_lock(iter->cpu_file);
4f535968 4679
3c56819b 4680 /* Fill as many pages as possible. */
a786c06d 4681 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4682 spd.pages[i] = alloc_page(GFP_KERNEL);
4683 if (!spd.pages[i])
34cd4998 4684 break;
3c56819b 4685
fa7c7f6e 4686 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4687
4688 /* Copy the data into the page, so we can start over. */
4689 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4690 page_address(spd.pages[i]),
5ac48378 4691 trace_seq_used(&iter->seq));
3c56819b 4692 if (ret < 0) {
35f3d14d 4693 __free_page(spd.pages[i]);
3c56819b
EGM
4694 break;
4695 }
35f3d14d 4696 spd.partial[i].offset = 0;
5ac48378 4697 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4698
f9520750 4699 trace_seq_init(&iter->seq);
3c56819b
EGM
4700 }
4701
7e53bd42 4702 trace_access_unlock(iter->cpu_file);
4f535968 4703 trace_event_read_unlock();
d7350c3f 4704 mutex_unlock(&iter->mutex);
3c56819b
EGM
4705
4706 spd.nr_pages = i;
4707
35f3d14d
JA
4708 ret = splice_to_pipe(pipe, &spd);
4709out:
047fe360 4710 splice_shrink_spd(&spd);
35f3d14d 4711 return ret;
3c56819b 4712
34cd4998 4713out_err:
d7350c3f 4714 mutex_unlock(&iter->mutex);
35f3d14d 4715 goto out;
3c56819b
EGM
4716}
4717
a98a3c3f
SR
4718static ssize_t
4719tracing_entries_read(struct file *filp, char __user *ubuf,
4720 size_t cnt, loff_t *ppos)
4721{
0bc392ee
ON
4722 struct inode *inode = file_inode(filp);
4723 struct trace_array *tr = inode->i_private;
4724 int cpu = tracing_get_cpu(inode);
438ced17
VN
4725 char buf[64];
4726 int r = 0;
4727 ssize_t ret;
a98a3c3f 4728
db526ca3 4729 mutex_lock(&trace_types_lock);
438ced17 4730
0bc392ee 4731 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4732 int cpu, buf_size_same;
4733 unsigned long size;
4734
4735 size = 0;
4736 buf_size_same = 1;
4737 /* check if all cpu sizes are same */
4738 for_each_tracing_cpu(cpu) {
4739 /* fill in the size from first enabled cpu */
4740 if (size == 0)
12883efb
SRRH
4741 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4742 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4743 buf_size_same = 0;
4744 break;
4745 }
4746 }
4747
4748 if (buf_size_same) {
4749 if (!ring_buffer_expanded)
4750 r = sprintf(buf, "%lu (expanded: %lu)\n",
4751 size >> 10,
4752 trace_buf_size >> 10);
4753 else
4754 r = sprintf(buf, "%lu\n", size >> 10);
4755 } else
4756 r = sprintf(buf, "X\n");
4757 } else
0bc392ee 4758 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4759
db526ca3
SR
4760 mutex_unlock(&trace_types_lock);
4761
438ced17
VN
4762 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4763 return ret;
a98a3c3f
SR
4764}
4765
4766static ssize_t
4767tracing_entries_write(struct file *filp, const char __user *ubuf,
4768 size_t cnt, loff_t *ppos)
4769{
0bc392ee
ON
4770 struct inode *inode = file_inode(filp);
4771 struct trace_array *tr = inode->i_private;
a98a3c3f 4772 unsigned long val;
4f271a2a 4773 int ret;
a98a3c3f 4774
22fe9b54
PH
4775 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4776 if (ret)
c6caeeb1 4777 return ret;
a98a3c3f
SR
4778
4779 /* must have at least 1 entry */
4780 if (!val)
4781 return -EINVAL;
4782
1696b2b0
SR
4783 /* value is in KB */
4784 val <<= 10;
0bc392ee 4785 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
4786 if (ret < 0)
4787 return ret;
a98a3c3f 4788
cf8517cf 4789 *ppos += cnt;
a98a3c3f 4790
4f271a2a
VN
4791 return cnt;
4792}
bf5e6519 4793
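/*
 * Editorial sketch: buffer_size_kb input is in kilobytes, so the write path
 * above shifts it left by 10 before resizing.  A hypothetical in-kernel
 * caller doing the same for every cpu:
 */
static ssize_t __maybe_unused example_resize_to_1mb(struct trace_array *tr)
{
	/* 1024 KB per cpu, i.e. echo 1024 > buffer_size_kb */
	return tracing_resize_ring_buffer(tr, 1024 << 10, RING_BUFFER_ALL_CPUS);
}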
f81ab074
VN
4794static ssize_t
4795tracing_total_entries_read(struct file *filp, char __user *ubuf,
4796 size_t cnt, loff_t *ppos)
4797{
4798 struct trace_array *tr = filp->private_data;
4799 char buf[64];
4800 int r, cpu;
4801 unsigned long size = 0, expanded_size = 0;
4802
4803 mutex_lock(&trace_types_lock);
4804 for_each_tracing_cpu(cpu) {
12883efb 4805 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
4806 if (!ring_buffer_expanded)
4807 expanded_size += trace_buf_size >> 10;
4808 }
4809 if (ring_buffer_expanded)
4810 r = sprintf(buf, "%lu\n", size);
4811 else
4812 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4813 mutex_unlock(&trace_types_lock);
4814
4815 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4816}
4817
4f271a2a
VN
4818static ssize_t
4819tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4820 size_t cnt, loff_t *ppos)
4821{
4822 /*
4823 * There is no need to read what the user has written; this function
4824 * only exists so that using "echo" on this file does not return an error
4825 */
4826
4827 *ppos += cnt;
a98a3c3f
SR
4828
4829 return cnt;
4830}
4831
4f271a2a
VN
4832static int
4833tracing_free_buffer_release(struct inode *inode, struct file *filp)
4834{
2b6080f2
SR
4835 struct trace_array *tr = inode->i_private;
4836
cf30cf67
SR
4837 /* disable tracing ? */
4838 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 4839 tracer_tracing_off(tr);
4f271a2a 4840 /* resize the ring buffer to 0 */
2b6080f2 4841 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 4842
7b85af63
SRRH
4843 trace_array_put(tr);
4844
4f271a2a
VN
4845 return 0;
4846}
4847
5bf9a1ee
PP
4848static ssize_t
4849tracing_mark_write(struct file *filp, const char __user *ubuf,
4850 size_t cnt, loff_t *fpos)
4851{
d696b58c 4852 unsigned long addr = (unsigned long)ubuf;
2d71619c 4853 struct trace_array *tr = filp->private_data;
d696b58c
SR
4854 struct ring_buffer_event *event;
4855 struct ring_buffer *buffer;
4856 struct print_entry *entry;
4857 unsigned long irq_flags;
4858 struct page *pages[2];
6edb2a8a 4859 void *map_page[2];
d696b58c
SR
4860 int nr_pages = 1;
4861 ssize_t written;
d696b58c
SR
4862 int offset;
4863 int size;
4864 int len;
4865 int ret;
6edb2a8a 4866 int i;
5bf9a1ee 4867
c76f0694 4868 if (tracing_disabled)
5bf9a1ee
PP
4869 return -EINVAL;
4870
5224c3a3
MSB
4871 if (!(trace_flags & TRACE_ITER_MARKERS))
4872 return -EINVAL;
4873
5bf9a1ee
PP
4874 if (cnt > TRACE_BUF_SIZE)
4875 cnt = TRACE_BUF_SIZE;
4876
d696b58c
SR
4877 /*
4878 * Userspace is injecting traces into the kernel trace buffer.
4879 * We want to be as non intrusive as possible.
4880 * To do so, we do not want to allocate any special buffers
4881 * or take any locks, but instead write the userspace data
4882 * straight into the ring buffer.
4883 *
4884 * First we need to pin the userspace buffer into memory. Most likely
4885 * it already is resident, because userspace just referenced it, but
4886 * there is no guarantee that it is. By using get_user_pages_fast()
4887 * and kmap_atomic/kunmap_atomic() we can get access to the
4888 * pages directly. We then write the data directly into the
4889 * ring buffer.
4890 */
4891 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 4892
d696b58c
SR
4893 /* check if we cross pages */
4894 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4895 nr_pages = 2;
4896
4897 offset = addr & (PAGE_SIZE - 1);
4898 addr &= PAGE_MASK;
4899
4900 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4901 if (ret < nr_pages) {
4902 while (--ret >= 0)
4903 put_page(pages[ret]);
4904 written = -EFAULT;
4905 goto out;
5bf9a1ee 4906 }
d696b58c 4907
6edb2a8a
SR
4908 for (i = 0; i < nr_pages; i++)
4909 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
4910
4911 local_save_flags(irq_flags);
4912 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 4913 buffer = tr->trace_buffer.buffer;
d696b58c
SR
4914 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4915 irq_flags, preempt_count());
4916 if (!event) {
4917 /* Ring buffer disabled, return as if not open for write */
4918 written = -EBADF;
4919 goto out_unlock;
5bf9a1ee 4920 }
d696b58c
SR
4921
4922 entry = ring_buffer_event_data(event);
4923 entry->ip = _THIS_IP_;
4924
4925 if (nr_pages == 2) {
4926 len = PAGE_SIZE - offset;
6edb2a8a
SR
4927 memcpy(&entry->buf, map_page[0] + offset, len);
4928 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 4929 } else
6edb2a8a 4930 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 4931
d696b58c
SR
4932 if (entry->buf[cnt - 1] != '\n') {
4933 entry->buf[cnt] = '\n';
4934 entry->buf[cnt + 1] = '\0';
4935 } else
4936 entry->buf[cnt] = '\0';
4937
7ffbd48d 4938 __buffer_unlock_commit(buffer, event);
5bf9a1ee 4939
d696b58c 4940 written = cnt;
5bf9a1ee 4941
d696b58c 4942 *fpos += written;
1aa54bca 4943
d696b58c 4944 out_unlock:
6edb2a8a
SR
4945 for (i = 0; i < nr_pages; i++){
4946 kunmap_atomic(map_page[i]);
4947 put_page(pages[i]);
4948 }
d696b58c 4949 out:
1aa54bca 4950 return written;
5bf9a1ee
PP
4951}
4952
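/*
 * Editorial sketch of the userspace side of tracing_mark_write().  The path
 * below assumes debugfs mounted at /sys/kernel/debug; adjust it if your
 * tracing directory lives elsewhere.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int trace_marker_write(const char *msg)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, msg, strlen(msg)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */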
13f16d20 4953static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 4954{
2b6080f2 4955 struct trace_array *tr = m->private;
5079f326
Z
4956 int i;
4957
4958 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 4959 seq_printf(m,
5079f326 4960 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
4961 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4962 i == tr->clock_id ? "]" : "");
13f16d20 4963 seq_putc(m, '\n');
5079f326 4964
13f16d20 4965 return 0;
5079f326
Z
4966}
4967
e1e232ca 4968static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 4969{
5079f326
Z
4970 int i;
4971
5079f326
Z
4972 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4973 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4974 break;
4975 }
4976 if (i == ARRAY_SIZE(trace_clocks))
4977 return -EINVAL;
4978
5079f326
Z
4979 mutex_lock(&trace_types_lock);
4980
2b6080f2
SR
4981 tr->clock_id = i;
4982
12883efb 4983 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 4984
60303ed3
DS
4985 /*
4986 * New clock may not be consistent with the previous clock.
4987 * Reset the buffer so that it doesn't have incomparable timestamps.
4988 */
9457158b 4989 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
4990
4991#ifdef CONFIG_TRACER_MAX_TRACE
4992 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4993 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 4994 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 4995#endif
60303ed3 4996
5079f326
Z
4997 mutex_unlock(&trace_types_lock);
4998
e1e232ca
SR
4999 return 0;
5000}
5001
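/*
 * Editorial sketch: tracing_set_clock() takes one of the names from
 * trace_clocks[] ("local", "global", "counter", ... as listed in the readme
 * text earlier in this file).  A hypothetical caller picking the cross-CPU
 * ordered clock:
 */
static int __maybe_unused example_use_global_clock(struct trace_array *tr)
{
	return tracing_set_clock(tr, "global");	/* same as: echo global > trace_clock */
}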
5002static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5003 size_t cnt, loff_t *fpos)
5004{
5005 struct seq_file *m = filp->private_data;
5006 struct trace_array *tr = m->private;
5007 char buf[64];
5008 const char *clockstr;
5009 int ret;
5010
5011 if (cnt >= sizeof(buf))
5012 return -EINVAL;
5013
5014 if (copy_from_user(&buf, ubuf, cnt))
5015 return -EFAULT;
5016
5017 buf[cnt] = 0;
5018
5019 clockstr = strstrip(buf);
5020
5021 ret = tracing_set_clock(tr, clockstr);
5022 if (ret)
5023 return ret;
5024
5079f326
Z
5025 *fpos += cnt;
5026
5027 return cnt;
5028}
5029
13f16d20
LZ
5030static int tracing_clock_open(struct inode *inode, struct file *file)
5031{
7b85af63
SRRH
5032 struct trace_array *tr = inode->i_private;
5033 int ret;
5034
13f16d20
LZ
5035 if (tracing_disabled)
5036 return -ENODEV;
2b6080f2 5037
7b85af63
SRRH
5038 if (trace_array_get(tr))
5039 return -ENODEV;
5040
5041 ret = single_open(file, tracing_clock_show, inode->i_private);
5042 if (ret < 0)
5043 trace_array_put(tr);
5044
5045 return ret;
13f16d20
LZ
5046}
5047
6de58e62
SRRH
5048struct ftrace_buffer_info {
5049 struct trace_iterator iter;
5050 void *spare;
5051 unsigned int read;
5052};
5053
debdd57f
HT
5054#ifdef CONFIG_TRACER_SNAPSHOT
5055static int tracing_snapshot_open(struct inode *inode, struct file *file)
5056{
6484c71c 5057 struct trace_array *tr = inode->i_private;
debdd57f 5058 struct trace_iterator *iter;
2b6080f2 5059 struct seq_file *m;
debdd57f
HT
5060 int ret = 0;
5061
ff451961
SRRH
5062 if (trace_array_get(tr) < 0)
5063 return -ENODEV;
5064
debdd57f 5065 if (file->f_mode & FMODE_READ) {
6484c71c 5066 iter = __tracing_open(inode, file, true);
debdd57f
HT
5067 if (IS_ERR(iter))
5068 ret = PTR_ERR(iter);
2b6080f2
SR
5069 } else {
5070 /* Writes still need the seq_file to hold the private data */
f77d09a3 5071 ret = -ENOMEM;
2b6080f2
SR
5072 m = kzalloc(sizeof(*m), GFP_KERNEL);
5073 if (!m)
f77d09a3 5074 goto out;
2b6080f2
SR
5075 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5076 if (!iter) {
5077 kfree(m);
f77d09a3 5078 goto out;
2b6080f2 5079 }
f77d09a3
AL
5080 ret = 0;
5081
ff451961 5082 iter->tr = tr;
6484c71c
ON
5083 iter->trace_buffer = &tr->max_buffer;
5084 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5085 m->private = iter;
5086 file->private_data = m;
debdd57f 5087 }
f77d09a3 5088out:
ff451961
SRRH
5089 if (ret < 0)
5090 trace_array_put(tr);
5091
debdd57f
HT
5092 return ret;
5093}
5094
5095static ssize_t
5096tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5097 loff_t *ppos)
5098{
2b6080f2
SR
5099 struct seq_file *m = filp->private_data;
5100 struct trace_iterator *iter = m->private;
5101 struct trace_array *tr = iter->tr;
debdd57f
HT
5102 unsigned long val;
5103 int ret;
5104
5105 ret = tracing_update_buffers();
5106 if (ret < 0)
5107 return ret;
5108
5109 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5110 if (ret)
5111 return ret;
5112
5113 mutex_lock(&trace_types_lock);
5114
2b6080f2 5115 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5116 ret = -EBUSY;
5117 goto out;
5118 }
5119
5120 switch (val) {
5121 case 0:
f1affcaa
SRRH
5122 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5123 ret = -EINVAL;
5124 break;
debdd57f 5125 }
3209cff4
SRRH
5126 if (tr->allocated_snapshot)
5127 free_snapshot(tr);
debdd57f
HT
5128 break;
5129 case 1:
f1affcaa
SRRH
5130/* Only allow per-cpu swap if the ring buffer supports it */
5131#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5132 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5133 ret = -EINVAL;
5134 break;
5135 }
5136#endif
45ad21ca 5137 if (!tr->allocated_snapshot) {
3209cff4 5138 ret = alloc_snapshot(tr);
debdd57f
HT
5139 if (ret < 0)
5140 break;
debdd57f 5141 }
debdd57f
HT
5142 local_irq_disable();
5143 /* Now, we're going to swap */
f1affcaa 5144 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5145 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5146 else
ce9bae55 5147 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5148 local_irq_enable();
5149 break;
5150 default:
45ad21ca 5151 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5152 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5153 tracing_reset_online_cpus(&tr->max_buffer);
5154 else
5155 tracing_reset(&tr->max_buffer, iter->cpu_file);
5156 }
debdd57f
HT
5157 break;
5158 }
5159
5160 if (ret >= 0) {
5161 *ppos += cnt;
5162 ret = cnt;
5163 }
5164out:
5165 mutex_unlock(&trace_types_lock);
5166 return ret;
5167}
2b6080f2
SR
5168
5169static int tracing_snapshot_release(struct inode *inode, struct file *file)
5170{
5171 struct seq_file *m = file->private_data;
ff451961
SRRH
5172 int ret;
5173
5174 ret = tracing_release(inode, file);
2b6080f2
SR
5175
5176 if (file->f_mode & FMODE_READ)
ff451961 5177 return ret;
2b6080f2
SR
5178
5179 /* If write only, the seq_file is just a stub */
5180 if (m)
5181 kfree(m->private);
5182 kfree(m);
5183
5184 return 0;
5185}
5186
6de58e62
SRRH
5187static int tracing_buffers_open(struct inode *inode, struct file *filp);
5188static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5189 size_t count, loff_t *ppos);
5190static int tracing_buffers_release(struct inode *inode, struct file *file);
5191static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5192 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5193
5194static int snapshot_raw_open(struct inode *inode, struct file *filp)
5195{
5196 struct ftrace_buffer_info *info;
5197 int ret;
5198
5199 ret = tracing_buffers_open(inode, filp);
5200 if (ret < 0)
5201 return ret;
5202
5203 info = filp->private_data;
5204
5205 if (info->iter.trace->use_max_tr) {
5206 tracing_buffers_release(inode, filp);
5207 return -EBUSY;
5208 }
5209
5210 info->iter.snapshot = true;
5211 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5212
5213 return ret;
5214}
5215
debdd57f
HT
5216#endif /* CONFIG_TRACER_SNAPSHOT */
5217
5218
6508fa76
SF
5219static const struct file_operations tracing_thresh_fops = {
5220 .open = tracing_open_generic,
5221 .read = tracing_thresh_read,
5222 .write = tracing_thresh_write,
5223 .llseek = generic_file_llseek,
5224};
5225
5e2336a0 5226static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5227 .open = tracing_open_generic,
5228 .read = tracing_max_lat_read,
5229 .write = tracing_max_lat_write,
b444786f 5230 .llseek = generic_file_llseek,
bc0c38d1
SR
5231};
5232
5e2336a0 5233static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5234 .open = tracing_open_generic,
5235 .read = tracing_set_trace_read,
5236 .write = tracing_set_trace_write,
b444786f 5237 .llseek = generic_file_llseek,
bc0c38d1
SR
5238};
5239
5e2336a0 5240static const struct file_operations tracing_pipe_fops = {
4bf39a94 5241 .open = tracing_open_pipe,
2a2cc8f7 5242 .poll = tracing_poll_pipe,
4bf39a94 5243 .read = tracing_read_pipe,
3c56819b 5244 .splice_read = tracing_splice_read_pipe,
4bf39a94 5245 .release = tracing_release_pipe,
b444786f 5246 .llseek = no_llseek,
b3806b43
SR
5247};
5248
5e2336a0 5249static const struct file_operations tracing_entries_fops = {
0bc392ee 5250 .open = tracing_open_generic_tr,
a98a3c3f
SR
5251 .read = tracing_entries_read,
5252 .write = tracing_entries_write,
b444786f 5253 .llseek = generic_file_llseek,
0bc392ee 5254 .release = tracing_release_generic_tr,
a98a3c3f
SR
5255};
5256
f81ab074 5257static const struct file_operations tracing_total_entries_fops = {
7b85af63 5258 .open = tracing_open_generic_tr,
f81ab074
VN
5259 .read = tracing_total_entries_read,
5260 .llseek = generic_file_llseek,
7b85af63 5261 .release = tracing_release_generic_tr,
f81ab074
VN
5262};
5263
4f271a2a 5264static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5265 .open = tracing_open_generic_tr,
4f271a2a
VN
5266 .write = tracing_free_buffer_write,
5267 .release = tracing_free_buffer_release,
5268};
5269
5e2336a0 5270static const struct file_operations tracing_mark_fops = {
7b85af63 5271 .open = tracing_open_generic_tr,
5bf9a1ee 5272 .write = tracing_mark_write,
b444786f 5273 .llseek = generic_file_llseek,
7b85af63 5274 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5275};
5276
5079f326 5277static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5278 .open = tracing_clock_open,
5279 .read = seq_read,
5280 .llseek = seq_lseek,
7b85af63 5281 .release = tracing_single_release_tr,
5079f326
Z
5282 .write = tracing_clock_write,
5283};
5284
debdd57f
HT
5285#ifdef CONFIG_TRACER_SNAPSHOT
5286static const struct file_operations snapshot_fops = {
5287 .open = tracing_snapshot_open,
5288 .read = seq_read,
5289 .write = tracing_snapshot_write,
098c879e 5290 .llseek = tracing_lseek,
2b6080f2 5291 .release = tracing_snapshot_release,
debdd57f 5292};
debdd57f 5293
6de58e62
SRRH
5294static const struct file_operations snapshot_raw_fops = {
5295 .open = snapshot_raw_open,
5296 .read = tracing_buffers_read,
5297 .release = tracing_buffers_release,
5298 .splice_read = tracing_buffers_splice_read,
5299 .llseek = no_llseek,
2cadf913
SR
5300};
5301
6de58e62
SRRH
5302#endif /* CONFIG_TRACER_SNAPSHOT */
5303
2cadf913
SR
5304static int tracing_buffers_open(struct inode *inode, struct file *filp)
5305{
46ef2be0 5306 struct trace_array *tr = inode->i_private;
2cadf913 5307 struct ftrace_buffer_info *info;
7b85af63 5308 int ret;
2cadf913
SR
5309
5310 if (tracing_disabled)
5311 return -ENODEV;
5312
7b85af63
SRRH
5313 if (trace_array_get(tr) < 0)
5314 return -ENODEV;
5315
2cadf913 5316 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5317 if (!info) {
5318 trace_array_put(tr);
2cadf913 5319 return -ENOMEM;
7b85af63 5320 }
2cadf913 5321
a695cb58
SRRH
5322 mutex_lock(&trace_types_lock);
5323
cc60cdc9 5324 info->iter.tr = tr;
46ef2be0 5325 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5326 info->iter.trace = tr->current_trace;
12883efb 5327 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5328 info->spare = NULL;
2cadf913 5329 /* Force reading ring buffer for first read */
cc60cdc9 5330 info->read = (unsigned int)-1;
2cadf913
SR
5331
5332 filp->private_data = info;
5333
a695cb58
SRRH
5334 mutex_unlock(&trace_types_lock);
5335
7b85af63
SRRH
5336 ret = nonseekable_open(inode, filp);
5337 if (ret < 0)
5338 trace_array_put(tr);
5339
5340 return ret;
2cadf913
SR
5341}
5342
cc60cdc9
SR
5343static unsigned int
5344tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5345{
5346 struct ftrace_buffer_info *info = filp->private_data;
5347 struct trace_iterator *iter = &info->iter;
5348
5349 return trace_poll(iter, filp, poll_table);
5350}
5351
2cadf913
SR
5352static ssize_t
5353tracing_buffers_read(struct file *filp, char __user *ubuf,
5354 size_t count, loff_t *ppos)
5355{
5356 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5357 struct trace_iterator *iter = &info->iter;
2cadf913 5358 ssize_t ret;
6de58e62 5359 ssize_t size;
2cadf913 5360
2dc5d12b
SR
5361 if (!count)
5362 return 0;
5363
6de58e62
SRRH
5364 mutex_lock(&trace_types_lock);
5365
5366#ifdef CONFIG_TRACER_MAX_TRACE
5367 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5368 size = -EBUSY;
5369 goto out_unlock;
5370 }
5371#endif
5372
ddd538f3 5373 if (!info->spare)
12883efb
SRRH
5374 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5375 iter->cpu_file);
6de58e62 5376 size = -ENOMEM;
ddd538f3 5377 if (!info->spare)
6de58e62 5378 goto out_unlock;
ddd538f3 5379
2cadf913
SR
5380 /* Do we have previous read data to read? */
5381 if (info->read < PAGE_SIZE)
5382 goto read;
5383
b627344f 5384 again:
cc60cdc9 5385 trace_access_lock(iter->cpu_file);
12883efb 5386 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5387 &info->spare,
5388 count,
cc60cdc9
SR
5389 iter->cpu_file, 0);
5390 trace_access_unlock(iter->cpu_file);
2cadf913 5391
b627344f
SR
5392 if (ret < 0) {
5393 if (trace_empty(iter)) {
6de58e62
SRRH
5394 if ((filp->f_flags & O_NONBLOCK)) {
5395 size = -EAGAIN;
5396 goto out_unlock;
5397 }
5398 mutex_unlock(&trace_types_lock);
e30f53aa 5399 ret = wait_on_pipe(iter, false);
6de58e62 5400 mutex_lock(&trace_types_lock);
8b8b3683
SRRH
5401 if (ret) {
5402 size = ret;
5403 goto out_unlock;
5404 }
b627344f
SR
5405 goto again;
5406 }
6de58e62
SRRH
5407 size = 0;
5408 goto out_unlock;
b627344f 5409 }
436fc280 5410
436fc280 5411 info->read = 0;
b627344f 5412 read:
2cadf913
SR
5413 size = PAGE_SIZE - info->read;
5414 if (size > count)
5415 size = count;
5416
5417 ret = copy_to_user(ubuf, info->spare + info->read, size);
6de58e62
SRRH
5418 if (ret == size) {
5419 size = -EFAULT;
5420 goto out_unlock;
5421 }
2dc5d12b
SR
5422 size -= ret;
5423
2cadf913
SR
5424 *ppos += size;
5425 info->read += size;
5426
6de58e62
SRRH
5427 out_unlock:
5428 mutex_unlock(&trace_types_lock);
5429
2cadf913
SR
5430 return size;
5431}
5432
5433static int tracing_buffers_release(struct inode *inode, struct file *file)
5434{
5435 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5436 struct trace_iterator *iter = &info->iter;
2cadf913 5437
a695cb58
SRRH
5438 mutex_lock(&trace_types_lock);
5439
ff451961 5440 __trace_array_put(iter->tr);
2cadf913 5441
ddd538f3 5442 if (info->spare)
12883efb 5443 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5444 kfree(info);
5445
a695cb58
SRRH
5446 mutex_unlock(&trace_types_lock);
5447
2cadf913
SR
5448 return 0;
5449}
5450
5451struct buffer_ref {
5452 struct ring_buffer *buffer;
5453 void *page;
5454 int ref;
5455};
5456
5457static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5458 struct pipe_buffer *buf)
5459{
5460 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5461
5462 if (--ref->ref)
5463 return;
5464
5465 ring_buffer_free_read_page(ref->buffer, ref->page);
5466 kfree(ref);
5467 buf->private = 0;
5468}
5469
2cadf913
SR
5470static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5471 struct pipe_buffer *buf)
5472{
5473 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5474
5475 ref->ref++;
5476}
5477
5478/* Pipe buffer operations for a buffer. */
28dfef8f 5479static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5480 .can_merge = 0,
2cadf913
SR
5481 .confirm = generic_pipe_buf_confirm,
5482 .release = buffer_pipe_buf_release,
d55cb6cf 5483 .steal = generic_pipe_buf_steal,
2cadf913
SR
5484 .get = buffer_pipe_buf_get,
5485};
5486
5487/*
5488 * Callback from splice_to_pipe(); release any pages left in the spd
5489 * if we errored out while filling the pipe.
5490 */
5491static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5492{
5493 struct buffer_ref *ref =
5494 (struct buffer_ref *)spd->partial[i].private;
5495
5496 if (--ref->ref)
5497 return;
5498
5499 ring_buffer_free_read_page(ref->buffer, ref->page);
5500 kfree(ref);
5501 spd->partial[i].private = 0;
5502}
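/*
 * Lifetime note for the pages spliced out of the ring buffer: each page is
 * wrapped in a buffer_ref whose ->ref count starts at 1 when it is queued
 * in tracing_buffers_splice_read() below, is bumped by buffer_pipe_buf_get()
 * when the pipe buffer is duplicated (e.g. by tee(2)), and is dropped by
 * buffer_pipe_buf_release() or buffer_spd_release(). Only when the count
 * reaches zero is the page handed back via ring_buffer_free_read_page().
 */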
5503
5504static ssize_t
5505tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5506 struct pipe_inode_info *pipe, size_t len,
5507 unsigned int flags)
5508{
5509 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5510 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5511 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5512 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5513 struct splice_pipe_desc spd = {
35f3d14d
JA
5514 .pages = pages_def,
5515 .partial = partial_def,
047fe360 5516 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5517 .flags = flags,
5518 .ops = &buffer_pipe_buf_ops,
5519 .spd_release = buffer_spd_release,
5520 };
5521 struct buffer_ref *ref;
93459c6c 5522 int entries, size, i;
07906da7 5523 ssize_t ret = 0;
2cadf913 5524
6de58e62
SRRH
5525 mutex_lock(&trace_types_lock);
5526
5527#ifdef CONFIG_TRACER_MAX_TRACE
5528 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5529 ret = -EBUSY;
5530 goto out;
5531 }
5532#endif
5533
5534 if (splice_grow_spd(pipe, &spd)) {
5535 ret = -ENOMEM;
5536 goto out;
5537 }
35f3d14d 5538
93cfb3c9 5539 if (*ppos & (PAGE_SIZE - 1)) {
35f3d14d
JA
5540 ret = -EINVAL;
5541 goto out;
93cfb3c9
LJ
5542 }
5543
5544 if (len & (PAGE_SIZE - 1)) {
35f3d14d
JA
5545 if (len < PAGE_SIZE) {
5546 ret = -EINVAL;
5547 goto out;
5548 }
93cfb3c9
LJ
5549 len &= PAGE_MASK;
5550 }
5551
cc60cdc9
SR
5552 again:
5553 trace_access_lock(iter->cpu_file);
12883efb 5554 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5555
a786c06d 5556 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5557 struct page *page;
5558 int r;
5559
5560 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5561 if (!ref) {
5562 ret = -ENOMEM;
2cadf913 5563 break;
07906da7 5564 }
2cadf913 5565
7267fa68 5566 ref->ref = 1;
12883efb 5567 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5568 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5569 if (!ref->page) {
07906da7 5570 ret = -ENOMEM;
2cadf913
SR
5571 kfree(ref);
5572 break;
5573 }
5574
5575 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5576 len, iter->cpu_file, 1);
2cadf913 5577 if (r < 0) {
7ea59064 5578 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5579 kfree(ref);
5580 break;
5581 }
5582
5583 /*
5584 * Zero out any leftover data; this page is going
5585 * to user land.
5586 */
5587 size = ring_buffer_page_len(ref->page);
5588 if (size < PAGE_SIZE)
5589 memset(ref->page + size, 0, PAGE_SIZE - size);
5590
5591 page = virt_to_page(ref->page);
5592
5593 spd.pages[i] = page;
5594 spd.partial[i].len = PAGE_SIZE;
5595 spd.partial[i].offset = 0;
5596 spd.partial[i].private = (unsigned long)ref;
5597 spd.nr_pages++;
93cfb3c9 5598 *ppos += PAGE_SIZE;
93459c6c 5599
12883efb 5600 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5601 }
5602
cc60cdc9 5603 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5604 spd.nr_pages = i;
5605
5606 /* did we read anything? */
5607 if (!spd.nr_pages) {
07906da7
RV
5608 if (ret)
5609 goto out;
5610
cc60cdc9 5611 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
2cadf913 5612 ret = -EAGAIN;
cc60cdc9
SR
5613 goto out;
5614 }
6de58e62 5615 mutex_unlock(&trace_types_lock);
e30f53aa 5616 ret = wait_on_pipe(iter, true);
6de58e62 5617 mutex_lock(&trace_types_lock);
8b8b3683
SRRH
5618 if (ret)
5619 goto out;
e30f53aa 5620
cc60cdc9 5621 goto again;
2cadf913
SR
5622 }
5623
5624 ret = splice_to_pipe(pipe, &spd);
047fe360 5625 splice_shrink_spd(&spd);
35f3d14d 5626out:
6de58e62
SRRH
5627 mutex_unlock(&trace_types_lock);
5628
2cadf913
SR
5629 return ret;
5630}
5631
5632static const struct file_operations tracing_buffers_fops = {
5633 .open = tracing_buffers_open,
5634 .read = tracing_buffers_read,
cc60cdc9 5635 .poll = tracing_buffers_poll,
2cadf913
SR
5636 .release = tracing_buffers_release,
5637 .splice_read = tracing_buffers_splice_read,
5638 .llseek = no_llseek,
5639};
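These operations serve the per_cpu/cpuN/trace_pipe_raw files created below. A rough sketch of the consumer side follows; the path and the 4096-byte page size are assumptions based on the usual tracefs layout, and a real consumer would normally hand the binary pages to a library such as libtraceevent:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];	/* one ring-buffer page per read() */
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open trace_pipe_raw");
		return 1;
	}
	/* Drain whatever is currently buffered for CPU 0. */
	while ((n = read(fd, page, sizeof(page))) > 0)
		fwrite(page, 1, n, stdout);
	close(fd);
	return 0;
}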
5640
c8d77183
SR
5641static ssize_t
5642tracing_stats_read(struct file *filp, char __user *ubuf,
5643 size_t count, loff_t *ppos)
5644{
4d3435b8
ON
5645 struct inode *inode = file_inode(filp);
5646 struct trace_array *tr = inode->i_private;
12883efb 5647 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5648 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5649 struct trace_seq *s;
5650 unsigned long cnt;
c64e148a
VN
5651 unsigned long long t;
5652 unsigned long usec_rem;
c8d77183 5653
e4f2d10f 5654 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5655 if (!s)
a646365c 5656 return -ENOMEM;
c8d77183
SR
5657
5658 trace_seq_init(s);
5659
12883efb 5660 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5661 trace_seq_printf(s, "entries: %ld\n", cnt);
5662
12883efb 5663 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5664 trace_seq_printf(s, "overrun: %ld\n", cnt);
5665
12883efb 5666 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5667 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5668
12883efb 5669 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5670 trace_seq_printf(s, "bytes: %ld\n", cnt);
5671
58e8eedf 5672 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5673 /* local or global for trace_clock */
12883efb 5674 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5675 usec_rem = do_div(t, USEC_PER_SEC);
5676 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5677 t, usec_rem);
5678
12883efb 5679 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5680 usec_rem = do_div(t, USEC_PER_SEC);
5681 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5682 } else {
5683 /* counter or tsc mode for trace_clock */
5684 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5685 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5686
11043d8b 5687 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5688 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5689 }
c64e148a 5690
12883efb 5691 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5692 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5693
12883efb 5694 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5695 trace_seq_printf(s, "read events: %ld\n", cnt);
5696
5ac48378
SRRH
5697 count = simple_read_from_buffer(ubuf, count, ppos,
5698 s->buffer, trace_seq_used(s));
c8d77183
SR
5699
5700 kfree(s);
5701
5702 return count;
5703}
5704
5705static const struct file_operations tracing_stats_fops = {
4d3435b8 5706 .open = tracing_open_generic_tr,
c8d77183 5707 .read = tracing_stats_read,
b444786f 5708 .llseek = generic_file_llseek,
4d3435b8 5709 .release = tracing_release_generic_tr,
c8d77183
SR
5710};
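Read through per_cpu/cpuN/stats, the sequence assembled above comes out as one counter per line. An illustrative reading (the numbers are invented) looks roughly like:

entries: 1024
overrun: 0
commit overrun: 0
bytes: 53280
oldest event ts:  5693.961499
now ts:  5706.391428
dropped events: 0
read events: 420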
5711
bc0c38d1
SR
5712#ifdef CONFIG_DYNAMIC_FTRACE
5713
b807c3d0
SR
5714int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5715{
5716 return 0;
5717}
5718
bc0c38d1 5719static ssize_t
b807c3d0 5720tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5721 size_t cnt, loff_t *ppos)
5722{
a26a2a27
SR
5723 static char ftrace_dyn_info_buffer[1024];
5724 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5725 unsigned long *p = filp->private_data;
b807c3d0 5726 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5727 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5728 int r;
5729
b807c3d0
SR
5730 mutex_lock(&dyn_info_mutex);
5731 r = sprintf(buf, "%ld ", *p);
4bf39a94 5732
a26a2a27 5733 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5734 buf[r++] = '\n';
5735
5736 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5737
5738 mutex_unlock(&dyn_info_mutex);
5739
5740 return r;
bc0c38d1
SR
5741}
5742
5e2336a0 5743static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5744 .open = tracing_open_generic,
b807c3d0 5745 .read = tracing_read_dyn_info,
b444786f 5746 .llseek = generic_file_llseek,
bc0c38d1 5747};
77fd5c15 5748#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5749
77fd5c15
SRRH
5750#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5751static void
5752ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5753{
5754 tracing_snapshot();
5755}
bc0c38d1 5756
77fd5c15
SRRH
5757static void
5758ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5759{
77fd5c15
SRRH
5760 unsigned long *count = (long *)data;
5761
5762 if (!*count)
5763 return;
bc0c38d1 5764
77fd5c15
SRRH
5765 if (*count != -1)
5766 (*count)--;
5767
5768 tracing_snapshot();
5769}
5770
5771static int
5772ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5773 struct ftrace_probe_ops *ops, void *data)
5774{
5775 long count = (long)data;
5776
5777 seq_printf(m, "%ps:", (void *)ip);
5778
fa6f0cc7 5779 seq_puts(m, "snapshot");
77fd5c15
SRRH
5780
5781 if (count == -1)
fa6f0cc7 5782 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
5783 else
5784 seq_printf(m, ":count=%ld\n", count);
5785
5786 return 0;
5787}
5788
5789static struct ftrace_probe_ops snapshot_probe_ops = {
5790 .func = ftrace_snapshot,
5791 .print = ftrace_snapshot_print,
5792};
5793
5794static struct ftrace_probe_ops snapshot_count_probe_ops = {
5795 .func = ftrace_count_snapshot,
5796 .print = ftrace_snapshot_print,
5797};
5798
5799static int
5800ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5801 char *glob, char *cmd, char *param, int enable)
5802{
5803 struct ftrace_probe_ops *ops;
5804 void *count = (void *)-1;
5805 char *number;
5806 int ret;
5807
5808 /* hash funcs only work with set_ftrace_filter */
5809 if (!enable)
5810 return -EINVAL;
5811
5812 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5813
5814 if (glob[0] == '!') {
5815 unregister_ftrace_function_probe_func(glob+1, ops);
5816 return 0;
5817 }
5818
5819 if (!param)
5820 goto out_reg;
5821
5822 number = strsep(&param, ":");
5823
5824 if (!strlen(number))
5825 goto out_reg;
5826
5827 /*
5828 * We use the callback data field (which is a pointer)
5829 * as our counter.
5830 */
5831 ret = kstrtoul(number, 0, (unsigned long *)&count);
5832 if (ret)
5833 return ret;
5834
5835 out_reg:
5836 ret = register_ftrace_function_probe(glob, ops, count);
5837
5838 if (ret >= 0)
5839 alloc_snapshot(&global_trace);
5840
5841 return ret < 0 ? ret : 0;
5842}
5843
5844static struct ftrace_func_command ftrace_snapshot_cmd = {
5845 .name = "snapshot",
5846 .func = ftrace_trace_snapshot_callback,
5847};
5848
38de93ab 5849static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
5850{
5851 return register_ftrace_command(&ftrace_snapshot_cmd);
5852}
5853#else
38de93ab 5854static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 5855#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
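Once registered, the snapshot command is driven entirely through set_ftrace_filter. The usage below is a hedged sketch: the paths assume the standard tracefs mount, and schedule is only an example of a traceable function.

/*
 *   # cd /sys/kernel/debug/tracing
 *   # echo 'schedule:snapshot'   > set_ftrace_filter   snapshot on every hit
 *   # echo 'schedule:snapshot:3' > set_ftrace_filter   only the first 3 hits
 *   # echo '!schedule:snapshot'  > set_ftrace_filter   remove the probe
 *   # cat snapshot                                      read the saved buffer
 */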
bc0c38d1 5856
2b6080f2 5857struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
bc0c38d1 5858{
2b6080f2
SR
5859 if (tr->dir)
5860 return tr->dir;
bc0c38d1 5861
3e1f60b8
FW
5862 if (!debugfs_initialized())
5863 return NULL;
5864
2b6080f2
SR
5865 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5866 tr->dir = debugfs_create_dir("tracing", NULL);
bc0c38d1 5867
687c878a
J
5868 if (!tr->dir)
5869 pr_warn_once("Could not create debugfs directory 'tracing'\n");
bc0c38d1 5870
2b6080f2 5871 return tr->dir;
bc0c38d1
SR
5872}
5873
2b6080f2
SR
5874struct dentry *tracing_init_dentry(void)
5875{
5876 return tracing_init_dentry_tr(&global_trace);
5877}
b04cc6b1 5878
2b6080f2 5879static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 5880{
b04cc6b1
FW
5881 struct dentry *d_tracer;
5882
2b6080f2
SR
5883 if (tr->percpu_dir)
5884 return tr->percpu_dir;
b04cc6b1 5885
2b6080f2 5886 d_tracer = tracing_init_dentry_tr(tr);
b04cc6b1
FW
5887 if (!d_tracer)
5888 return NULL;
5889
2b6080f2 5890 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
b04cc6b1 5891
2b6080f2
SR
5892 WARN_ONCE(!tr->percpu_dir,
5893 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 5894
2b6080f2 5895 return tr->percpu_dir;
b04cc6b1
FW
5896}
5897
649e9c70
ON
5898static struct dentry *
5899trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5900 void *data, long cpu, const struct file_operations *fops)
5901{
5902 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5903
5904 if (ret) /* See tracing_get_cpu() */
5905 ret->d_inode->i_cdev = (void *)(cpu + 1);
5906 return ret;
5907}
5908
2b6080f2
SR
5909static void
5910tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 5911{
2b6080f2 5912 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 5913 struct dentry *d_cpu;
dd49a38c 5914 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 5915
0a3d7ce7
NK
5916 if (!d_percpu)
5917 return;
5918
dd49a38c 5919 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8656e7a2
FW
5920 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5921 if (!d_cpu) {
5922 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5923 return;
5924 }
b04cc6b1 5925
8656e7a2 5926 /* per cpu trace_pipe */
649e9c70 5927 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 5928 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
5929
5930 /* per cpu trace */
649e9c70 5931 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 5932 tr, cpu, &tracing_fops);
7f96f93f 5933
649e9c70 5934 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 5935 tr, cpu, &tracing_buffers_fops);
7f96f93f 5936
649e9c70 5937 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 5938 tr, cpu, &tracing_stats_fops);
438ced17 5939
649e9c70 5940 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 5941 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
5942
5943#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 5944 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 5945 tr, cpu, &snapshot_fops);
6de58e62 5946
649e9c70 5947 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 5948 tr, cpu, &snapshot_raw_fops);
f1affcaa 5949#endif
b04cc6b1
FW
5950}
5951
60a11774
SR
5952#ifdef CONFIG_FTRACE_SELFTEST
5953/* Let selftest have access to static functions in this file */
5954#include "trace_selftest.c"
5955#endif
5956
577b785f
SR
5957struct trace_option_dentry {
5958 struct tracer_opt *opt;
5959 struct tracer_flags *flags;
2b6080f2 5960 struct trace_array *tr;
577b785f
SR
5961 struct dentry *entry;
5962};
5963
5964static ssize_t
5965trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5966 loff_t *ppos)
5967{
5968 struct trace_option_dentry *topt = filp->private_data;
5969 char *buf;
5970
5971 if (topt->flags->val & topt->opt->bit)
5972 buf = "1\n";
5973 else
5974 buf = "0\n";
5975
5976 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5977}
5978
5979static ssize_t
5980trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5981 loff_t *ppos)
5982{
5983 struct trace_option_dentry *topt = filp->private_data;
5984 unsigned long val;
577b785f
SR
5985 int ret;
5986
22fe9b54
PH
5987 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5988 if (ret)
577b785f
SR
5989 return ret;
5990
8d18eaaf
LZ
5991 if (val != 0 && val != 1)
5992 return -EINVAL;
577b785f 5993
8d18eaaf 5994 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 5995 mutex_lock(&trace_types_lock);
8c1a49ae 5996 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 5997 topt->opt, !val);
577b785f
SR
5998 mutex_unlock(&trace_types_lock);
5999 if (ret)
6000 return ret;
577b785f
SR
6001 }
6002
6003 *ppos += cnt;
6004
6005 return cnt;
6006}
6007
6008
6009static const struct file_operations trace_options_fops = {
6010 .open = tracing_open_generic,
6011 .read = trace_options_read,
6012 .write = trace_options_write,
b444786f 6013 .llseek = generic_file_llseek,
577b785f
SR
6014};
6015
a8259075
SR
6016static ssize_t
6017trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6018 loff_t *ppos)
6019{
6020 long index = (long)filp->private_data;
6021 char *buf;
6022
6023 if (trace_flags & (1 << index))
6024 buf = "1\n";
6025 else
6026 buf = "0\n";
6027
6028 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6029}
6030
6031static ssize_t
6032trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6033 loff_t *ppos)
6034{
2b6080f2 6035 struct trace_array *tr = &global_trace;
a8259075 6036 long index = (long)filp->private_data;
a8259075
SR
6037 unsigned long val;
6038 int ret;
6039
22fe9b54
PH
6040 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6041 if (ret)
a8259075
SR
6042 return ret;
6043
f2d84b65 6044 if (val != 0 && val != 1)
a8259075 6045 return -EINVAL;
69d34da2
SRRH
6046
6047 mutex_lock(&trace_types_lock);
2b6080f2 6048 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6049 mutex_unlock(&trace_types_lock);
a8259075 6050
613f04a0
SRRH
6051 if (ret < 0)
6052 return ret;
6053
a8259075
SR
6054 *ppos += cnt;
6055
6056 return cnt;
6057}
6058
a8259075
SR
6059static const struct file_operations trace_options_core_fops = {
6060 .open = tracing_open_generic,
6061 .read = trace_options_core_read,
6062 .write = trace_options_core_write,
b444786f 6063 .llseek = generic_file_llseek,
a8259075
SR
6064};
6065
5452af66 6066struct dentry *trace_create_file(const char *name,
f4ae40a6 6067 umode_t mode,
5452af66
FW
6068 struct dentry *parent,
6069 void *data,
6070 const struct file_operations *fops)
6071{
6072 struct dentry *ret;
6073
6074 ret = debugfs_create_file(name, mode, parent, data, fops);
6075 if (!ret)
6076 pr_warning("Could not create debugfs '%s' entry\n", name);
6077
6078 return ret;
6079}
6080
6081
2b6080f2 6082static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6083{
6084 struct dentry *d_tracer;
a8259075 6085
2b6080f2
SR
6086 if (tr->options)
6087 return tr->options;
a8259075 6088
2b6080f2 6089 d_tracer = tracing_init_dentry_tr(tr);
a8259075
SR
6090 if (!d_tracer)
6091 return NULL;
6092
2b6080f2
SR
6093 tr->options = debugfs_create_dir("options", d_tracer);
6094 if (!tr->options) {
a8259075
SR
6095 pr_warning("Could not create debugfs directory 'options'\n");
6096 return NULL;
6097 }
6098
2b6080f2 6099 return tr->options;
a8259075
SR
6100}
6101
577b785f 6102static void
2b6080f2
SR
6103create_trace_option_file(struct trace_array *tr,
6104 struct trace_option_dentry *topt,
577b785f
SR
6105 struct tracer_flags *flags,
6106 struct tracer_opt *opt)
6107{
6108 struct dentry *t_options;
577b785f 6109
2b6080f2 6110 t_options = trace_options_init_dentry(tr);
577b785f
SR
6111 if (!t_options)
6112 return;
6113
6114 topt->flags = flags;
6115 topt->opt = opt;
2b6080f2 6116 topt->tr = tr;
577b785f 6117
5452af66 6118 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6119 &trace_options_fops);
6120
577b785f
SR
6121}
6122
6123static struct trace_option_dentry *
2b6080f2 6124create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6125{
6126 struct trace_option_dentry *topts;
6127 struct tracer_flags *flags;
6128 struct tracer_opt *opts;
6129 int cnt;
6130
6131 if (!tracer)
6132 return NULL;
6133
6134 flags = tracer->flags;
6135
6136 if (!flags || !flags->opts)
6137 return NULL;
6138
6139 opts = flags->opts;
6140
6141 for (cnt = 0; opts[cnt].name; cnt++)
6142 ;
6143
0cfe8245 6144 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
6145 if (!topts)
6146 return NULL;
6147
6148 for (cnt = 0; opts[cnt].name; cnt++)
2b6080f2 6149 create_trace_option_file(tr, &topts[cnt], flags,
577b785f
SR
6150 &opts[cnt]);
6151
6152 return topts;
6153}
6154
6155static void
6156destroy_trace_option_files(struct trace_option_dentry *topts)
6157{
6158 int cnt;
6159
6160 if (!topts)
6161 return;
6162
3f4d8f78
FF
6163 for (cnt = 0; topts[cnt].opt; cnt++)
6164 debugfs_remove(topts[cnt].entry);
577b785f
SR
6165
6166 kfree(topts);
6167}
6168
a8259075 6169static struct dentry *
2b6080f2
SR
6170create_trace_option_core_file(struct trace_array *tr,
6171 const char *option, long index)
a8259075
SR
6172{
6173 struct dentry *t_options;
a8259075 6174
2b6080f2 6175 t_options = trace_options_init_dentry(tr);
a8259075
SR
6176 if (!t_options)
6177 return NULL;
6178
5452af66 6179 return trace_create_file(option, 0644, t_options, (void *)index,
a8259075 6180 &trace_options_core_fops);
a8259075
SR
6181}
6182
2b6080f2 6183static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6184{
6185 struct dentry *t_options;
a8259075
SR
6186 int i;
6187
2b6080f2 6188 t_options = trace_options_init_dentry(tr);
a8259075
SR
6189 if (!t_options)
6190 return;
6191
5452af66 6192 for (i = 0; trace_options[i]; i++)
2b6080f2 6193 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
6194}
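The loop above gives every core flag in trace_options its own boolean file under options/. A brief sketch of the resulting interface, with print-parent used only as an example flag name:

/*
 *   # cat  options/print-parent        reads back "1\n" or "0\n"
 *   # echo 0 > options/print-parent    clear the flag
 *   # echo 1 > options/print-parent    set it again (only 0 and 1 are accepted)
 */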
6195
499e5470
SR
6196static ssize_t
6197rb_simple_read(struct file *filp, char __user *ubuf,
6198 size_t cnt, loff_t *ppos)
6199{
348f0fc2 6200 struct trace_array *tr = filp->private_data;
499e5470
SR
6201 char buf[64];
6202 int r;
6203
10246fa3 6204 r = tracer_tracing_is_on(tr);
499e5470
SR
6205 r = sprintf(buf, "%d\n", r);
6206
6207 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6208}
6209
6210static ssize_t
6211rb_simple_write(struct file *filp, const char __user *ubuf,
6212 size_t cnt, loff_t *ppos)
6213{
348f0fc2 6214 struct trace_array *tr = filp->private_data;
12883efb 6215 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6216 unsigned long val;
6217 int ret;
6218
6219 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6220 if (ret)
6221 return ret;
6222
6223 if (buffer) {
2df8f8a6
SR
6224 mutex_lock(&trace_types_lock);
6225 if (val) {
10246fa3 6226 tracer_tracing_on(tr);
2b6080f2
SR
6227 if (tr->current_trace->start)
6228 tr->current_trace->start(tr);
2df8f8a6 6229 } else {
10246fa3 6230 tracer_tracing_off(tr);
2b6080f2
SR
6231 if (tr->current_trace->stop)
6232 tr->current_trace->stop(tr);
2df8f8a6
SR
6233 }
6234 mutex_unlock(&trace_types_lock);
499e5470
SR
6235 }
6236
6237 (*ppos)++;
6238
6239 return cnt;
6240}
6241
6242static const struct file_operations rb_simple_fops = {
7b85af63 6243 .open = tracing_open_generic_tr,
499e5470
SR
6244 .read = rb_simple_read,
6245 .write = rb_simple_write,
7b85af63 6246 .release = tracing_release_generic_tr,
499e5470
SR
6247 .llseek = default_llseek,
6248};
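rb_simple_write() is what sits behind the tracing_on file: writing 0 turns the ring buffer off (and calls the tracer's ->stop), writing 1 turns it back on, and a read reports the current state. A minimal sketch of toggling it around a workload, with the usual tracefs path assumed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative helper, not part of the kernel: write "0" or "1" to tracing_on. */
static void set_tracing_on(const char *val)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

	if (fd < 0) {
		perror("open tracing_on");
		return;
	}
	if (write(fd, val, 1) != 1)
		perror("write tracing_on");
	close(fd);
}

/* Typical use: set_tracing_on("0"); run the workload; set_tracing_on("1"); */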
6249
277ba044
SR
6250struct dentry *trace_instance_dir;
6251
6252static void
6253init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6254
55034cd6
SRRH
6255static int
6256allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6257{
6258 enum ring_buffer_flags rb_flags;
737223fb
SRRH
6259
6260 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6261
dced341b
SRRH
6262 buf->tr = tr;
6263
55034cd6
SRRH
6264 buf->buffer = ring_buffer_alloc(size, rb_flags);
6265 if (!buf->buffer)
6266 return -ENOMEM;
737223fb 6267
55034cd6
SRRH
6268 buf->data = alloc_percpu(struct trace_array_cpu);
6269 if (!buf->data) {
6270 ring_buffer_free(buf->buffer);
6271 return -ENOMEM;
6272 }
737223fb 6273
737223fb
SRRH
6274 /* Allocate the first page for all buffers */
6275 set_buffer_entries(&tr->trace_buffer,
6276 ring_buffer_size(tr->trace_buffer.buffer, 0));
6277
55034cd6
SRRH
6278 return 0;
6279}
737223fb 6280
55034cd6
SRRH
6281static int allocate_trace_buffers(struct trace_array *tr, int size)
6282{
6283 int ret;
737223fb 6284
55034cd6
SRRH
6285 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6286 if (ret)
6287 return ret;
737223fb 6288
55034cd6
SRRH
6289#ifdef CONFIG_TRACER_MAX_TRACE
6290 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6291 allocate_snapshot ? size : 1);
6292 if (WARN_ON(ret)) {
737223fb 6293 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6294 free_percpu(tr->trace_buffer.data);
6295 return -ENOMEM;
6296 }
6297 tr->allocated_snapshot = allocate_snapshot;
737223fb 6298
55034cd6
SRRH
6299 /*
6300 * Only the top level trace array gets its snapshot allocated
6301 * from the kernel command line.
6302 */
6303 allocate_snapshot = false;
737223fb 6304#endif
55034cd6 6305 return 0;
737223fb
SRRH
6306}
6307
f0b70cc4
SRRH
6308static void free_trace_buffer(struct trace_buffer *buf)
6309{
6310 if (buf->buffer) {
6311 ring_buffer_free(buf->buffer);
6312 buf->buffer = NULL;
6313 free_percpu(buf->data);
6314 buf->data = NULL;
6315 }
6316}
6317
23aaa3c1
SRRH
6318static void free_trace_buffers(struct trace_array *tr)
6319{
6320 if (!tr)
6321 return;
6322
f0b70cc4 6323 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6324
6325#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6326 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6327#endif
6328}
6329
737223fb
SRRH
6330static int new_instance_create(const char *name)
6331{
277ba044
SR
6332 struct trace_array *tr;
6333 int ret;
277ba044
SR
6334
6335 mutex_lock(&trace_types_lock);
6336
6337 ret = -EEXIST;
6338 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6339 if (tr->name && strcmp(tr->name, name) == 0)
6340 goto out_unlock;
6341 }
6342
6343 ret = -ENOMEM;
6344 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6345 if (!tr)
6346 goto out_unlock;
6347
6348 tr->name = kstrdup(name, GFP_KERNEL);
6349 if (!tr->name)
6350 goto out_free_tr;
6351
ccfe9e42
AL
6352 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6353 goto out_free_tr;
6354
6355 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6356
277ba044
SR
6357 raw_spin_lock_init(&tr->start_lock);
6358
0b9b12c1
SRRH
6359 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6360
277ba044
SR
6361 tr->current_trace = &nop_trace;
6362
6363 INIT_LIST_HEAD(&tr->systems);
6364 INIT_LIST_HEAD(&tr->events);
6365
737223fb 6366 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6367 goto out_free_tr;
6368
277ba044
SR
6369 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6370 if (!tr->dir)
6371 goto out_free_tr;
6372
6373 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7
AL
6374 if (ret) {
6375 debugfs_remove_recursive(tr->dir);
277ba044 6376 goto out_free_tr;
609e85a7 6377 }
277ba044
SR
6378
6379 init_tracer_debugfs(tr, tr->dir);
6380
6381 list_add(&tr->list, &ftrace_trace_arrays);
6382
6383 mutex_unlock(&trace_types_lock);
6384
6385 return 0;
6386
6387 out_free_tr:
23aaa3c1 6388 free_trace_buffers(tr);
ccfe9e42 6389 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6390 kfree(tr->name);
6391 kfree(tr);
6392
6393 out_unlock:
6394 mutex_unlock(&trace_types_lock);
6395
6396 return ret;
6397
6398}
6399
0c8916c3
SR
6400static int instance_delete(const char *name)
6401{
6402 struct trace_array *tr;
6403 int found = 0;
6404 int ret;
6405
6406 mutex_lock(&trace_types_lock);
6407
6408 ret = -ENODEV;
6409 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6410 if (tr->name && strcmp(tr->name, name) == 0) {
6411 found = 1;
6412 break;
6413 }
6414 }
6415 if (!found)
6416 goto out_unlock;
6417
a695cb58
SRRH
6418 ret = -EBUSY;
6419 if (tr->ref)
6420 goto out_unlock;
6421
0c8916c3
SR
6422 list_del(&tr->list);
6423
6b450d25 6424 tracing_set_nop(tr);
0c8916c3 6425 event_trace_del_tracer(tr);
591dffda 6426 ftrace_destroy_function_files(tr);
0c8916c3 6427 debugfs_remove_recursive(tr->dir);
a9fcaaac 6428 free_trace_buffers(tr);
0c8916c3
SR
6429
6430 kfree(tr->name);
6431 kfree(tr);
6432
6433 ret = 0;
6434
6435 out_unlock:
6436 mutex_unlock(&trace_types_lock);
6437
6438 return ret;
6439}
6440
277ba044
SR
6441static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6442{
6443 struct dentry *parent;
6444 int ret;
6445
6446 /* Paranoid: Make sure the parent is the "instances" directory */
946e51f2 6447 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
277ba044
SR
6448 if (WARN_ON_ONCE(parent != trace_instance_dir))
6449 return -ENOENT;
6450
6451 /*
6452 * The inode mutex is locked, but debugfs_create_dir() will also
6453 * take the mutex. As the instances directory can not be destroyed
6454 * or changed in any other way, it is safe to unlock it, and
6455 * let the dentry try. If two users try to make the same dir at
6456 * the same time, then the new_instance_create() will determine the
6457 * winner.
6458 */
6459 mutex_unlock(&inode->i_mutex);
6460
6461 ret = new_instance_create(dentry->d_iname);
6462
6463 mutex_lock(&inode->i_mutex);
6464
6465 return ret;
6466}
6467
0c8916c3
SR
6468static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6469{
6470 struct dentry *parent;
6471 int ret;
6472
6473 /* Paranoid: Make sure the parent is the "instances" directory */
946e51f2 6474 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
0c8916c3
SR
6475 if (WARN_ON_ONCE(parent != trace_instance_dir))
6476 return -ENOENT;
6477
6478 /* The caller did a dget() on dentry */
6479 mutex_unlock(&dentry->d_inode->i_mutex);
6480
6481 /*
6482 * The inode mutex is locked, but debugfs_create_dir() will also
6483 * take the mutex. As the instances directory can not be destroyed
6484 * or changed in any other way, it is safe to unlock it, and
6485 * let the dentry try. If two users try to make the same dir at
6486 * the same time, then the instance_delete() will determine the
6487 * winner.
6488 */
6489 mutex_unlock(&inode->i_mutex);
6490
6491 ret = instance_delete(dentry->d_iname);
6492
6493 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6494 mutex_lock(&dentry->d_inode->i_mutex);
6495
6496 return ret;
6497}
6498
277ba044
SR
6499static const struct inode_operations instance_dir_inode_operations = {
6500 .lookup = simple_lookup,
6501 .mkdir = instance_mkdir,
0c8916c3 6502 .rmdir = instance_rmdir,
277ba044
SR
6503};
6504
6505static __init void create_trace_instances(struct dentry *d_tracer)
6506{
6507 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6508 if (WARN_ON(!trace_instance_dir))
6509 return;
6510
6511 /* Hijack the dir inode operations, to allow mkdir */
6512 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6513}
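Because the directory inode operations are hijacked above, trace instances are created and destroyed with plain mkdir/rmdir. Illustrative usage, with the tracefs mount point assumed:

/*
 *   # mkdir /sys/kernel/debug/tracing/instances/foo     new trace array with
 *                                                       its own buffer and events
 *   # echo 1 > instances/foo/events/sched/enable        trace sched events there
 *   # rmdir /sys/kernel/debug/tracing/instances/foo     tear it down; fails with
 *                                                       -EBUSY while still in use
 */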
6514
2b6080f2
SR
6515static void
6516init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6517{
121aaee7 6518 int cpu;
2b6080f2 6519
607e2ea1
SRRH
6520 trace_create_file("available_tracers", 0444, d_tracer,
6521 tr, &show_traces_fops);
6522
6523 trace_create_file("current_tracer", 0644, d_tracer,
6524 tr, &set_tracer_fops);
6525
ccfe9e42
AL
6526 trace_create_file("tracing_cpumask", 0644, d_tracer,
6527 tr, &tracing_cpumask_fops);
6528
2b6080f2
SR
6529 trace_create_file("trace_options", 0644, d_tracer,
6530 tr, &tracing_iter_fops);
6531
6532 trace_create_file("trace", 0644, d_tracer,
6484c71c 6533 tr, &tracing_fops);
2b6080f2
SR
6534
6535 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6536 tr, &tracing_pipe_fops);
2b6080f2
SR
6537
6538 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6539 tr, &tracing_entries_fops);
2b6080f2
SR
6540
6541 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6542 tr, &tracing_total_entries_fops);
6543
238ae93d 6544 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6545 tr, &tracing_free_buffer_fops);
6546
6547 trace_create_file("trace_marker", 0220, d_tracer,
6548 tr, &tracing_mark_fops);
6549
6550 trace_create_file("trace_clock", 0644, d_tracer, tr,
6551 &trace_clock_fops);
6552
6553 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6554 tr, &rb_simple_fops);
ce9bae55 6555
6d9b3fa5
SRRH
6556#ifdef CONFIG_TRACER_MAX_TRACE
6557 trace_create_file("tracing_max_latency", 0644, d_tracer,
6558 &tr->max_latency, &tracing_max_lat_fops);
6559#endif
6560
591dffda
SRRH
6561 if (ftrace_create_function_files(tr, d_tracer))
6562 WARN(1, "Could not allocate function filter files");
6563
ce9bae55
SRRH
6564#ifdef CONFIG_TRACER_SNAPSHOT
6565 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6566 tr, &snapshot_fops);
ce9bae55 6567#endif
121aaee7
SRRH
6568
6569 for_each_tracing_cpu(cpu)
6570 tracing_init_debugfs_percpu(tr, cpu);
6571
2b6080f2
SR
6572}
6573
b5ad384e 6574static __init int tracer_init_debugfs(void)
bc0c38d1
SR
6575{
6576 struct dentry *d_tracer;
bc0c38d1 6577
7e53bd42
LJ
6578 trace_access_lock_init();
6579
bc0c38d1 6580 d_tracer = tracing_init_dentry();
ed6f1c99
NK
6581 if (!d_tracer)
6582 return 0;
bc0c38d1 6583
2b6080f2 6584 init_tracer_debugfs(&global_trace, d_tracer);
bc0c38d1 6585
5452af66 6586 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6587 &global_trace, &tracing_thresh_fops);
a8259075 6588
339ae5d3 6589 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6590 NULL, &tracing_readme_fops);
6591
69abe6a5
AP
6592 trace_create_file("saved_cmdlines", 0444, d_tracer,
6593 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6594
939c7a4f
YY
6595 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6596 NULL, &tracing_saved_cmdlines_size_fops);
6597
bc0c38d1 6598#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6599 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6600 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6601#endif
b04cc6b1 6602
277ba044 6603 create_trace_instances(d_tracer);
5452af66 6604
2b6080f2 6605 create_trace_options_dir(&global_trace);
b04cc6b1 6606
b5ad384e 6607 return 0;
bc0c38d1
SR
6608}
6609
3f5a54e3
SR
6610static int trace_panic_handler(struct notifier_block *this,
6611 unsigned long event, void *unused)
6612{
944ac425 6613 if (ftrace_dump_on_oops)
cecbca96 6614 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6615 return NOTIFY_OK;
6616}
6617
6618static struct notifier_block trace_panic_notifier = {
6619 .notifier_call = trace_panic_handler,
6620 .next = NULL,
6621 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6622};
6623
6624static int trace_die_handler(struct notifier_block *self,
6625 unsigned long val,
6626 void *data)
6627{
6628 switch (val) {
6629 case DIE_OOPS:
944ac425 6630 if (ftrace_dump_on_oops)
cecbca96 6631 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6632 break;
6633 default:
6634 break;
6635 }
6636 return NOTIFY_OK;
6637}
6638
6639static struct notifier_block trace_die_notifier = {
6640 .notifier_call = trace_die_handler,
6641 .priority = 200
6642};
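Both notifiers end up in ftrace_dump(), so the whole buffer is spilled to the console on a panic or fatal die event whenever ftrace_dump_on_oops is non-zero. That is normally set from the boot command line or the matching sysctl; the values below follow the usual documented interface and are not defined in this file:

/*
 *   ftrace_dump_on_oops              boot parameter: dump all CPU buffers on oops
 *   ftrace_dump_on_oops=orig_cpu     dump only the CPU that triggered the oops
 *   # echo 1 > /proc/sys/kernel/ftrace_dump_on_oops     run-time equivalent
 */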
6643
6644/*
6645 * printk is capped at a maximum of 1024 bytes; we really don't need it that big.
6646 * Nothing should be printing 1000 characters anyway.
6647 */
6648#define TRACE_MAX_PRINT 1000
6649
6650/*
6651 * Define here KERN_TRACE so that we have one place to modify
6652 * it if we decide to change what log level the ftrace dump
6653 * should be at.
6654 */
428aee14 6655#define KERN_TRACE KERN_EMERG
3f5a54e3 6656
955b61e5 6657void
3f5a54e3
SR
6658trace_printk_seq(struct trace_seq *s)
6659{
6660 /* Probably should print a warning here. */
3a161d99
SRRH
6661 if (s->seq.len >= TRACE_MAX_PRINT)
6662 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 6663
820b75f6
SRRH
6664 /*
6665 * More paranoid code. Although the buffer size is set to
6666 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6667 * an extra layer of protection.
6668 */
6669 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6670 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
6671
6672 /* should be zero ended, but we are paranoid. */
3a161d99 6673 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
6674
6675 printk(KERN_TRACE "%s", s->buffer);
6676
f9520750 6677 trace_seq_init(s);
3f5a54e3
SR
6678}
6679
955b61e5
JW
6680void trace_init_global_iter(struct trace_iterator *iter)
6681{
6682 iter->tr = &global_trace;
2b6080f2 6683 iter->trace = iter->tr->current_trace;
ae3b5093 6684 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6685 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6686
6687 if (iter->trace && iter->trace->open)
6688 iter->trace->open(iter);
6689
6690 /* Annotate start of buffers if we had overruns */
6691 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6692 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6693
6694 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6695 if (trace_clocks[iter->tr->clock_id].in_ns)
6696 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
6697}
6698
7fe70b57 6699void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 6700{
3f5a54e3
SR
6701 /* use static because iter can be a bit big for the stack */
6702 static struct trace_iterator iter;
7fe70b57 6703 static atomic_t dump_running;
cf586b61 6704 unsigned int old_userobj;
d769041f
SR
6705 unsigned long flags;
6706 int cnt = 0, cpu;
3f5a54e3 6707
7fe70b57
SRRH
6708 /* Only allow one dump user at a time. */
6709 if (atomic_inc_return(&dump_running) != 1) {
6710 atomic_dec(&dump_running);
6711 return;
6712 }
3f5a54e3 6713
7fe70b57
SRRH
6714 /*
6715 * Always turn off tracing when we dump.
6716 * We don't need to show trace output of what happens
6717 * between multiple crashes.
6718 *
6719 * If the user does a sysrq-z, then they can re-enable
6720 * tracing with echo 1 > tracing_on.
6721 */
0ee6b6cf 6722 tracing_off();
cf586b61 6723
7fe70b57 6724 local_irq_save(flags);
3f5a54e3 6725
38dbe0b1 6726 /* Simulate the iterator */
955b61e5
JW
6727 trace_init_global_iter(&iter);
6728
d769041f 6729 for_each_tracing_cpu(cpu) {
12883efb 6730 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
d769041f
SR
6731 }
6732
cf586b61
FW
6733 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6734
b54d3de9
TE
6735 /* don't look at user memory in panic mode */
6736 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6737
cecbca96
FW
6738 switch (oops_dump_mode) {
6739 case DUMP_ALL:
ae3b5093 6740 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6741 break;
6742 case DUMP_ORIG:
6743 iter.cpu_file = raw_smp_processor_id();
6744 break;
6745 case DUMP_NONE:
6746 goto out_enable;
6747 default:
6748 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 6749 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
6750 }
6751
6752 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 6753
7fe70b57
SRRH
6754 /* Did function tracer already get disabled? */
6755 if (ftrace_is_dead()) {
6756 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6757 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6758 }
6759
3f5a54e3
SR
6760 /*
6761 * We need to stop all tracing on all CPUs to read
6762 * the next buffer. This is a bit expensive, but it is
6763 * not done often. We fill all that we can read,
6764 * and then release the locks again.
6765 */
6766
3f5a54e3
SR
6767 while (!trace_empty(&iter)) {
6768
6769 if (!cnt)
6770 printk(KERN_TRACE "---------------------------------\n");
6771
6772 cnt++;
6773
6774 /* reset all but tr, trace, and overruns */
6775 memset(&iter.seq, 0,
6776 sizeof(struct trace_iterator) -
6777 offsetof(struct trace_iterator, seq));
6778 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6779 iter.pos = -1;
6780
955b61e5 6781 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
6782 int ret;
6783
6784 ret = print_trace_line(&iter);
6785 if (ret != TRACE_TYPE_NO_CONSUME)
6786 trace_consume(&iter);
3f5a54e3 6787 }
b892e5c8 6788 touch_nmi_watchdog();
3f5a54e3
SR
6789
6790 trace_printk_seq(&iter.seq);
6791 }
6792
6793 if (!cnt)
6794 printk(KERN_TRACE " (ftrace buffer empty)\n");
6795 else
6796 printk(KERN_TRACE "---------------------------------\n");
6797
cecbca96 6798 out_enable:
7fe70b57 6799 trace_flags |= old_userobj;
cf586b61 6800
7fe70b57
SRRH
6801 for_each_tracing_cpu(cpu) {
6802 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 6803 }
7fe70b57 6804 atomic_dec(&dump_running);
cd891ae0 6805 local_irq_restore(flags);
3f5a54e3 6806}
a8eecf22 6807EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 6808
3928a8a2 6809__init static int tracer_alloc_buffers(void)
bc0c38d1 6810{
73c5162a 6811 int ring_buf_size;
9e01c1b7 6812 int ret = -ENOMEM;
4c11d7ae 6813
750912fa 6814
9e01c1b7
RR
6815 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6816 goto out;
6817
ccfe9e42 6818 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 6819 goto out_free_buffer_mask;
4c11d7ae 6820
07d777fe
SR
6821 /* Only allocate trace_printk buffers if a trace_printk exists */
6822 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 6823 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
6824 trace_printk_init_buffers();
6825
73c5162a
SR
6826 /* To save memory, keep the ring buffer size to its minimum */
6827 if (ring_buffer_expanded)
6828 ring_buf_size = trace_buf_size;
6829 else
6830 ring_buf_size = 1;
6831
9e01c1b7 6832 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 6833 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 6834
2b6080f2
SR
6835 raw_spin_lock_init(&global_trace.start_lock);
6836
2c4a33ab
SRRH
6837 /* Used for event triggers */
6838 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6839 if (!temp_buffer)
6840 goto out_free_cpumask;
6841
939c7a4f
YY
6842 if (trace_create_savedcmd() < 0)
6843 goto out_free_temp_buffer;
6844
9e01c1b7 6845 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 6846 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
6847 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6848 WARN_ON(1);
939c7a4f 6849 goto out_free_savedcmd;
4c11d7ae 6850 }
a7603ff4 6851
499e5470
SR
6852 if (global_trace.buffer_disabled)
6853 tracing_off();
4c11d7ae 6854
e1e232ca
SR
6855 if (trace_boot_clock) {
6856 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6857 if (ret < 0)
6858 pr_warning("Trace clock %s not defined, going back to default\n",
6859 trace_boot_clock);
6860 }
6861
ca164318
SRRH
6862 /*
6863 * register_tracer() might reference current_trace, so it
6864 * needs to be set before we register anything. This is
6865 * just a bootstrap of current_trace anyway.
6866 */
2b6080f2
SR
6867 global_trace.current_trace = &nop_trace;
6868
0b9b12c1
SRRH
6869 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6870
4104d326
SRRH
6871 ftrace_init_global_array_ops(&global_trace);
6872
ca164318
SRRH
6873 register_tracer(&nop_trace);
6874
60a11774
SR
6875 /* All seems OK, enable tracing */
6876 tracing_disabled = 0;
3928a8a2 6877
3f5a54e3
SR
6878 atomic_notifier_chain_register(&panic_notifier_list,
6879 &trace_panic_notifier);
6880
6881 register_die_notifier(&trace_die_notifier);
2fc1dfbe 6882
ae63b31e
SR
6883 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6884
6885 INIT_LIST_HEAD(&global_trace.systems);
6886 INIT_LIST_HEAD(&global_trace.events);
6887 list_add(&global_trace.list, &ftrace_trace_arrays);
6888
7bcfaf54
SR
6889 while (trace_boot_options) {
6890 char *option;
6891
6892 option = strsep(&trace_boot_options, ",");
2b6080f2 6893 trace_set_options(&global_trace, option);
7bcfaf54
SR
6894 }
6895
77fd5c15
SRRH
6896 register_snapshot_cmd();
6897
2fc1dfbe 6898 return 0;
3f5a54e3 6899
939c7a4f
YY
6900out_free_savedcmd:
6901 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
6902out_free_temp_buffer:
6903 ring_buffer_free(temp_buffer);
9e01c1b7 6904out_free_cpumask:
ccfe9e42 6905 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
6906out_free_buffer_mask:
6907 free_cpumask_var(tracing_buffer_mask);
6908out:
6909 return ret;
bc0c38d1 6910}
b2821ae6 6911
5f893b26
SRRH
6912void __init trace_init(void)
6913{
0daa2302
SRRH
6914 if (tracepoint_printk) {
6915 tracepoint_print_iter =
6916 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6917 if (WARN_ON(!tracepoint_print_iter))
6918 tracepoint_printk = 0;
6919 }
5f893b26
SRRH
6920 tracer_alloc_buffers();
6921 init_ftrace_syscalls();
6922 trace_event_init();
6923}
6924
b2821ae6
SR
6925__init static int clear_boot_tracer(void)
6926{
6927 /*
6928 * The default tracer at boot buffer is an init section.
6929 * This function is called in lateinit. If we did not
6930 * find the boot tracer, then clear it out, to prevent
6931 * later registration from accessing the buffer that is
6932 * about to be freed.
6933 */
6934 if (!default_bootup_tracer)
6935 return 0;
6936
6937 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6938 default_bootup_tracer);
6939 default_bootup_tracer = NULL;
6940
6941 return 0;
6942}
6943
b5ad384e 6944fs_initcall(tracer_init_debugfs);
b2821ae6 6945late_initcall(clear_boot_tracer);