/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

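/*
 * Illustrative note (not part of the original file): given the parser
 * above, the two accepted kernel command line forms are
 *
 *	ftrace_dump_on_oops		-> DUMP_ALL
 *	ftrace_dump_on_oops=orig_cpu	-> DUMP_ORIG
 */
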
static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

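/*
 * Worked example (illustrative, not part of the original file):
 * ns2usecs(1500) yields 2. The +500 biases do_div()'s truncating
 * divide so the result rounds to the nearest microsecond rather
 * than always rounding down.
 */
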
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

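/*
 * Illustrative sketch (an assumption, not from this file): a caller
 * that must keep a trace_array from vanishing across a sleepable
 * operation brackets it with the get/put pair above:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	do_slow_work(tr);		// hypothetical helper
 *	trace_array_put(tr);
 */
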
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek(),
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 * A) the page of the consumed events may become a normal page
 *    (not reader page) in ring buffer, and this page will be rewritten
 *    by the events producer.
 * B) The page of the consumed events may become a page for splice_read,
 *    and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

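/*
 * Illustrative sketch (an assumption, not from this file): a consumer
 * of one cpu buffer takes the per-cpu side of the scheme above, while
 * passing RING_BUFFER_ALL_CPUS instead would take the write side:
 *
 *	trace_access_lock(cpu);
 *	ring_buffer_consume(buffer, cpu, NULL, NULL);
 *	trace_access_unlock(cpu);
 */
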
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

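/*
 * Illustrative note (an assumption, not from this file): callers are
 * not expected to invoke __trace_puts() directly; the trace_puts()
 * macro supplies _THIS_IP_ and the compile-time string length:
 *
 *	trace_puts("hit the slow path\n");
 */
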
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string whose pointer is written to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

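/*
 * Illustrative sketch (an assumption, not from this file): a debug
 * hook that freezes the interesting trace data the first time a
 * suspect condition fires, while tracing continues afterwards:
 *
 *	if (saw_bad_state() && !once++)		// hypothetical trigger
 *		tracing_snapshot_alloc();	// may sleep on first use
 */
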
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

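/*
 * Illustrative sketch (an assumption, not from this file): bracketing
 * a region of interest so little else lands in the ring buffer:
 *
 *	tracing_on();
 *	run_interesting_path();			// hypothetical
 *	tracing_off();
 *	WARN_ON(tracing_is_on());
 */
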
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

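/*
 * Illustrative note (an assumption, not from this file): the names in
 * trace_clocks[] are what user space writes to the trace_clock file,
 * e.g. "echo global > /sys/kernel/debug/tracing/trace_clock".
 */
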
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

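/*
 * Illustrative sketch (an assumption, not from this file): a minimal
 * tracer registered through the interface above; "example" and the
 * initcall are hypothetical:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */
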
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

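/*
 * Illustrative sketch (an assumption, not from this file): the
 * stop/start pair nests via stop_count, so recording can be paused
 * around a noisy section and resumed without tearing anything down:
 *
 *	tracing_stop();
 *	do_noisy_housekeeping();	// hypothetical
 *	tracing_start();
 */
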
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

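/*
 * Illustrative sketch (an assumption, not from this file) of the
 * reserve/fill/commit pattern built from the two helpers above,
 * mirroring what __trace_puts() does earlier in this file:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, len,
 *					  irq_flags, preempt_count());
 *	if (!event)
 *		return 0;	// buffer full or recording disabled
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in the entry's payload ...
 *	__buffer_unlock_commit(buffer, event);
 */
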
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

03889384
SR
1795/**
1796 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1797 * @skip: Number of functions to skip (helper handlers)
03889384 1798 */
c142be8e 1799void trace_dump_stack(int skip)
03889384
SR
1800{
1801 unsigned long flags;
1802
1803 if (tracing_disabled || tracing_selftest_running)
e36c5458 1804 return;
03889384
SR
1805
1806 local_save_flags(flags);
1807
c142be8e
SRRH
1808 /*
1809 * Skip 3 more frames; that seems to land us at the caller of
1810 * this function.
1811 */
1812 skip += 3;
1813 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1814 flags, skip, preempt_count(), NULL);
03889384
SR
1815}
1816
91e86e56
SR
1817static DEFINE_PER_CPU(int, user_stack_count);
1818
e77405ad
SR
1819void
1820ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1821{
e1112b4d 1822 struct ftrace_event_call *call = &event_user_stack;
8d7c6a96 1823 struct ring_buffer_event *event;
02b67518
TE
1824 struct userstack_entry *entry;
1825 struct stack_trace trace;
02b67518
TE
1826
1827 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1828 return;
1829
b6345879
SR
1830 /*
1831 * NMIs cannot handle page faults, even with fixups.
1832 * Saving the user stack can (and often does) fault.
1833 */
1834 if (unlikely(in_nmi()))
1835 return;
02b67518 1836
91e86e56
SR
1837 /*
1838 * prevent recursion, since the user stack tracing may
1839 * trigger other kernel events.
1840 */
1841 preempt_disable();
1842 if (__this_cpu_read(user_stack_count))
1843 goto out;
1844
1845 __this_cpu_inc(user_stack_count);
1846
e77405ad 1847 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1848 sizeof(*entry), flags, pc);
02b67518 1849 if (!event)
1dbd1951 1850 goto out_drop_count;
02b67518 1851 entry = ring_buffer_event_data(event);
02b67518 1852
48659d31 1853 entry->tgid = current->tgid;
02b67518
TE
1854 memset(&entry->caller, 0, sizeof(entry->caller));
1855
1856 trace.nr_entries = 0;
1857 trace.max_entries = FTRACE_STACK_ENTRIES;
1858 trace.skip = 0;
1859 trace.entries = entry->caller;
1860
1861 save_stack_trace_user(&trace);
f306cc82 1862 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1863 __buffer_unlock_commit(buffer, event);
91e86e56 1864
1dbd1951 1865 out_drop_count:
91e86e56 1866 __this_cpu_dec(user_stack_count);
91e86e56
SR
1867 out:
1868 preempt_enable();
02b67518
TE
1869}
1870
4fd27358
HE
1871#ifdef UNUSED
1872static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1873{
7be42151 1874 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1875}
4fd27358 1876#endif /* UNUSED */
02b67518 1877
c0a0d0d3
FW
1878#endif /* CONFIG_STACKTRACE */
1879
07d777fe
SR
1880/* created for use with alloc_percpu */
1881struct trace_buffer_struct {
1882 char buffer[TRACE_BUF_SIZE];
1883};
1884
1885static struct trace_buffer_struct *trace_percpu_buffer;
1886static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1887static struct trace_buffer_struct *trace_percpu_irq_buffer;
1888static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1889
1890/*
1891 * The buffer used depends on the context. There is a per-cpu
1892 * buffer for normal context, softirq context, hard irq context and
1893 * for NMI context. This allows for lockless recording.
1894 *
1895 * Note, if the buffers fail to be allocated, then this returns NULL.
1896 */
1897static char *get_trace_buf(void)
1898{
1899 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1900
1901 /*
1902 * If we have allocated per cpu buffers, then we do not
1903 * need to do any locking.
1904 */
1905 if (in_nmi())
1906 percpu_buffer = trace_percpu_nmi_buffer;
1907 else if (in_irq())
1908 percpu_buffer = trace_percpu_irq_buffer;
1909 else if (in_softirq())
1910 percpu_buffer = trace_percpu_sirq_buffer;
1911 else
1912 percpu_buffer = trace_percpu_buffer;
1913
1914 if (!percpu_buffer)
1915 return NULL;
1916
d8a0349c 1917 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
1918}
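/*
 * Illustrative sketch of a hypothetical get_trace_buf() caller: since
 * each context (task, softirq, irq, NMI) gets its own per-cpu buffer,
 * no lock is needed -- only preemption disabled so the CPU cannot
 * change underneath us. This mirrors what trace_vbprintk() and
 * trace_vprintk() below actually do.
 */
static int demo_emit(const char *fmt, ...)
{
	va_list ap;
	char *buf;
	int len = 0;

	preempt_disable_notrace();
	buf = get_trace_buf();
	if (buf) {
		va_start(ap, fmt);
		len = vsnprintf(buf, TRACE_BUF_SIZE, fmt, ap);
		va_end(ap);
		/* ... hand buf off to the ring buffer here ... */
	}
	preempt_enable_notrace();

	return len;
}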
1919
1920static int alloc_percpu_trace_buffer(void)
1921{
1922 struct trace_buffer_struct *buffers;
1923 struct trace_buffer_struct *sirq_buffers;
1924 struct trace_buffer_struct *irq_buffers;
1925 struct trace_buffer_struct *nmi_buffers;
1926
1927 buffers = alloc_percpu(struct trace_buffer_struct);
1928 if (!buffers)
1929 goto err_warn;
1930
1931 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1932 if (!sirq_buffers)
1933 goto err_sirq;
1934
1935 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1936 if (!irq_buffers)
1937 goto err_irq;
1938
1939 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1940 if (!nmi_buffers)
1941 goto err_nmi;
1942
1943 trace_percpu_buffer = buffers;
1944 trace_percpu_sirq_buffer = sirq_buffers;
1945 trace_percpu_irq_buffer = irq_buffers;
1946 trace_percpu_nmi_buffer = nmi_buffers;
1947
1948 return 0;
1949
1950 err_nmi:
1951 free_percpu(irq_buffers);
1952 err_irq:
1953 free_percpu(sirq_buffers);
1954 err_sirq:
1955 free_percpu(buffers);
1956 err_warn:
1957 WARN(1, "Could not allocate percpu trace_printk buffer");
1958 return -ENOMEM;
1959}
1960
81698831
SR
1961static int buffers_allocated;
1962
07d777fe
SR
1963void trace_printk_init_buffers(void)
1964{
07d777fe
SR
1965 if (buffers_allocated)
1966 return;
1967
1968 if (alloc_percpu_trace_buffer())
1969 return;
1970
1971 pr_info("ftrace: Allocated trace_printk buffers\n");
1972
b382ede6
SR
1973 /* Expand the buffers to set size */
1974 tracing_update_buffers();
1975
07d777fe 1976 buffers_allocated = 1;
81698831
SR
1977
1978 /*
1979 * trace_printk_init_buffers() can be called by modules.
1980 * If that happens, then we need to start cmdline recording
1981 * directly here. If the global_trace.trace_buffer.buffer is
1982 * already allocated here, then this was called by module code.
1983 */
12883efb 1984 if (global_trace.trace_buffer.buffer)
81698831
SR
1985 tracing_start_cmdline_record();
1986}
1987
1988void trace_printk_start_comm(void)
1989{
1990 /* Start tracing comms if trace printk is set */
1991 if (!buffers_allocated)
1992 return;
1993 tracing_start_cmdline_record();
1994}
1995
1996static void trace_printk_start_stop_comm(int enabled)
1997{
1998 if (!buffers_allocated)
1999 return;
2000
2001 if (enabled)
2002 tracing_start_cmdline_record();
2003 else
2004 tracing_stop_cmdline_record();
07d777fe
SR
2005}
2006
769b0441 2007/**
48ead020 2008 * trace_vbprintk - write a binary message to the tracing buffer
769b0441
FW
2009 *
2010 */
40ce74f1 2011int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2012{
e1112b4d 2013 struct ftrace_event_call *call = &event_bprint;
769b0441 2014 struct ring_buffer_event *event;
e77405ad 2015 struct ring_buffer *buffer;
769b0441 2016 struct trace_array *tr = &global_trace;
48ead020 2017 struct bprint_entry *entry;
769b0441 2018 unsigned long flags;
07d777fe
SR
2019 char *tbuffer;
2020 int len = 0, size, pc;
769b0441
FW
2021
2022 if (unlikely(tracing_selftest_running || tracing_disabled))
2023 return 0;
2024
2025 /* Don't pollute graph traces with trace_vprintk internals */
2026 pause_graph_tracing();
2027
2028 pc = preempt_count();
5168ae50 2029 preempt_disable_notrace();
769b0441 2030
07d777fe
SR
2031 tbuffer = get_trace_buf();
2032 if (!tbuffer) {
2033 len = 0;
769b0441 2034 goto out;
07d777fe 2035 }
769b0441 2036
07d777fe 2037 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2038
07d777fe
SR
2039 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2040 goto out;
769b0441 2041
07d777fe 2042 local_save_flags(flags);
769b0441 2043 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2044 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2045 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2046 flags, pc);
769b0441 2047 if (!event)
07d777fe 2048 goto out;
769b0441
FW
2049 entry = ring_buffer_event_data(event);
2050 entry->ip = ip;
769b0441
FW
2051 entry->fmt = fmt;
2052
07d777fe 2053 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2054 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2055 __buffer_unlock_commit(buffer, event);
d931369b
SR
2056 ftrace_trace_stack(buffer, flags, 6, pc);
2057 }
769b0441 2058
769b0441 2059out:
5168ae50 2060 preempt_enable_notrace();
769b0441
FW
2061 unpause_graph_tracing();
2062
2063 return len;
2064}
48ead020
FW
2065EXPORT_SYMBOL_GPL(trace_vbprintk);
2066
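/*
 * Illustrative sketch (hypothetical wrapper): trace_vbprintk() is
 * normally reached through the trace_printk() machinery, which boils
 * down to a varargs wrapper along these lines.
 */
static int demo_trace_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);

	return ret;
}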
12883efb
SRRH
2067static int
2068__trace_array_vprintk(struct ring_buffer *buffer,
2069 unsigned long ip, const char *fmt, va_list args)
48ead020 2070{
e1112b4d 2071 struct ftrace_event_call *call = &event_print;
48ead020 2072 struct ring_buffer_event *event;
07d777fe 2073 int len = 0, size, pc;
48ead020 2074 struct print_entry *entry;
07d777fe
SR
2075 unsigned long flags;
2076 char *tbuffer;
48ead020
FW
2077
2078 if (tracing_disabled || tracing_selftest_running)
2079 return 0;
2080
07d777fe
SR
2081 /* Don't pollute graph traces with trace_vprintk internals */
2082 pause_graph_tracing();
2083
48ead020
FW
2084 pc = preempt_count();
2085 preempt_disable_notrace();
48ead020 2086
07d777fe
SR
2087
2088 tbuffer = get_trace_buf();
2089 if (!tbuffer) {
2090 len = 0;
48ead020 2091 goto out;
07d777fe 2092 }
48ead020 2093
07d777fe
SR
2094 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2095 if (len > TRACE_BUF_SIZE)
2096 goto out;
48ead020 2097
07d777fe 2098 local_save_flags(flags);
48ead020 2099 size = sizeof(*entry) + len + 1;
e77405ad 2100 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2101 flags, pc);
48ead020 2102 if (!event)
07d777fe 2103 goto out;
48ead020 2104 entry = ring_buffer_event_data(event);
c13d2f7c 2105 entry->ip = ip;
48ead020 2106
07d777fe 2107 memcpy(&entry->buf, tbuffer, len);
c13d2f7c 2108 entry->buf[len] = '\0';
f306cc82 2109 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2110 __buffer_unlock_commit(buffer, event);
07d777fe 2111 ftrace_trace_stack(buffer, flags, 6, pc);
d931369b 2112 }
48ead020
FW
2113 out:
2114 preempt_enable_notrace();
07d777fe 2115 unpause_graph_tracing();
48ead020
FW
2116
2117 return len;
2118}
659372d3 2119
12883efb
SRRH
2120int trace_array_vprintk(struct trace_array *tr,
2121 unsigned long ip, const char *fmt, va_list args)
2122{
2123 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2124}
2125
2126int trace_array_printk(struct trace_array *tr,
2127 unsigned long ip, const char *fmt, ...)
2128{
2129 int ret;
2130 va_list ap;
2131
2132 if (!(trace_flags & TRACE_ITER_PRINTK))
2133 return 0;
2134
2135 va_start(ap, fmt);
2136 ret = trace_array_vprintk(tr, ip, fmt, ap);
2137 va_end(ap);
2138 return ret;
2139}
2140
2141int trace_array_printk_buf(struct ring_buffer *buffer,
2142 unsigned long ip, const char *fmt, ...)
2143{
2144 int ret;
2145 va_list ap;
2146
2147 if (!(trace_flags & TRACE_ITER_PRINTK))
2148 return 0;
2149
2150 va_start(ap, fmt);
2151 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2152 va_end(ap);
2153 return ret;
2154}
2155
659372d3
SR
2156int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2157{
a813a159 2158 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2159}
769b0441
FW
2160EXPORT_SYMBOL_GPL(trace_vprintk);
2161
e2ac8ef5 2162static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2163{
6d158a81
SR
2164 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2165
5a90f577 2166 iter->idx++;
6d158a81
SR
2167 if (buf_iter)
2168 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2169}
2170
e309b41d 2171static struct trace_entry *
bc21b478
SR
2172peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2173 unsigned long *lost_events)
dd0e545f 2174{
3928a8a2 2175 struct ring_buffer_event *event;
6d158a81 2176 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2177
d769041f
SR
2178 if (buf_iter)
2179 event = ring_buffer_iter_peek(buf_iter, ts);
2180 else
12883efb 2181 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2182 lost_events);
d769041f 2183
4a9bd3f1
SR
2184 if (event) {
2185 iter->ent_size = ring_buffer_event_length(event);
2186 return ring_buffer_event_data(event);
2187 }
2188 iter->ent_size = 0;
2189 return NULL;
dd0e545f 2190}
d769041f 2191
dd0e545f 2192static struct trace_entry *
bc21b478
SR
2193__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2194 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2195{
12883efb 2196 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2197 struct trace_entry *ent, *next = NULL;
aa27497c 2198 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2199 int cpu_file = iter->cpu_file;
3928a8a2 2200 u64 next_ts = 0, ts;
bc0c38d1 2201 int next_cpu = -1;
12b5da34 2202 int next_size = 0;
bc0c38d1
SR
2203 int cpu;
2204
b04cc6b1
FW
2205 /*
2206 * If we are in a per_cpu trace file, don't bother iterating over
2207 * all cpus; peek at the one cpu directly.
2208 */
ae3b5093 2209 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2210 if (ring_buffer_empty_cpu(buffer, cpu_file))
2211 return NULL;
bc21b478 2212 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2213 if (ent_cpu)
2214 *ent_cpu = cpu_file;
2215
2216 return ent;
2217 }
2218
ab46428c 2219 for_each_tracing_cpu(cpu) {
dd0e545f 2220
3928a8a2
SR
2221 if (ring_buffer_empty_cpu(buffer, cpu))
2222 continue;
dd0e545f 2223
bc21b478 2224 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2225
cdd31cd2
IM
2226 /*
2227 * Pick the entry with the smallest timestamp:
2228 */
3928a8a2 2229 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2230 next = ent;
2231 next_cpu = cpu;
3928a8a2 2232 next_ts = ts;
bc21b478 2233 next_lost = lost_events;
12b5da34 2234 next_size = iter->ent_size;
bc0c38d1
SR
2235 }
2236 }
2237
12b5da34
SR
2238 iter->ent_size = next_size;
2239
bc0c38d1
SR
2240 if (ent_cpu)
2241 *ent_cpu = next_cpu;
2242
3928a8a2
SR
2243 if (ent_ts)
2244 *ent_ts = next_ts;
2245
bc21b478
SR
2246 if (missing_events)
2247 *missing_events = next_lost;
2248
bc0c38d1
SR
2249 return next;
2250}
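/*
 * Illustrative sketch: __find_next_entry() above is a k-way merge over
 * the per-cpu streams. Reduced to plain arrays, the selection step
 * looks like this (demo_* names are hypothetical).
 */
static int demo_pick_earliest(u64 *head_ts, bool *nonempty, int ncpus)
{
	u64 best_ts = 0;
	int best = -1;
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!nonempty[cpu])
			continue;
		if (best < 0 || head_ts[cpu] < best_ts) {
			best = cpu;
			best_ts = head_ts[cpu];
		}
	}

	return best;	/* -1 when every stream is empty */
}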
2251
dd0e545f 2252/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2253struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2254 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2255{
bc21b478 2256 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2257}
2258
2259/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2260void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2261{
bc21b478
SR
2262 iter->ent = __find_next_entry(iter, &iter->cpu,
2263 &iter->lost_events, &iter->ts);
dd0e545f 2264
3928a8a2 2265 if (iter->ent)
e2ac8ef5 2266 trace_iterator_increment(iter);
dd0e545f 2267
3928a8a2 2268 return iter->ent ? iter : NULL;
b3806b43 2269}
bc0c38d1 2270
e309b41d 2271static void trace_consume(struct trace_iterator *iter)
b3806b43 2272{
12883efb 2273 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2274 &iter->lost_events);
bc0c38d1
SR
2275}
2276
e309b41d 2277static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2278{
2279 struct trace_iterator *iter = m->private;
bc0c38d1 2280 int i = (int)*pos;
4e3c3333 2281 void *ent;
bc0c38d1 2282
a63ce5b3
SR
2283 WARN_ON_ONCE(iter->leftover);
2284
bc0c38d1
SR
2285 (*pos)++;
2286
2287 /* can't go backwards */
2288 if (iter->idx > i)
2289 return NULL;
2290
2291 if (iter->idx < 0)
955b61e5 2292 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2293 else
2294 ent = iter;
2295
2296 while (ent && iter->idx < i)
955b61e5 2297 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2298
2299 iter->pos = *pos;
2300
bc0c38d1
SR
2301 return ent;
2302}
2303
955b61e5 2304void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2305{
2f26ebd5
SR
2306 struct ring_buffer_event *event;
2307 struct ring_buffer_iter *buf_iter;
2308 unsigned long entries = 0;
2309 u64 ts;
2310
12883efb 2311 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2312
6d158a81
SR
2313 buf_iter = trace_buffer_iter(iter, cpu);
2314 if (!buf_iter)
2f26ebd5
SR
2315 return;
2316
2f26ebd5
SR
2317 ring_buffer_iter_reset(buf_iter);
2318
2319 /*
2320 * With the max latency tracers, a reset may never have taken
2321 * place on a cpu. This is evident from the timestamp being
2322 * before the start of the buffer.
2323 */
2324 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2325 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2326 break;
2327 entries++;
2328 ring_buffer_read(buf_iter, NULL);
2329 }
2330
12883efb 2331 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2332}
2333
d7350c3f 2334/*
d7350c3f
FW
2335 * The current tracer is copied to avoid taking a global lock
2336 * all around.
2337 */
bc0c38d1
SR
2338static void *s_start(struct seq_file *m, loff_t *pos)
2339{
2340 struct trace_iterator *iter = m->private;
2b6080f2 2341 struct trace_array *tr = iter->tr;
b04cc6b1 2342 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2343 void *p = NULL;
2344 loff_t l = 0;
3928a8a2 2345 int cpu;
bc0c38d1 2346
2fd196ec
HT
2347 /*
2348 * Copy the tracer to avoid using a global lock all around.
2349 * iter->trace is a copy of current_trace, so the name pointer
2350 * may be compared instead of using strcmp(), as iter->trace->name
2351 * will point to the same string as current_trace->name (see the sketch below).
2352 */
bc0c38d1 2353 mutex_lock(&trace_types_lock);
2b6080f2
SR
2354 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2355 *iter->trace = *tr->current_trace;
d7350c3f 2356 mutex_unlock(&trace_types_lock);
bc0c38d1 2357
12883efb 2358#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2359 if (iter->snapshot && iter->trace->use_max_tr)
2360 return ERR_PTR(-EBUSY);
12883efb 2361#endif
debdd57f
HT
2362
2363 if (!iter->snapshot)
2364 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2365
bc0c38d1
SR
2366 if (*pos != iter->pos) {
2367 iter->ent = NULL;
2368 iter->cpu = 0;
2369 iter->idx = -1;
2370
ae3b5093 2371 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2372 for_each_tracing_cpu(cpu)
2f26ebd5 2373 tracing_iter_reset(iter, cpu);
b04cc6b1 2374 } else
2f26ebd5 2375 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2376
ac91d854 2377 iter->leftover = 0;
bc0c38d1
SR
2378 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2379 ;
2380
2381 } else {
a63ce5b3
SR
2382 /*
2383 * If we overflowed the seq_file before, then we want
2384 * to just reuse the trace_seq buffer again.
2385 */
2386 if (iter->leftover)
2387 p = iter;
2388 else {
2389 l = *pos - 1;
2390 p = s_next(m, p, &l);
2391 }
bc0c38d1
SR
2392 }
2393
4f535968 2394 trace_event_read_lock();
7e53bd42 2395 trace_access_lock(cpu_file);
bc0c38d1
SR
2396 return p;
2397}
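/*
 * Illustrative sketch (hypothetical helper): while the copy made in
 * s_start() is current, iter->trace->name and tr->current_trace->name
 * point at the very same string, so detecting a tracer switch is a
 * pointer comparison rather than a strcmp().
 */
static bool demo_tracer_changed(struct trace_iterator *iter,
				struct trace_array *tr)
{
	return iter->trace->name != tr->current_trace->name;
}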
2398
2399static void s_stop(struct seq_file *m, void *p)
2400{
7e53bd42
LJ
2401 struct trace_iterator *iter = m->private;
2402
12883efb 2403#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2404 if (iter->snapshot && iter->trace->use_max_tr)
2405 return;
12883efb 2406#endif
debdd57f
HT
2407
2408 if (!iter->snapshot)
2409 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2410
7e53bd42 2411 trace_access_unlock(iter->cpu_file);
4f535968 2412 trace_event_read_unlock();
bc0c38d1
SR
2413}
2414
39eaf7ef 2415static void
12883efb
SRRH
2416get_total_entries(struct trace_buffer *buf,
2417 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2418{
2419 unsigned long count;
2420 int cpu;
2421
2422 *total = 0;
2423 *entries = 0;
2424
2425 for_each_tracing_cpu(cpu) {
12883efb 2426 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2427 /*
2428 * If this buffer has skipped entries, then we hold all
2429 * entries for the trace and we need to ignore the
2430 * ones before the timestamp.
2431 */
12883efb
SRRH
2432 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2433 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2434 /* total is the same as the entries */
2435 *total += count;
2436 } else
2437 *total += count +
12883efb 2438 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2439 *entries += count;
2440 }
2441}
2442
e309b41d 2443static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2444{
a6168353
ME
2445 seq_puts(m, "# _------=> CPU# \n");
2446 seq_puts(m, "# / _-----=> irqs-off \n");
2447 seq_puts(m, "# | / _----=> need-resched \n");
2448 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2449 seq_puts(m, "# ||| / _--=> preempt-depth \n");
e6e1e259
SR
2450 seq_puts(m, "# |||| / delay \n");
2451 seq_puts(m, "# cmd pid ||||| time | caller \n");
2452 seq_puts(m, "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2453}
2454
12883efb 2455static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2456{
39eaf7ef
SR
2457 unsigned long total;
2458 unsigned long entries;
2459
12883efb 2460 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2461 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2462 entries, total, num_online_cpus());
2463 seq_puts(m, "#\n");
2464}
2465
12883efb 2466static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2467{
12883efb 2468 print_event_info(buf, m);
77271ce4 2469 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
a6168353 2470 seq_puts(m, "# | | | | |\n");
bc0c38d1
SR
2471}
2472
12883efb 2473static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2474{
12883efb 2475 print_event_info(buf, m);
77271ce4
SR
2476 seq_puts(m, "# _-----=> irqs-off\n");
2477 seq_puts(m, "# / _----=> need-resched\n");
2478 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2479 seq_puts(m, "# || / _--=> preempt-depth\n");
2480 seq_puts(m, "# ||| / delay\n");
2481 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2482 seq_puts(m, "# | | | |||| | |\n");
2483}
bc0c38d1 2484
62b915f1 2485void
bc0c38d1
SR
2486print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2487{
2488 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2489 struct trace_buffer *buf = iter->trace_buffer;
2490 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2491 struct tracer *type = iter->trace;
39eaf7ef
SR
2492 unsigned long entries;
2493 unsigned long total;
bc0c38d1
SR
2494 const char *name = "preemption";
2495
d840f718 2496 name = type->name;
bc0c38d1 2497
12883efb 2498 get_total_entries(buf, &total, &entries);
bc0c38d1 2499
888b55dc 2500 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2501 name, UTS_RELEASE);
888b55dc 2502 seq_puts(m, "# -----------------------------------"
bc0c38d1 2503 "---------------------------------\n");
888b55dc 2504 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2505 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2506 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2507 entries,
4c11d7ae 2508 total,
12883efb 2509 buf->cpu,
bc0c38d1
SR
2510#if defined(CONFIG_PREEMPT_NONE)
2511 "server",
2512#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2513 "desktop",
b5c21b45 2514#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2515 "preempt",
2516#else
2517 "unknown",
2518#endif
2519 /* These are reserved for later use */
2520 0, 0, 0, 0);
2521#ifdef CONFIG_SMP
2522 seq_printf(m, " #P:%d)\n", num_online_cpus());
2523#else
2524 seq_puts(m, ")\n");
2525#endif
888b55dc
KM
2526 seq_puts(m, "# -----------------\n");
2527 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2528 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2529 data->comm, data->pid,
2530 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2531 data->policy, data->rt_priority);
888b55dc 2532 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2533
2534 if (data->critical_start) {
888b55dc 2535 seq_puts(m, "# => started at: ");
214023c3
SR
2536 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2537 trace_print_seq(m, &iter->seq);
888b55dc 2538 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2539 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2540 trace_print_seq(m, &iter->seq);
8248ac05 2541 seq_puts(m, "\n#\n");
bc0c38d1
SR
2542 }
2543
888b55dc 2544 seq_puts(m, "#\n");
bc0c38d1
SR
2545}
2546
a309720c
SR
2547static void test_cpu_buff_start(struct trace_iterator *iter)
2548{
2549 struct trace_seq *s = &iter->seq;
2550
12ef7d44
SR
2551 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2552 return;
2553
2554 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2555 return;
2556
4462344e 2557 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2558 return;
2559
12883efb 2560 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2561 return;
2562
4462344e 2563 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2564
2565 /* Don't print started cpu buffer for the first entry of the trace */
2566 if (iter->idx > 1)
2567 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2568 iter->cpu);
a309720c
SR
2569}
2570
2c4f035f 2571static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2572{
214023c3 2573 struct trace_seq *s = &iter->seq;
bc0c38d1 2574 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2575 struct trace_entry *entry;
f633cef0 2576 struct trace_event *event;
bc0c38d1 2577
4e3c3333 2578 entry = iter->ent;
dd0e545f 2579
a309720c
SR
2580 test_cpu_buff_start(iter);
2581
c4a8e8be 2582 event = ftrace_find_event(entry->type);
bc0c38d1 2583
c4a8e8be 2584 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
27d48be8
SR
2585 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2586 if (!trace_print_lat_context(iter))
2587 goto partial;
2588 } else {
2589 if (!trace_print_context(iter))
2590 goto partial;
2591 }
c4a8e8be 2592 }
bc0c38d1 2593
268ccda0 2594 if (event)
a9a57763 2595 return event->funcs->trace(iter, sym_flags, event);
d9793bd8
ACM
2596
2597 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2598 goto partial;
02b67518 2599
2c4f035f 2600 return TRACE_TYPE_HANDLED;
d9793bd8
ACM
2601partial:
2602 return TRACE_TYPE_PARTIAL_LINE;
bc0c38d1
SR
2603}
2604
2c4f035f 2605static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2606{
2607 struct trace_seq *s = &iter->seq;
2608 struct trace_entry *entry;
f633cef0 2609 struct trace_event *event;
f9896bf3
IM
2610
2611 entry = iter->ent;
dd0e545f 2612
c4a8e8be 2613 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
d9793bd8
ACM
2614 if (!trace_seq_printf(s, "%d %d %llu ",
2615 entry->pid, iter->cpu, iter->ts))
2616 goto partial;
c4a8e8be 2617 }
f9896bf3 2618
f633cef0 2619 event = ftrace_find_event(entry->type);
268ccda0 2620 if (event)
a9a57763 2621 return event->funcs->raw(iter, 0, event);
d9793bd8
ACM
2622
2623 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2624 goto partial;
777e208d 2625
2c4f035f 2626 return TRACE_TYPE_HANDLED;
d9793bd8
ACM
2627partial:
2628 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3
IM
2629}
2630
2c4f035f 2631static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2632{
2633 struct trace_seq *s = &iter->seq;
2634 unsigned char newline = '\n';
2635 struct trace_entry *entry;
f633cef0 2636 struct trace_event *event;
5e3ca0ec
IM
2637
2638 entry = iter->ent;
dd0e545f 2639
c4a8e8be
FW
2640 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2641 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2642 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2643 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2644 }
5e3ca0ec 2645
f633cef0 2646 event = ftrace_find_event(entry->type);
268ccda0 2647 if (event) {
a9a57763 2648 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2649 if (ret != TRACE_TYPE_HANDLED)
2650 return ret;
2651 }
7104f300 2652
5e3ca0ec
IM
2653 SEQ_PUT_FIELD_RET(s, newline);
2654
2c4f035f 2655 return TRACE_TYPE_HANDLED;
5e3ca0ec
IM
2656}
2657
2c4f035f 2658static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2659{
2660 struct trace_seq *s = &iter->seq;
2661 struct trace_entry *entry;
f633cef0 2662 struct trace_event *event;
cb0f12aa
IM
2663
2664 entry = iter->ent;
dd0e545f 2665
c4a8e8be
FW
2666 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2667 SEQ_PUT_FIELD_RET(s, entry->pid);
1830b52d 2668 SEQ_PUT_FIELD_RET(s, iter->cpu);
c4a8e8be
FW
2669 SEQ_PUT_FIELD_RET(s, iter->ts);
2670 }
cb0f12aa 2671
f633cef0 2672 event = ftrace_find_event(entry->type);
a9a57763
SR
2673 return event ? event->funcs->binary(iter, 0, event) :
2674 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2675}
2676
62b915f1 2677int trace_empty(struct trace_iterator *iter)
bc0c38d1 2678{
6d158a81 2679 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2680 int cpu;
2681
9aba60fe 2682 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2683 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2684 cpu = iter->cpu_file;
6d158a81
SR
2685 buf_iter = trace_buffer_iter(iter, cpu);
2686 if (buf_iter) {
2687 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2688 return 0;
2689 } else {
12883efb 2690 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2691 return 0;
2692 }
2693 return 1;
2694 }
2695
ab46428c 2696 for_each_tracing_cpu(cpu) {
6d158a81
SR
2697 buf_iter = trace_buffer_iter(iter, cpu);
2698 if (buf_iter) {
2699 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2700 return 0;
2701 } else {
12883efb 2702 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2703 return 0;
2704 }
bc0c38d1 2705 }
d769041f 2706
797d3712 2707 return 1;
bc0c38d1
SR
2708}
2709
4f535968 2710/* Called with trace_event_read_lock() held. */
955b61e5 2711enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2712{
2c4f035f
FW
2713 enum print_line_t ret;
2714
ee5e51f5
JO
2715 if (iter->lost_events &&
2716 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2717 iter->cpu, iter->lost_events))
2718 return TRACE_TYPE_PARTIAL_LINE;
bc21b478 2719
2c4f035f
FW
2720 if (iter->trace && iter->trace->print_line) {
2721 ret = iter->trace->print_line(iter);
2722 if (ret != TRACE_TYPE_UNHANDLED)
2723 return ret;
2724 }
72829bc3 2725
09ae7234
SRRH
2726 if (iter->ent->type == TRACE_BPUTS &&
2727 trace_flags & TRACE_ITER_PRINTK &&
2728 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2729 return trace_print_bputs_msg_only(iter);
2730
48ead020
FW
2731 if (iter->ent->type == TRACE_BPRINT &&
2732 trace_flags & TRACE_ITER_PRINTK &&
2733 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2734 return trace_print_bprintk_msg_only(iter);
48ead020 2735
66896a85
FW
2736 if (iter->ent->type == TRACE_PRINT &&
2737 trace_flags & TRACE_ITER_PRINTK &&
2738 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2739 return trace_print_printk_msg_only(iter);
66896a85 2740
cb0f12aa
IM
2741 if (trace_flags & TRACE_ITER_BIN)
2742 return print_bin_fmt(iter);
2743
5e3ca0ec
IM
2744 if (trace_flags & TRACE_ITER_HEX)
2745 return print_hex_fmt(iter);
2746
f9896bf3
IM
2747 if (trace_flags & TRACE_ITER_RAW)
2748 return print_raw_fmt(iter);
2749
f9896bf3
IM
2750 return print_trace_fmt(iter);
2751}
2752
7e9a49ef
JO
2753void trace_latency_header(struct seq_file *m)
2754{
2755 struct trace_iterator *iter = m->private;
2756
2757 /* print nothing if the buffers are empty */
2758 if (trace_empty(iter))
2759 return;
2760
2761 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2762 print_trace_header(m, iter);
2763
2764 if (!(trace_flags & TRACE_ITER_VERBOSE))
2765 print_lat_help_header(m);
2766}
2767
62b915f1
JO
2768void trace_default_header(struct seq_file *m)
2769{
2770 struct trace_iterator *iter = m->private;
2771
f56e7f8e
JO
2772 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2773 return;
2774
62b915f1
JO
2775 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2776 /* print nothing if the buffers are empty */
2777 if (trace_empty(iter))
2778 return;
2779 print_trace_header(m, iter);
2780 if (!(trace_flags & TRACE_ITER_VERBOSE))
2781 print_lat_help_header(m);
2782 } else {
77271ce4
SR
2783 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2784 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2785 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2786 else
12883efb 2787 print_func_help_header(iter->trace_buffer, m);
77271ce4 2788 }
62b915f1
JO
2789 }
2790}
2791
e0a413f6
SR
2792static void test_ftrace_alive(struct seq_file *m)
2793{
2794 if (!ftrace_is_dead())
2795 return;
2796 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2797 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2798}
2799
d8741e2e 2800#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2801static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2802{
d8741e2e
SRRH
2803 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2804 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2805 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
b9be6d02 2806 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
d8741e2e
SRRH
2807 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2808 seq_printf(m, "# is not a '0' or '1')\n");
2809}
f1affcaa
SRRH
2810
2811static void show_snapshot_percpu_help(struct seq_file *m)
2812{
2813 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2814#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2815 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2816 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2817#else
2818 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2819 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2820#endif
2821 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2822 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2823 seq_printf(m, "# is not a '0' or '1')\n");
2824}
2825
d8741e2e
SRRH
2826static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2827{
45ad21ca 2828 if (iter->tr->allocated_snapshot)
d8741e2e
SRRH
2829 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2830 else
2831 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2832
2833 seq_printf(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2834 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2835 show_snapshot_main_help(m);
2836 else
2837 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2838}
2839#else
2840/* Should never be called */
2841static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2842#endif
2843
bc0c38d1
SR
2844static int s_show(struct seq_file *m, void *v)
2845{
2846 struct trace_iterator *iter = v;
a63ce5b3 2847 int ret;
bc0c38d1
SR
2848
2849 if (iter->ent == NULL) {
2850 if (iter->tr) {
2851 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2852 seq_puts(m, "#\n");
e0a413f6 2853 test_ftrace_alive(m);
bc0c38d1 2854 }
d8741e2e
SRRH
2855 if (iter->snapshot && trace_empty(iter))
2856 print_snapshot_help(m, iter);
2857 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2858 iter->trace->print_header(m);
62b915f1
JO
2859 else
2860 trace_default_header(m);
2861
a63ce5b3
SR
2862 } else if (iter->leftover) {
2863 /*
2864 * If we filled the seq_file buffer earlier, we
2865 * want to just show it now.
2866 */
2867 ret = trace_print_seq(m, &iter->seq);
2868
2869 /* ret should this time be zero, but you never know */
2870 iter->leftover = ret;
2871
bc0c38d1 2872 } else {
f9896bf3 2873 print_trace_line(iter);
a63ce5b3
SR
2874 ret = trace_print_seq(m, &iter->seq);
2875 /*
2876 * If we overflow the seq_file buffer, then it will
2877 * ask us for this data again at start up.
2878 * Use that instead.
2879 * ret is 0 if seq_file write succeeded.
2880 * -1 otherwise.
2881 */
2882 iter->leftover = ret;
bc0c38d1
SR
2883 }
2884
2885 return 0;
2886}
2887
649e9c70
ON
2888/*
2889 * Should be used after trace_array_get(); trace_types_lock
2890 * ensures that i_cdev was already initialized.
2891 */
2892static inline int tracing_get_cpu(struct inode *inode)
2893{
2894 if (inode->i_cdev) /* See trace_create_cpu_file() */
2895 return (long)inode->i_cdev - 1;
2896 return RING_BUFFER_ALL_CPUS;
2897}
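/*
 * Illustrative sketch (hypothetical helper): the per-cpu files store
 * "cpu + 1" in i_cdev when they are created, so a NULL i_cdev (0)
 * naturally decodes to RING_BUFFER_ALL_CPUS above.
 */
static void demo_set_cpu_file(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);	/* 0 means "all CPUs" */
}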
2898
88e9d34c 2899static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
2900 .start = s_start,
2901 .next = s_next,
2902 .stop = s_stop,
2903 .show = s_show,
bc0c38d1
SR
2904};
2905
e309b41d 2906static struct trace_iterator *
6484c71c 2907__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 2908{
6484c71c 2909 struct trace_array *tr = inode->i_private;
bc0c38d1 2910 struct trace_iterator *iter;
50e18b94 2911 int cpu;
bc0c38d1 2912
85a2f9b4
SR
2913 if (tracing_disabled)
2914 return ERR_PTR(-ENODEV);
60a11774 2915
50e18b94 2916 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
2917 if (!iter)
2918 return ERR_PTR(-ENOMEM);
bc0c38d1 2919
6d158a81
SR
2920 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2921 GFP_KERNEL);
93574fcc
DC
2922 if (!iter->buffer_iter)
2923 goto release;
2924
d7350c3f
FW
2925 /*
2926 * We make a copy of the current tracer to avoid concurrent
2927 * changes on it while we are reading.
2928 */
bc0c38d1 2929 mutex_lock(&trace_types_lock);
d7350c3f 2930 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 2931 if (!iter->trace)
d7350c3f 2932 goto fail;
85a2f9b4 2933
2b6080f2 2934 *iter->trace = *tr->current_trace;
d7350c3f 2935
79f55997 2936 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
2937 goto fail;
2938
12883efb
SRRH
2939 iter->tr = tr;
2940
2941#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
2942 /* Currently only the top directory has a snapshot */
2943 if (tr->current_trace->print_max || snapshot)
12883efb 2944 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 2945 else
12883efb
SRRH
2946#endif
2947 iter->trace_buffer = &tr->trace_buffer;
debdd57f 2948 iter->snapshot = snapshot;
bc0c38d1 2949 iter->pos = -1;
6484c71c 2950 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 2951 mutex_init(&iter->mutex);
bc0c38d1 2952
8bba1bf5
MM
2953 /* Notify the tracer early; before we stop tracing. */
2954 if (iter->trace && iter->trace->open)
a93751ca 2955 iter->trace->open(iter);
8bba1bf5 2956
12ef7d44 2957 /* Annotate start of buffers if we had overruns */
12883efb 2958 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
2959 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2960
8be0709f 2961 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 2962 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
2963 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2964
debdd57f
HT
2965 /* stop the trace while dumping if we are not opening "snapshot" */
2966 if (!iter->snapshot)
2b6080f2 2967 tracing_stop_tr(tr);
2f26ebd5 2968
ae3b5093 2969 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2970 for_each_tracing_cpu(cpu) {
b04cc6b1 2971 iter->buffer_iter[cpu] =
12883efb 2972 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
2973 }
2974 ring_buffer_read_prepare_sync();
2975 for_each_tracing_cpu(cpu) {
2976 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 2977 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
2978 }
2979 } else {
2980 cpu = iter->cpu_file;
3928a8a2 2981 iter->buffer_iter[cpu] =
12883efb 2982 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
2983 ring_buffer_read_prepare_sync();
2984 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 2985 tracing_iter_reset(iter, cpu);
3928a8a2
SR
2986 }
2987
bc0c38d1
SR
2988 mutex_unlock(&trace_types_lock);
2989
bc0c38d1 2990 return iter;
3928a8a2 2991
d7350c3f 2992 fail:
3928a8a2 2993 mutex_unlock(&trace_types_lock);
d7350c3f 2994 kfree(iter->trace);
6d158a81 2995 kfree(iter->buffer_iter);
93574fcc 2996release:
50e18b94
JO
2997 seq_release_private(inode, file);
2998 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
2999}
3000
3001int tracing_open_generic(struct inode *inode, struct file *filp)
3002{
60a11774
SR
3003 if (tracing_disabled)
3004 return -ENODEV;
3005
bc0c38d1
SR
3006 filp->private_data = inode->i_private;
3007 return 0;
3008}
3009
2e86421d
GB
3010bool tracing_is_disabled(void)
3011{
3012 return tracing_disabled ? true : false;
3013}
3014
7b85af63
SRRH
3015/*
3016 * Open and update trace_array ref count.
3017 * Must have the current trace_array passed to it.
3018 */
dcc30223 3019static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3020{
3021 struct trace_array *tr = inode->i_private;
3022
3023 if (tracing_disabled)
3024 return -ENODEV;
3025
3026 if (trace_array_get(tr) < 0)
3027 return -ENODEV;
3028
3029 filp->private_data = inode->i_private;
3030
3031 return 0;
7b85af63
SRRH
3032}
3033
4fd27358 3034static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3035{
6484c71c 3036 struct trace_array *tr = inode->i_private;
907f2784 3037 struct seq_file *m = file->private_data;
4acd4d00 3038 struct trace_iterator *iter;
3928a8a2 3039 int cpu;
bc0c38d1 3040
ff451961 3041 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3042 trace_array_put(tr);
4acd4d00 3043 return 0;
ff451961 3044 }
4acd4d00 3045
6484c71c 3046 /* Writes do not use seq_file */
4acd4d00 3047 iter = m->private;
bc0c38d1 3048 mutex_lock(&trace_types_lock);
a695cb58 3049
3928a8a2
SR
3050 for_each_tracing_cpu(cpu) {
3051 if (iter->buffer_iter[cpu])
3052 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3053 }
3054
bc0c38d1
SR
3055 if (iter->trace && iter->trace->close)
3056 iter->trace->close(iter);
3057
debdd57f
HT
3058 if (!iter->snapshot)
3059 /* reenable tracing if it was previously enabled */
2b6080f2 3060 tracing_start_tr(tr);
f77d09a3
AL
3061
3062 __trace_array_put(tr);
3063
bc0c38d1
SR
3064 mutex_unlock(&trace_types_lock);
3065
d7350c3f 3066 mutex_destroy(&iter->mutex);
b0dfa978 3067 free_cpumask_var(iter->started);
d7350c3f 3068 kfree(iter->trace);
6d158a81 3069 kfree(iter->buffer_iter);
50e18b94 3070 seq_release_private(inode, file);
ff451961 3071
bc0c38d1
SR
3072 return 0;
3073}
3074
7b85af63
SRRH
3075static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3076{
3077 struct trace_array *tr = inode->i_private;
3078
3079 trace_array_put(tr);
bc0c38d1
SR
3080 return 0;
3081}
3082
7b85af63
SRRH
3083static int tracing_single_release_tr(struct inode *inode, struct file *file)
3084{
3085 struct trace_array *tr = inode->i_private;
3086
3087 trace_array_put(tr);
3088
3089 return single_release(inode, file);
3090}
3091
bc0c38d1
SR
3092static int tracing_open(struct inode *inode, struct file *file)
3093{
6484c71c 3094 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3095 struct trace_iterator *iter;
3096 int ret = 0;
bc0c38d1 3097
ff451961
SRRH
3098 if (trace_array_get(tr) < 0)
3099 return -ENODEV;
3100
4acd4d00 3101 /* If this file was open for write, then erase contents */
6484c71c
ON
3102 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3103 int cpu = tracing_get_cpu(inode);
3104
3105 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3106 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3107 else
6484c71c 3108 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3109 }
bc0c38d1 3110
4acd4d00 3111 if (file->f_mode & FMODE_READ) {
6484c71c 3112 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3113 if (IS_ERR(iter))
3114 ret = PTR_ERR(iter);
3115 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3116 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3117 }
ff451961
SRRH
3118
3119 if (ret < 0)
3120 trace_array_put(tr);
3121
bc0c38d1
SR
3122 return ret;
3123}
3124
607e2ea1
SRRH
3125/*
3126 * Some tracers are not suitable for instance buffers.
3127 * A tracer is always available for the global array (toplevel)
3128 * or if it explicitly states that it is.
3129 */
3130static bool
3131trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3132{
3133 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3134}
3135
3136/* Find the next tracer that this trace array may use */
3137static struct tracer *
3138get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3139{
3140 while (t && !trace_ok_for_array(t, tr))
3141 t = t->next;
3142
3143 return t;
3144}
3145
e309b41d 3146static void *
bc0c38d1
SR
3147t_next(struct seq_file *m, void *v, loff_t *pos)
3148{
607e2ea1 3149 struct trace_array *tr = m->private;
f129e965 3150 struct tracer *t = v;
bc0c38d1
SR
3151
3152 (*pos)++;
3153
3154 if (t)
607e2ea1 3155 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3156
bc0c38d1
SR
3157 return t;
3158}
3159
3160static void *t_start(struct seq_file *m, loff_t *pos)
3161{
607e2ea1 3162 struct trace_array *tr = m->private;
f129e965 3163 struct tracer *t;
bc0c38d1
SR
3164 loff_t l = 0;
3165
3166 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3167
3168 t = get_tracer_for_array(tr, trace_types);
3169 for (; t && l < *pos; t = t_next(m, t, &l))
3170 ;
bc0c38d1
SR
3171
3172 return t;
3173}
3174
3175static void t_stop(struct seq_file *m, void *p)
3176{
3177 mutex_unlock(&trace_types_lock);
3178}
3179
3180static int t_show(struct seq_file *m, void *v)
3181{
3182 struct tracer *t = v;
3183
3184 if (!t)
3185 return 0;
3186
3187 seq_printf(m, "%s", t->name);
3188 if (t->next)
3189 seq_putc(m, ' ');
3190 else
3191 seq_putc(m, '\n');
3192
3193 return 0;
3194}
3195
88e9d34c 3196static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3197 .start = t_start,
3198 .next = t_next,
3199 .stop = t_stop,
3200 .show = t_show,
bc0c38d1
SR
3201};
3202
3203static int show_traces_open(struct inode *inode, struct file *file)
3204{
607e2ea1
SRRH
3205 struct trace_array *tr = inode->i_private;
3206 struct seq_file *m;
3207 int ret;
3208
60a11774
SR
3209 if (tracing_disabled)
3210 return -ENODEV;
3211
607e2ea1
SRRH
3212 ret = seq_open(file, &show_traces_seq_ops);
3213 if (ret)
3214 return ret;
3215
3216 m = file->private_data;
3217 m->private = tr;
3218
3219 return 0;
bc0c38d1
SR
3220}
3221
4acd4d00
SR
3222static ssize_t
3223tracing_write_stub(struct file *filp, const char __user *ubuf,
3224 size_t count, loff_t *ppos)
3225{
3226 return count;
3227}
3228
098c879e 3229loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3230{
098c879e
SRRH
3231 int ret;
3232
364829b1 3233 if (file->f_mode & FMODE_READ)
098c879e 3234 ret = seq_lseek(file, offset, whence);
364829b1 3235 else
098c879e
SRRH
3236 file->f_pos = ret = 0;
3237
3238 return ret;
364829b1
SP
3239}
3240
5e2336a0 3241static const struct file_operations tracing_fops = {
4bf39a94
IM
3242 .open = tracing_open,
3243 .read = seq_read,
4acd4d00 3244 .write = tracing_write_stub,
098c879e 3245 .llseek = tracing_lseek,
4bf39a94 3246 .release = tracing_release,
bc0c38d1
SR
3247};
3248
5e2336a0 3249static const struct file_operations show_traces_fops = {
c7078de1
IM
3250 .open = show_traces_open,
3251 .read = seq_read,
3252 .release = seq_release,
b444786f 3253 .llseek = seq_lseek,
c7078de1
IM
3254};
3255
36dfe925
IM
3256/*
3257 * The tracer itself will not take this lock, but still we want
3258 * to provide a consistent cpumask to user-space:
3259 */
3260static DEFINE_MUTEX(tracing_cpumask_update_lock);
3261
3262/*
3263 * Temporary storage for the character representation of the
3264 * CPU bitmask (and one more byte for the newline):
3265 */
3266static char mask_str[NR_CPUS + 1];
3267
c7078de1
IM
3268static ssize_t
3269tracing_cpumask_read(struct file *filp, char __user *ubuf,
3270 size_t count, loff_t *ppos)
3271{
ccfe9e42 3272 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3273 int len;
c7078de1
IM
3274
3275 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3276
ccfe9e42 3277 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
36dfe925
IM
3278 if (count - len < 2) {
3279 count = -EINVAL;
3280 goto out_err;
3281 }
3282 len += sprintf(mask_str + len, "\n");
3283 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3284
3285out_err:
c7078de1
IM
3286 mutex_unlock(&tracing_cpumask_update_lock);
3287
3288 return count;
3289}
3290
3291static ssize_t
3292tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3293 size_t count, loff_t *ppos)
3294{
ccfe9e42 3295 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3296 cpumask_var_t tracing_cpumask_new;
2b6080f2 3297 int err, cpu;
9e01c1b7
RR
3298
3299 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3300 return -ENOMEM;
c7078de1 3301
9e01c1b7 3302 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3303 if (err)
36dfe925
IM
3304 goto err_unlock;
3305
215368e8
LZ
3306 mutex_lock(&tracing_cpumask_update_lock);
3307
a5e25883 3308 local_irq_disable();
0199c4e6 3309 arch_spin_lock(&ftrace_max_lock);
ab46428c 3310 for_each_tracing_cpu(cpu) {
36dfe925
IM
3311 /*
3312 * Increase/decrease the disabled counter if we are
3313 * about to flip a bit in the cpumask:
3314 */
ccfe9e42 3315 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3316 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3317 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3318 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3319 }
ccfe9e42 3320 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3321 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3322 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3323 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3324 }
3325 }
0199c4e6 3326 arch_spin_unlock(&ftrace_max_lock);
a5e25883 3327 local_irq_enable();
36dfe925 3328
ccfe9e42 3329 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3330
3331 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3332 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3333
3334 return count;
36dfe925
IM
3335
3336err_unlock:
215368e8 3337 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3338
3339 return err;
c7078de1
IM
3340}
3341
5e2336a0 3342static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3343 .open = tracing_open_generic_tr,
c7078de1
IM
3344 .read = tracing_cpumask_read,
3345 .write = tracing_cpumask_write,
ccfe9e42 3346 .release = tracing_release_generic_tr,
b444786f 3347 .llseek = generic_file_llseek,
bc0c38d1
SR
3348};
3349
fdb372ed 3350static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3351{
d8e83d26 3352 struct tracer_opt *trace_opts;
2b6080f2 3353 struct trace_array *tr = m->private;
d8e83d26 3354 u32 tracer_flags;
d8e83d26 3355 int i;
adf9f195 3356
d8e83d26 3357 mutex_lock(&trace_types_lock);
2b6080f2
SR
3358 tracer_flags = tr->current_trace->flags->val;
3359 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3360
bc0c38d1
SR
3361 for (i = 0; trace_options[i]; i++) {
3362 if (trace_flags & (1 << i))
fdb372ed 3363 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3364 else
fdb372ed 3365 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3366 }
3367
adf9f195
FW
3368 for (i = 0; trace_opts[i].name; i++) {
3369 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3370 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3371 else
fdb372ed 3372 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3373 }
d8e83d26 3374 mutex_unlock(&trace_types_lock);
adf9f195 3375
fdb372ed 3376 return 0;
bc0c38d1 3377}
bc0c38d1 3378
8c1a49ae 3379static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3380 struct tracer_flags *tracer_flags,
3381 struct tracer_opt *opts, int neg)
3382{
8c1a49ae 3383 struct tracer *trace = tr->current_trace;
8d18eaaf 3384 int ret;
bc0c38d1 3385
8c1a49ae 3386 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3387 if (ret)
3388 return ret;
3389
3390 if (neg)
3391 tracer_flags->val &= ~opts->bit;
3392 else
3393 tracer_flags->val |= opts->bit;
3394 return 0;
bc0c38d1
SR
3395}
3396
adf9f195 3397/* Try to assign a tracer specific option */
8c1a49ae 3398static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3399{
8c1a49ae 3400 struct tracer *trace = tr->current_trace;
7770841e 3401 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3402 struct tracer_opt *opts = NULL;
8d18eaaf 3403 int i;
adf9f195 3404
7770841e
Z
3405 for (i = 0; tracer_flags->opts[i].name; i++) {
3406 opts = &tracer_flags->opts[i];
adf9f195 3407
8d18eaaf 3408 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3409 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3410 }
adf9f195 3411
8d18eaaf 3412 return -EINVAL;
adf9f195
FW
3413}
3414
613f04a0
SRRH
3415/* Some tracers require overwrite to stay enabled */
3416int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3417{
3418 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3419 return -1;
3420
3421 return 0;
3422}
3423
2b6080f2 3424int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3425{
3426 /* do nothing if flag is already set */
3427 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3428 return 0;
3429
3430 /* Give the tracer a chance to approve the change */
2b6080f2 3431 if (tr->current_trace->flag_changed)
bf6065b5 3432 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3433 return -EINVAL;
af4617bd
SR
3434
3435 if (enabled)
3436 trace_flags |= mask;
3437 else
3438 trace_flags &= ~mask;
e870e9a1
LZ
3439
3440 if (mask == TRACE_ITER_RECORD_CMD)
3441 trace_event_enable_cmd_record(enabled);
750912fa 3442
80902822 3443 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3444 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3445#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3446 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3447#endif
3448 }
81698831
SR
3449
3450 if (mask == TRACE_ITER_PRINTK)
3451 trace_printk_start_stop_comm(enabled);
613f04a0
SRRH
3452
3453 return 0;
af4617bd
SR
3454}
3455
2b6080f2 3456static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3457{
8d18eaaf 3458 char *cmp;
bc0c38d1 3459 int neg = 0;
613f04a0 3460 int ret = -ENODEV;
bc0c38d1
SR
3461 int i;
3462
7bcfaf54 3463 cmp = strstrip(option);
bc0c38d1 3464
8d18eaaf 3465 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3466 neg = 1;
3467 cmp += 2;
3468 }
3469
69d34da2
SRRH
3470 mutex_lock(&trace_types_lock);
3471
bc0c38d1 3472 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3473 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3474 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3475 break;
3476 }
3477 }
adf9f195
FW
3478
3479 /* If no option could be set, test the specific tracer options */
69d34da2 3480 if (!trace_options[i])
8c1a49ae 3481 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3482
3483 mutex_unlock(&trace_types_lock);
bc0c38d1 3484
7bcfaf54
SR
3485 return ret;
3486}
3487
3488static ssize_t
3489tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3490 size_t cnt, loff_t *ppos)
3491{
2b6080f2
SR
3492 struct seq_file *m = filp->private_data;
3493 struct trace_array *tr = m->private;
7bcfaf54 3494 char buf[64];
613f04a0 3495 int ret;
7bcfaf54
SR
3496
3497 if (cnt >= sizeof(buf))
3498 return -EINVAL;
3499
3500 if (copy_from_user(&buf, ubuf, cnt))
3501 return -EFAULT;
3502
a8dd2176
SR
3503 buf[cnt] = 0;
3504
2b6080f2 3505 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3506 if (ret < 0)
3507 return ret;
7bcfaf54 3508
cf8517cf 3509 *ppos += cnt;
bc0c38d1
SR
3510
3511 return cnt;
3512}
3513
fdb372ed
LZ
3514static int tracing_trace_options_open(struct inode *inode, struct file *file)
3515{
7b85af63 3516 struct trace_array *tr = inode->i_private;
f77d09a3 3517 int ret;
7b85af63 3518
fdb372ed
LZ
3519 if (tracing_disabled)
3520 return -ENODEV;
2b6080f2 3521
7b85af63
SRRH
3522 if (trace_array_get(tr) < 0)
3523 return -ENODEV;
3524
f77d09a3
AL
3525 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3526 if (ret < 0)
3527 trace_array_put(tr);
3528
3529 return ret;
fdb372ed
LZ
3530}
3531
5e2336a0 3532static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3533 .open = tracing_trace_options_open,
3534 .read = seq_read,
3535 .llseek = seq_lseek,
7b85af63 3536 .release = tracing_single_release_tr,
ee6bce52 3537 .write = tracing_trace_options_write,
bc0c38d1
SR
3538};
3539
7bd2f24c
IM
3540static const char readme_msg[] =
3541 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3542 "# echo 0 > tracing_on : quick way to disable tracing\n"
3543 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3544 " Important files:\n"
3545 " trace\t\t\t- The static contents of the buffer\n"
3546 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3547 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3548 " current_tracer\t- function and latency tracers\n"
3549 " available_tracers\t- list of configured tracers for current_tracer\n"
3550 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3551 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3552 " trace_clock\t\t-change the clock used to order events\n"
3553 " local: Per cpu clock but may not be synced across CPUs\n"
3554 " global: Synced across CPUs but slows tracing down.\n"
3555 " counter: Not a clock, but just an increment\n"
3556 " uptime: Jiffy counter from time of boot\n"
3557 " perf: Same clock that perf events use\n"
3558#ifdef CONFIG_X86_64
3559 " x86-tsc: TSC cycle counter\n"
3560#endif
3561 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3562 " tracing_cpumask\t- Limit which CPUs to trace\n"
3563 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3564 "\t\t\t Remove sub-buffer with rmdir\n"
3565 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3566 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3567 "\t\t\t option name\n"
22f45649
SRRH
3568#ifdef CONFIG_DYNAMIC_FTRACE
3569 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3570 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3571 "\t\t\t functions\n"
3572 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3573 "\t modules: Can select a group via module\n"
3574 "\t Format: :mod:<module-name>\n"
3575 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3576 "\t triggers: a command to perform when function is hit\n"
3577 "\t Format: <function>:<trigger>[:count]\n"
3578 "\t trigger: traceon, traceoff\n"
3579 "\t\t enable_event:<system>:<event>\n"
3580 "\t\t disable_event:<system>:<event>\n"
22f45649 3581#ifdef CONFIG_STACKTRACE
71485c45 3582 "\t\t stacktrace\n"
22f45649
SRRH
3583#endif
3584#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3585 "\t\t snapshot\n"
22f45649 3586#endif
71485c45
SRRH
3587 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3588 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3589 "\t The first one will disable tracing every time do_fault is hit\n"
3590 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3591 "\t The first time do trap is hit and it disables tracing, the\n"
3592 "\t counter will decrement to 2. If tracing is already disabled,\n"
3593 "\t the counter will not decrement. It only decrements when the\n"
3594 "\t trigger did work\n"
3595 "\t To remove trigger without count:\n"
3596 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3597 "\t To remove trigger with a count:\n"
3598 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3599 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3600 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3601 "\t modules: Can select a group via module command :mod:\n"
3602 "\t Does not accept triggers\n"
22f45649
SRRH
3603#endif /* CONFIG_DYNAMIC_FTRACE */
3604#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3605 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3606 "\t\t (function)\n"
22f45649
SRRH
3607#endif
3608#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3609 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3610 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3611#endif
3612#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3613 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3614 "\t\t\t snapshot buffer. Read the contents for more\n"
3615 "\t\t\t information\n"
22f45649 3616#endif
991821c8 3617#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3618 " stack_trace\t\t- Shows the max stack trace when active\n"
3619 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3620 "\t\t\t Write into this file to reset the max size (trigger a\n"
3621 "\t\t\t new trace)\n"
22f45649 3622#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3623 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3624 "\t\t\t traces\n"
22f45649 3625#endif
991821c8 3626#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3627 " events/\t\t- Directory containing all trace event subsystems:\n"
3628 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3629 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3630 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3631 "\t\t\t events\n"
26f25564 3632 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3633 " events/<system>/<event>/\t- Directory containing control files for\n"
3634 "\t\t\t <event>:\n"
26f25564
TZ
3635 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3636 " filter\t\t- If set, only events passing filter are traced\n"
3637 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3638 "\t Format: <trigger>[:count][if <filter>]\n"
3639 "\t trigger: traceon, traceoff\n"
3640 "\t enable_event:<system>:<event>\n"
3641 "\t disable_event:<system>:<event>\n"
26f25564 3642#ifdef CONFIG_STACKTRACE
71485c45 3643 "\t\t stacktrace\n"
26f25564
TZ
3644#endif
3645#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3646 "\t\t snapshot\n"
26f25564 3647#endif
71485c45
SRRH
3648 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3649 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3650 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3651 "\t events/block/block_unplug/trigger\n"
3652 "\t The first disables tracing every time block_unplug is hit.\n"
3653 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3654 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3655 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3656 "\t Like function triggers, the counter is only decremented if it\n"
3657 "\t enabled or disabled tracing.\n"
3658 "\t To remove a trigger without a count:\n"
3659 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3660 "\t To remove a trigger with a count:\n"
3661 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3662 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3663;
3664
3665static ssize_t
3666tracing_readme_read(struct file *filp, char __user *ubuf,
3667 size_t cnt, loff_t *ppos)
3668{
3669 return simple_read_from_buffer(ubuf, cnt, ppos,
3670 readme_msg, strlen(readme_msg));
3671}
3672
5e2336a0 3673static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3674 .open = tracing_open_generic,
3675 .read = tracing_readme_read,
b444786f 3676 .llseek = generic_file_llseek,
7bd2f24c
IM
3677};
3678
69abe6a5
AP
3679static ssize_t
3680tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3681 size_t cnt, loff_t *ppos)
3682{
3683 char *buf_comm;
3684 char *file_buf;
3685 char *buf;
3686 int len = 0;
3687 int pid;
3688 int i;
3689
3690 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3691 if (!file_buf)
3692 return -ENOMEM;
3693
3694 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3695 if (!buf_comm) {
3696 kfree(file_buf);
3697 return -ENOMEM;
3698 }
3699
3700 buf = file_buf;
3701
3702 for (i = 0; i < SAVED_CMDLINES; i++) {
3703 int r;
3704
3705 pid = map_cmdline_to_pid[i];
3706 if (pid == -1 || pid == NO_CMDLINE_MAP)
3707 continue;
3708
3709 trace_find_cmdline(pid, buf_comm);
3710 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3711 buf += r;
3712 len += r;
3713 }
3714
3715 len = simple_read_from_buffer(ubuf, cnt, ppos,
3716 file_buf, len);
3717
3718 kfree(file_buf);
3719 kfree(buf_comm);
3720
3721 return len;
3722}
3723
3724static const struct file_operations tracing_saved_cmdlines_fops = {
3725 .open = tracing_open_generic,
3726 .read = tracing_saved_cmdlines_read,
b444786f 3727 .llseek = generic_file_llseek,
69abe6a5
AP
3728};
3729
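/*
 * Illustrative output of the saved_cmdlines read above ("%d %s\n" per
 * entry); the pids and comms here are hypothetical and vary per system:
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 systemd
 *	735 kworker/0:2
 *	1298 bash
 */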
bc0c38d1
SR
3730static ssize_t
3731tracing_set_trace_read(struct file *filp, char __user *ubuf,
3732 size_t cnt, loff_t *ppos)
3733{
2b6080f2 3734 struct trace_array *tr = filp->private_data;
ee6c2c1b 3735 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
3736 int r;
3737
3738 mutex_lock(&trace_types_lock);
2b6080f2 3739 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
3740 mutex_unlock(&trace_types_lock);
3741
4bf39a94 3742 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
3743}
3744
b6f11df2
ACM
3745int tracer_init(struct tracer *t, struct trace_array *tr)
3746{
12883efb 3747 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
3748 return t->init(tr);
3749}
3750
12883efb 3751static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
3752{
3753 int cpu;
737223fb 3754
438ced17 3755 for_each_tracing_cpu(cpu)
12883efb 3756 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
3757}
3758
12883efb 3759#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 3760/* resize @trace_buf's buffer to the size of @size_buf's entries */
12883efb
SRRH
3761static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3762 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
3763{
3764 int cpu, ret = 0;
3765
3766 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3767 for_each_tracing_cpu(cpu) {
12883efb
SRRH
3768 ret = ring_buffer_resize(trace_buf->buffer,
3769 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
3770 if (ret < 0)
3771 break;
12883efb
SRRH
3772 per_cpu_ptr(trace_buf->data, cpu)->entries =
3773 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
3774 }
3775 } else {
12883efb
SRRH
3776 ret = ring_buffer_resize(trace_buf->buffer,
3777 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 3778 if (ret == 0)
12883efb
SRRH
3779 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3780 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
3781 }
3782
3783 return ret;
3784}
12883efb 3785#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 3786
2b6080f2
SR
3787static int __tracing_resize_ring_buffer(struct trace_array *tr,
3788 unsigned long size, int cpu)
73c5162a
SR
3789{
3790 int ret;
3791
3792 /*
 3793 * If the kernel or the user changes the size of the ring buffer,
a123c52b
SR
3794 * we use the size that was given, and we can forget about
3795 * expanding it later.
73c5162a 3796 */
55034cd6 3797 ring_buffer_expanded = true;
73c5162a 3798
b382ede6 3799 /* May be called before buffers are initialized */
12883efb 3800 if (!tr->trace_buffer.buffer)
b382ede6
SR
3801 return 0;
3802
12883efb 3803 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
3804 if (ret < 0)
3805 return ret;
3806
12883efb 3807#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3808 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3809 !tr->current_trace->use_max_tr)
ef710e10
KM
3810 goto out;
3811
12883efb 3812 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 3813 if (ret < 0) {
12883efb
SRRH
3814 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3815 &tr->trace_buffer, cpu);
73c5162a 3816 if (r < 0) {
a123c52b
SR
3817 /*
 3818 * AARGH! We are left with a different
 3819 * sized max buffer!!!!
 3820 * The max buffer is our "snapshot" buffer.
 3821 * When a tracer needs a snapshot (one of the
 3822 * latency tracers), it swaps the max buffer
 3823 * with the saved snapshot. We succeeded in
 3824 * updating the size of the main buffer, but failed to
3825 * update the size of the max buffer. But when we tried
3826 * to reset the main buffer to the original size, we
3827 * failed there too. This is very unlikely to
3828 * happen, but if it does, warn and kill all
3829 * tracing.
3830 */
73c5162a
SR
3831 WARN_ON(1);
3832 tracing_disabled = 1;
3833 }
3834 return ret;
3835 }
3836
438ced17 3837 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3838 set_buffer_entries(&tr->max_buffer, size);
438ced17 3839 else
12883efb 3840 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 3841
ef710e10 3842 out:
12883efb
SRRH
3843#endif /* CONFIG_TRACER_MAX_TRACE */
3844
438ced17 3845 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3846 set_buffer_entries(&tr->trace_buffer, size);
438ced17 3847 else
12883efb 3848 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
3849
3850 return ret;
3851}
3852
2b6080f2
SR
3853static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3854 unsigned long size, int cpu_id)
4f271a2a 3855{
83f40318 3856 int ret = size;
4f271a2a
VN
3857
3858 mutex_lock(&trace_types_lock);
3859
438ced17
VN
3860 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3861 /* make sure, this cpu is enabled in the mask */
3862 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3863 ret = -EINVAL;
3864 goto out;
3865 }
3866 }
4f271a2a 3867
2b6080f2 3868 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
3869 if (ret < 0)
3870 ret = -ENOMEM;
3871
438ced17 3872out:
4f271a2a
VN
3873 mutex_unlock(&trace_types_lock);
3874
3875 return ret;
3876}
3877
ef710e10 3878
1852fcce
SR
3879/**
3880 * tracing_update_buffers - used by tracing facility to expand ring buffers
3881 *
 3882 * To save memory when tracing is never used on a system that has it
 3883 * configured in, the ring buffers are set to a minimum size. Once
 3884 * a user starts to use the tracing facility, they need to grow
 3885 * to their default size.
3886 *
3887 * This function is to be called when a tracer is about to be used.
3888 */
3889int tracing_update_buffers(void)
3890{
3891 int ret = 0;
3892
1027fcb2 3893 mutex_lock(&trace_types_lock);
1852fcce 3894 if (!ring_buffer_expanded)
2b6080f2 3895 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 3896 RING_BUFFER_ALL_CPUS);
1027fcb2 3897 mutex_unlock(&trace_types_lock);
1852fcce
SR
3898
3899 return ret;
3900}
3901
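/*
 * Illustrative effect of the lazy expansion above, assuming default
 * sizes (the actual numbers depend on trace_buf_size and config):
 *
 *	# cat buffer_size_kb
 *	7 (expanded: 1408)		<- boot-time minimum, not yet expanded
 *	# echo function > current_tracer	<- triggers expansion on first use
 *	# cat buffer_size_kb
 *	1408				<- grown to the default size
 */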
577b785f
SR
3902struct trace_option_dentry;
3903
3904static struct trace_option_dentry *
2b6080f2 3905create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f
SR
3906
3907static void
3908destroy_trace_option_files(struct trace_option_dentry *topts);
3909
6b450d25
SRRH
3910/*
3911 * Used to clear out the tracer before deletion of an instance.
3912 * Must have trace_types_lock held.
3913 */
3914static void tracing_set_nop(struct trace_array *tr)
3915{
3916 if (tr->current_trace == &nop_trace)
3917 return;
3918
50512ab5 3919 tr->current_trace->enabled--;
6b450d25
SRRH
3920
3921 if (tr->current_trace->reset)
3922 tr->current_trace->reset(tr);
3923
3924 tr->current_trace = &nop_trace;
3925}
3926
607e2ea1 3927static int tracing_set_tracer(struct trace_array *tr, const char *buf)
bc0c38d1 3928{
577b785f 3929 static struct trace_option_dentry *topts;
bc0c38d1 3930 struct tracer *t;
12883efb 3931#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 3932 bool had_max_tr;
12883efb 3933#endif
d9e54076 3934 int ret = 0;
bc0c38d1 3935
1027fcb2
SR
3936 mutex_lock(&trace_types_lock);
3937
73c5162a 3938 if (!ring_buffer_expanded) {
2b6080f2 3939 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 3940 RING_BUFFER_ALL_CPUS);
73c5162a 3941 if (ret < 0)
59f586db 3942 goto out;
73c5162a
SR
3943 ret = 0;
3944 }
3945
bc0c38d1
SR
3946 for (t = trace_types; t; t = t->next) {
3947 if (strcmp(t->name, buf) == 0)
3948 break;
3949 }
c2931e05
FW
3950 if (!t) {
3951 ret = -EINVAL;
3952 goto out;
3953 }
2b6080f2 3954 if (t == tr->current_trace)
bc0c38d1
SR
3955 goto out;
3956
607e2ea1
SRRH
3957 /* Some tracers are only allowed for the top level buffer */
3958 if (!trace_ok_for_array(t, tr)) {
3959 ret = -EINVAL;
3960 goto out;
3961 }
3962
9f029e83 3963 trace_branch_disable();
613f04a0 3964
50512ab5 3965 tr->current_trace->enabled--;
613f04a0 3966
2b6080f2
SR
3967 if (tr->current_trace->reset)
3968 tr->current_trace->reset(tr);
34600f0e 3969
12883efb 3970 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 3971 tr->current_trace = &nop_trace;
34600f0e 3972
45ad21ca
SRRH
3973#ifdef CONFIG_TRACER_MAX_TRACE
3974 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
3975
3976 if (had_max_tr && !t->use_max_tr) {
3977 /*
3978 * We need to make sure that the update_max_tr sees that
3979 * current_trace changed to nop_trace to keep it from
3980 * swapping the buffers after we resize it.
3981 * The update_max_tr is called from interrupts disabled
3982 * so a synchronized_sched() is sufficient.
3983 */
3984 synchronize_sched();
3209cff4 3985 free_snapshot(tr);
ef710e10 3986 }
12883efb 3987#endif
f1b21c9a
SRRH
3988 /* Currently, only the top instance has options */
3989 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
3990 destroy_trace_option_files(topts);
3991 topts = create_trace_option_files(tr, t);
3992 }
12883efb
SRRH
3993
3994#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 3995 if (t->use_max_tr && !had_max_tr) {
3209cff4 3996 ret = alloc_snapshot(tr);
d60da506
HT
3997 if (ret < 0)
3998 goto out;
ef710e10 3999 }
12883efb 4000#endif
577b785f 4001
1c80025a 4002 if (t->init) {
b6f11df2 4003 ret = tracer_init(t, tr);
1c80025a
FW
4004 if (ret)
4005 goto out;
4006 }
bc0c38d1 4007
2b6080f2 4008 tr->current_trace = t;
50512ab5 4009 tr->current_trace->enabled++;
9f029e83 4010 trace_branch_enable(tr);
bc0c38d1
SR
4011 out:
4012 mutex_unlock(&trace_types_lock);
4013
d9e54076
PZ
4014 return ret;
4015}
4016
4017static ssize_t
4018tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4019 size_t cnt, loff_t *ppos)
4020{
607e2ea1 4021 struct trace_array *tr = filp->private_data;
ee6c2c1b 4022 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4023 int i;
4024 size_t ret;
e6e7a65a
FW
4025 int err;
4026
4027 ret = cnt;
d9e54076 4028
ee6c2c1b
LZ
4029 if (cnt > MAX_TRACER_SIZE)
4030 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4031
4032 if (copy_from_user(&buf, ubuf, cnt))
4033 return -EFAULT;
4034
4035 buf[cnt] = 0;
4036
4037 /* strip ending whitespace. */
4038 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4039 buf[i] = 0;
4040
607e2ea1 4041 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4042 if (err)
4043 return err;
d9e54076 4044
cf8517cf 4045 *ppos += ret;
bc0c38d1 4046
c2931e05 4047 return ret;
bc0c38d1
SR
4048}
4049
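/*
 * Illustrative tracer switch through the writer above; trailing
 * whitespace from echo is stripped before the name lookup (the tracer
 * list depends on config):
 *
 *	# cat available_tracers
 *	irqsoff function_graph function nop
 *	# echo irqsoff > current_tracer	<- use_max_tr: allocates the snapshot
 *	# echo nop > current_tracer	<- frees the snapshot buffer again
 */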
4050static ssize_t
4051tracing_max_lat_read(struct file *filp, char __user *ubuf,
4052 size_t cnt, loff_t *ppos)
4053{
4054 unsigned long *ptr = filp->private_data;
4055 char buf[64];
4056 int r;
4057
cffae437 4058 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4059 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4060 if (r > sizeof(buf))
4061 r = sizeof(buf);
4bf39a94 4062 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4063}
4064
4065static ssize_t
4066tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4067 size_t cnt, loff_t *ppos)
4068{
5e39841c 4069 unsigned long *ptr = filp->private_data;
5e39841c 4070 unsigned long val;
c6caeeb1 4071 int ret;
bc0c38d1 4072
22fe9b54
PH
4073 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4074 if (ret)
c6caeeb1 4075 return ret;
bc0c38d1
SR
4076
4077 *ptr = val * 1000;
4078
4079 return cnt;
4080}
4081
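/*
 * Illustrative: the value written above is taken in microseconds and
 * stored in nanoseconds (val * 1000), e.g. with a latency tracer active:
 *
 *	# echo 500 > tracing_max_latency	<- a new max is recorded only when
 *	# cat tracing_max_latency		   a latency exceeds 500 usecs
 *	500
 */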
b3806b43
SR
4082static int tracing_open_pipe(struct inode *inode, struct file *filp)
4083{
15544209 4084 struct trace_array *tr = inode->i_private;
b3806b43 4085 struct trace_iterator *iter;
b04cc6b1 4086 int ret = 0;
b3806b43
SR
4087
4088 if (tracing_disabled)
4089 return -ENODEV;
4090
7b85af63
SRRH
4091 if (trace_array_get(tr) < 0)
4092 return -ENODEV;
4093
b04cc6b1
FW
4094 mutex_lock(&trace_types_lock);
4095
b3806b43
SR
4096 /* create a buffer to store the information to pass to userspace */
4097 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4098 if (!iter) {
4099 ret = -ENOMEM;
f77d09a3 4100 __trace_array_put(tr);
b04cc6b1
FW
4101 goto out;
4102 }
b3806b43 4103
d7350c3f
FW
4104 /*
4105 * We make a copy of the current tracer to avoid concurrent
4106 * changes on it while we are reading.
4107 */
4108 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4109 if (!iter->trace) {
4110 ret = -ENOMEM;
4111 goto fail;
4112 }
2b6080f2 4113 *iter->trace = *tr->current_trace;
d7350c3f 4114
4462344e 4115 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4116 ret = -ENOMEM;
d7350c3f 4117 goto fail;
4462344e
RR
4118 }
4119
a309720c 4120 /* trace pipe does not show start of buffer */
4462344e 4121 cpumask_setall(iter->started);
a309720c 4122
112f38a7
SR
4123 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4124 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4125
8be0709f 4126 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4127 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4128 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4129
15544209
ON
4130 iter->tr = tr;
4131 iter->trace_buffer = &tr->trace_buffer;
4132 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4133 mutex_init(&iter->mutex);
b3806b43
SR
4134 filp->private_data = iter;
4135
107bad8b
SR
4136 if (iter->trace->pipe_open)
4137 iter->trace->pipe_open(iter);
107bad8b 4138
b444786f 4139 nonseekable_open(inode, filp);
b04cc6b1
FW
4140out:
4141 mutex_unlock(&trace_types_lock);
4142 return ret;
d7350c3f
FW
4143
4144fail:
4145 kfree(iter->trace);
4146 kfree(iter);
7b85af63 4147 __trace_array_put(tr);
d7350c3f
FW
4148 mutex_unlock(&trace_types_lock);
4149 return ret;
b3806b43
SR
4150}
4151
4152static int tracing_release_pipe(struct inode *inode, struct file *file)
4153{
4154 struct trace_iterator *iter = file->private_data;
15544209 4155 struct trace_array *tr = inode->i_private;
b3806b43 4156
b04cc6b1
FW
4157 mutex_lock(&trace_types_lock);
4158
29bf4a5e 4159 if (iter->trace->pipe_close)
c521efd1
SR
4160 iter->trace->pipe_close(iter);
4161
b04cc6b1
FW
4162 mutex_unlock(&trace_types_lock);
4163
4462344e 4164 free_cpumask_var(iter->started);
d7350c3f
FW
4165 mutex_destroy(&iter->mutex);
4166 kfree(iter->trace);
b3806b43 4167 kfree(iter);
b3806b43 4168
7b85af63
SRRH
4169 trace_array_put(tr);
4170
b3806b43
SR
4171 return 0;
4172}
4173
2a2cc8f7 4174static unsigned int
cc60cdc9 4175trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4176{
15693458
SRRH
4177 /* Iterators are static, they should be filled or empty */
4178 if (trace_buffer_iter(iter, iter->cpu_file))
4179 return POLLIN | POLLRDNORM;
2a2cc8f7 4180
15693458 4181 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4182 /*
4183 * Always select as readable when in blocking mode
4184 */
4185 return POLLIN | POLLRDNORM;
15693458 4186 else
12883efb 4187 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4188 filp, poll_table);
2a2cc8f7 4189}
2a2cc8f7 4190
cc60cdc9
SR
4191static unsigned int
4192tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4193{
4194 struct trace_iterator *iter = filp->private_data;
4195
4196 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4197}
4198
6eaaa5d5
FW
4199/*
 4200 * This is a makeshift waitqueue.
 4201 * A tracer might use this callback in some rare cases:
 4202 *
 4203 * 1) the current tracer might hold the runqueue lock when it wakes up
 4204 * a reader, hence a deadlock (sched, function, and function graph tracers)
 4205 * 2) the function tracers trace all functions, and we don't want
 4206 * the overhead of calling wake_up and friends
 4207 * (and tracing them too)
 4208 *
 4209 * Anyway, this really is a very primitive wakeup.
4210 */
4211void poll_wait_pipe(struct trace_iterator *iter)
4212{
4213 set_current_state(TASK_INTERRUPTIBLE);
4214 /* sleep for 100 msecs, and try again. */
4215 schedule_timeout(HZ / 10);
4216}
4217
ff98781b
EGM
4218/* Must be called with trace_types_lock mutex held. */
4219static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4220{
4221 struct trace_iterator *iter = filp->private_data;
b3806b43 4222
b3806b43 4223 while (trace_empty(iter)) {
2dc8f095 4224
107bad8b 4225 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4226 return -EAGAIN;
107bad8b 4227 }
2dc8f095 4228
d7350c3f 4229 mutex_unlock(&iter->mutex);
107bad8b 4230
6eaaa5d5 4231 iter->trace->wait_pipe(iter);
b3806b43 4232
d7350c3f 4233 mutex_lock(&iter->mutex);
107bad8b 4234
6eaaa5d5 4235 if (signal_pending(current))
ff98781b 4236 return -EINTR;
b3806b43
SR
4237
4238 /*
250bfd3d 4239 * We block until we read something and tracing is disabled.
b3806b43
SR
4240 * We still block if tracing is disabled, but we have never
4241 * read anything. This allows a user to cat this file, and
4242 * then enable tracing. But after we have read something,
4243 * we give an EOF when tracing is again disabled.
4244 *
4245 * iter->pos will be 0 if we haven't read anything.
4246 */
10246fa3 4247 if (!tracing_is_on() && iter->pos)
b3806b43 4248 break;
b3806b43
SR
4249 }
4250
ff98781b
EGM
4251 return 1;
4252}
4253
4254/*
4255 * Consumer reader.
4256 */
4257static ssize_t
4258tracing_read_pipe(struct file *filp, char __user *ubuf,
4259 size_t cnt, loff_t *ppos)
4260{
4261 struct trace_iterator *iter = filp->private_data;
2b6080f2 4262 struct trace_array *tr = iter->tr;
ff98781b
EGM
4263 ssize_t sret;
4264
4265 /* return any leftover data */
4266 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4267 if (sret != -EBUSY)
4268 return sret;
4269
f9520750 4270 trace_seq_init(&iter->seq);
ff98781b 4271
d7350c3f 4272 /* copy the tracer to avoid using a global lock all around */
ff98781b 4273 mutex_lock(&trace_types_lock);
2b6080f2
SR
4274 if (unlikely(iter->trace->name != tr->current_trace->name))
4275 *iter->trace = *tr->current_trace;
d7350c3f
FW
4276 mutex_unlock(&trace_types_lock);
4277
4278 /*
4279 * Avoid more than one consumer on a single file descriptor
4280 * This is just a matter of traces coherency, the ring buffer itself
4281 * is protected.
4282 */
4283 mutex_lock(&iter->mutex);
ff98781b
EGM
4284 if (iter->trace->read) {
4285 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4286 if (sret)
4287 goto out;
4288 }
4289
4290waitagain:
4291 sret = tracing_wait_pipe(filp);
4292 if (sret <= 0)
4293 goto out;
4294
b3806b43 4295 /* stop when tracing is finished */
ff98781b
EGM
4296 if (trace_empty(iter)) {
4297 sret = 0;
107bad8b 4298 goto out;
ff98781b 4299 }
b3806b43
SR
4300
4301 if (cnt >= PAGE_SIZE)
4302 cnt = PAGE_SIZE - 1;
4303
53d0aa77 4304 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4305 memset(&iter->seq, 0,
4306 sizeof(struct trace_iterator) -
4307 offsetof(struct trace_iterator, seq));
ed5467da 4308 cpumask_clear(iter->started);
4823ed7e 4309 iter->pos = -1;
b3806b43 4310
4f535968 4311 trace_event_read_lock();
7e53bd42 4312 trace_access_lock(iter->cpu_file);
955b61e5 4313 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4314 enum print_line_t ret;
088b1e42
SR
4315 int len = iter->seq.len;
4316
f9896bf3 4317 ret = print_trace_line(iter);
2c4f035f 4318 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42
SR
4319 /* don't print partial lines */
4320 iter->seq.len = len;
b3806b43 4321 break;
088b1e42 4322 }
b91facc3
FW
4323 if (ret != TRACE_TYPE_NO_CONSUME)
4324 trace_consume(iter);
b3806b43
SR
4325
4326 if (iter->seq.len >= cnt)
4327 break;
ee5e51f5
JO
4328
4329 /*
4330 * Setting the full flag means we reached the trace_seq buffer
4331 * size and we should leave by partial output condition above.
4332 * One of the trace_seq_* functions is not used properly.
4333 */
4334 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4335 iter->ent->type);
b3806b43 4336 }
7e53bd42 4337 trace_access_unlock(iter->cpu_file);
4f535968 4338 trace_event_read_unlock();
b3806b43 4339
b3806b43 4340 /* Now copy what we have to the user */
6c6c2796
PP
4341 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4342 if (iter->seq.readpos >= iter->seq.len)
f9520750 4343 trace_seq_init(&iter->seq);
9ff4b974
PP
4344
4345 /*
25985edc 4346 * If there was nothing to send to the user, in spite of consuming trace
9ff4b974
PP
4347 * entries, go back to wait for more entries.
4348 */
6c6c2796 4349 if (sret == -EBUSY)
9ff4b974 4350 goto waitagain;
b3806b43 4351
107bad8b 4352out:
d7350c3f 4353 mutex_unlock(&iter->mutex);
107bad8b 4354
6c6c2796 4355 return sret;
b3806b43
SR
4356}
4357
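/*
 * Illustrative consuming read through the function above: trace_pipe
 * blocks until data arrives and removes what it returns, so an event is
 * seen by exactly one reader:
 *
 *	# cat trace_pipe > /tmp/events &
 *	# echo hello > trace_marker
 *	# grep hello /tmp/events	<- present here, gone from the buffer
 */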
3c56819b
EGM
4358static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4359 unsigned int idx)
4360{
4361 __free_page(spd->pages[idx]);
4362}
4363
28dfef8f 4364static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998
SR
4365 .can_merge = 0,
4366 .map = generic_pipe_buf_map,
4367 .unmap = generic_pipe_buf_unmap,
4368 .confirm = generic_pipe_buf_confirm,
92fdd98c 4369 .release = generic_pipe_buf_release,
34cd4998
SR
4370 .steal = generic_pipe_buf_steal,
4371 .get = generic_pipe_buf_get,
3c56819b
EGM
4372};
4373
34cd4998 4374static size_t
fa7c7f6e 4375tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4376{
4377 size_t count;
4378 int ret;
4379
4380 /* Seq buffer is page-sized, exactly what we need. */
4381 for (;;) {
4382 count = iter->seq.len;
4383 ret = print_trace_line(iter);
4384 count = iter->seq.len - count;
4385 if (rem < count) {
4386 rem = 0;
4387 iter->seq.len -= count;
4388 break;
4389 }
4390 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4391 iter->seq.len -= count;
4392 break;
4393 }
4394
74e7ff8c
LJ
4395 if (ret != TRACE_TYPE_NO_CONSUME)
4396 trace_consume(iter);
34cd4998 4397 rem -= count;
955b61e5 4398 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4399 rem = 0;
4400 iter->ent = NULL;
4401 break;
4402 }
4403 }
4404
4405 return rem;
4406}
4407
3c56819b
EGM
4408static ssize_t tracing_splice_read_pipe(struct file *filp,
4409 loff_t *ppos,
4410 struct pipe_inode_info *pipe,
4411 size_t len,
4412 unsigned int flags)
4413{
35f3d14d
JA
4414 struct page *pages_def[PIPE_DEF_BUFFERS];
4415 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4416 struct trace_iterator *iter = filp->private_data;
4417 struct splice_pipe_desc spd = {
35f3d14d
JA
4418 .pages = pages_def,
4419 .partial = partial_def,
34cd4998 4420 .nr_pages = 0, /* This gets updated below. */
047fe360 4421 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4422 .flags = flags,
4423 .ops = &tracing_pipe_buf_ops,
4424 .spd_release = tracing_spd_release_pipe,
3c56819b 4425 };
2b6080f2 4426 struct trace_array *tr = iter->tr;
3c56819b 4427 ssize_t ret;
34cd4998 4428 size_t rem;
3c56819b
EGM
4429 unsigned int i;
4430
35f3d14d
JA
4431 if (splice_grow_spd(pipe, &spd))
4432 return -ENOMEM;
4433
d7350c3f 4434 /* copy the tracer to avoid using a global lock all around */
3c56819b 4435 mutex_lock(&trace_types_lock);
2b6080f2
SR
4436 if (unlikely(iter->trace->name != tr->current_trace->name))
4437 *iter->trace = *tr->current_trace;
d7350c3f
FW
4438 mutex_unlock(&trace_types_lock);
4439
4440 mutex_lock(&iter->mutex);
3c56819b
EGM
4441
4442 if (iter->trace->splice_read) {
4443 ret = iter->trace->splice_read(iter, filp,
4444 ppos, pipe, len, flags);
4445 if (ret)
34cd4998 4446 goto out_err;
3c56819b
EGM
4447 }
4448
4449 ret = tracing_wait_pipe(filp);
4450 if (ret <= 0)
34cd4998 4451 goto out_err;
3c56819b 4452
955b61e5 4453 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4454 ret = -EFAULT;
34cd4998 4455 goto out_err;
3c56819b
EGM
4456 }
4457
4f535968 4458 trace_event_read_lock();
7e53bd42 4459 trace_access_lock(iter->cpu_file);
4f535968 4460
3c56819b 4461 /* Fill as many pages as possible. */
35f3d14d
JA
4462 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4463 spd.pages[i] = alloc_page(GFP_KERNEL);
4464 if (!spd.pages[i])
34cd4998 4465 break;
3c56819b 4466
fa7c7f6e 4467 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4468
4469 /* Copy the data into the page, so we can start over. */
4470 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4471 page_address(spd.pages[i]),
3c56819b
EGM
4472 iter->seq.len);
4473 if (ret < 0) {
35f3d14d 4474 __free_page(spd.pages[i]);
3c56819b
EGM
4475 break;
4476 }
35f3d14d
JA
4477 spd.partial[i].offset = 0;
4478 spd.partial[i].len = iter->seq.len;
3c56819b 4479
f9520750 4480 trace_seq_init(&iter->seq);
3c56819b
EGM
4481 }
4482
7e53bd42 4483 trace_access_unlock(iter->cpu_file);
4f535968 4484 trace_event_read_unlock();
d7350c3f 4485 mutex_unlock(&iter->mutex);
3c56819b
EGM
4486
4487 spd.nr_pages = i;
4488
35f3d14d
JA
4489 ret = splice_to_pipe(pipe, &spd);
4490out:
047fe360 4491 splice_shrink_spd(&spd);
35f3d14d 4492 return ret;
3c56819b 4493
34cd4998 4494out_err:
d7350c3f 4495 mutex_unlock(&iter->mutex);
35f3d14d 4496 goto out;
3c56819b
EGM
4497}
4498
a98a3c3f
SR
4499static ssize_t
4500tracing_entries_read(struct file *filp, char __user *ubuf,
4501 size_t cnt, loff_t *ppos)
4502{
0bc392ee
ON
4503 struct inode *inode = file_inode(filp);
4504 struct trace_array *tr = inode->i_private;
4505 int cpu = tracing_get_cpu(inode);
438ced17
VN
4506 char buf[64];
4507 int r = 0;
4508 ssize_t ret;
a98a3c3f 4509
db526ca3 4510 mutex_lock(&trace_types_lock);
438ced17 4511
0bc392ee 4512 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4513 int cpu, buf_size_same;
4514 unsigned long size;
4515
4516 size = 0;
4517 buf_size_same = 1;
4518 /* check if all cpu sizes are same */
4519 for_each_tracing_cpu(cpu) {
4520 /* fill in the size from first enabled cpu */
4521 if (size == 0)
12883efb
SRRH
4522 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4523 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4524 buf_size_same = 0;
4525 break;
4526 }
4527 }
4528
4529 if (buf_size_same) {
4530 if (!ring_buffer_expanded)
4531 r = sprintf(buf, "%lu (expanded: %lu)\n",
4532 size >> 10,
4533 trace_buf_size >> 10);
4534 else
4535 r = sprintf(buf, "%lu\n", size >> 10);
4536 } else
4537 r = sprintf(buf, "X\n");
4538 } else
0bc392ee 4539 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4540
db526ca3
SR
4541 mutex_unlock(&trace_types_lock);
4542
438ced17
VN
4543 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4544 return ret;
a98a3c3f
SR
4545}
4546
4547static ssize_t
4548tracing_entries_write(struct file *filp, const char __user *ubuf,
4549 size_t cnt, loff_t *ppos)
4550{
0bc392ee
ON
4551 struct inode *inode = file_inode(filp);
4552 struct trace_array *tr = inode->i_private;
a98a3c3f 4553 unsigned long val;
4f271a2a 4554 int ret;
a98a3c3f 4555
22fe9b54
PH
4556 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4557 if (ret)
c6caeeb1 4558 return ret;
a98a3c3f
SR
4559
4560 /* must have at least 1 entry */
4561 if (!val)
4562 return -EINVAL;
4563
1696b2b0
SR
4564 /* value is in KB */
4565 val <<= 10;
0bc392ee 4566 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
4567 if (ret < 0)
4568 return ret;
a98a3c3f 4569
cf8517cf 4570 *ppos += cnt;
a98a3c3f 4571
4f271a2a
VN
4572 return cnt;
4573}
bf5e6519 4574
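/*
 * Illustrative resizes through the writer above (values are in KB; the
 * per_cpu files resize a single CPU's buffer):
 *
 *	# echo 4096 > buffer_size_kb			<- all CPUs
 *	# echo 1024 > per_cpu/cpu0/buffer_size_kb	<- cpu0 only
 *	# cat buffer_size_kb
 *	X				<- printed when per-cpu sizes differ
 */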
f81ab074
VN
4575static ssize_t
4576tracing_total_entries_read(struct file *filp, char __user *ubuf,
4577 size_t cnt, loff_t *ppos)
4578{
4579 struct trace_array *tr = filp->private_data;
4580 char buf[64];
4581 int r, cpu;
4582 unsigned long size = 0, expanded_size = 0;
4583
4584 mutex_lock(&trace_types_lock);
4585 for_each_tracing_cpu(cpu) {
12883efb 4586 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
4587 if (!ring_buffer_expanded)
4588 expanded_size += trace_buf_size >> 10;
4589 }
4590 if (ring_buffer_expanded)
4591 r = sprintf(buf, "%lu\n", size);
4592 else
4593 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4594 mutex_unlock(&trace_types_lock);
4595
4596 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4597}
4598
4f271a2a
VN
4599static ssize_t
4600tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4601 size_t cnt, loff_t *ppos)
4602{
4603 /*
 4604 * There is no need to read what the user has written; this function
 4605 * just makes sure that there is no error when "echo" is used
4606 */
4607
4608 *ppos += cnt;
a98a3c3f
SR
4609
4610 return cnt;
4611}
4612
4f271a2a
VN
4613static int
4614tracing_free_buffer_release(struct inode *inode, struct file *filp)
4615{
2b6080f2
SR
4616 struct trace_array *tr = inode->i_private;
4617
cf30cf67
SR
4618 /* disable tracing ? */
4619 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 4620 tracer_tracing_off(tr);
4f271a2a 4621 /* resize the ring buffer to 0 */
2b6080f2 4622 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 4623
7b85af63
SRRH
4624 trace_array_put(tr);
4625
4f271a2a
VN
4626 return 0;
4627}
4628
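/*
 * Illustrative: the release hook above runs when the writer closes the
 * file, so a bare echo suffices; assuming the option name for
 * TRACE_ITER_STOP_ON_FREE is disable_on_free, tracing is switched off
 * before the buffers are freed:
 *
 *	# echo disable_on_free > trace_options
 *	# echo > free_buffer	<- tracer_tracing_off(), then resize to 0
 */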
5bf9a1ee
PP
4629static ssize_t
4630tracing_mark_write(struct file *filp, const char __user *ubuf,
4631 size_t cnt, loff_t *fpos)
4632{
d696b58c 4633 unsigned long addr = (unsigned long)ubuf;
2d71619c 4634 struct trace_array *tr = filp->private_data;
d696b58c
SR
4635 struct ring_buffer_event *event;
4636 struct ring_buffer *buffer;
4637 struct print_entry *entry;
4638 unsigned long irq_flags;
4639 struct page *pages[2];
6edb2a8a 4640 void *map_page[2];
d696b58c
SR
4641 int nr_pages = 1;
4642 ssize_t written;
d696b58c
SR
4643 int offset;
4644 int size;
4645 int len;
4646 int ret;
6edb2a8a 4647 int i;
5bf9a1ee 4648
c76f0694 4649 if (tracing_disabled)
5bf9a1ee
PP
4650 return -EINVAL;
4651
5224c3a3
MSB
4652 if (!(trace_flags & TRACE_ITER_MARKERS))
4653 return -EINVAL;
4654
5bf9a1ee
PP
4655 if (cnt > TRACE_BUF_SIZE)
4656 cnt = TRACE_BUF_SIZE;
4657
d696b58c
SR
4658 /*
4659 * Userspace is injecting traces into the kernel trace buffer.
 4660 * We want to be as non-intrusive as possible.
 4661 * To do so, we do not want to allocate any special buffers
 4662 * or take any locks, but instead write the userspace data
 4663 * straight into the ring buffer.
 4664 *
 4665 * First we need to pin the userspace buffer into memory,
 4666 * which most likely it already is, because the user just referenced it.
4667 * But there's no guarantee that it is. By using get_user_pages_fast()
4668 * and kmap_atomic/kunmap_atomic() we can get access to the
4669 * pages directly. We then write the data directly into the
4670 * ring buffer.
4671 */
4672 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 4673
d696b58c
SR
4674 /* check if we cross pages */
4675 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4676 nr_pages = 2;
4677
4678 offset = addr & (PAGE_SIZE - 1);
4679 addr &= PAGE_MASK;
4680
4681 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4682 if (ret < nr_pages) {
4683 while (--ret >= 0)
4684 put_page(pages[ret]);
4685 written = -EFAULT;
4686 goto out;
5bf9a1ee 4687 }
d696b58c 4688
6edb2a8a
SR
4689 for (i = 0; i < nr_pages; i++)
4690 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
4691
4692 local_save_flags(irq_flags);
4693 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 4694 buffer = tr->trace_buffer.buffer;
d696b58c
SR
4695 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4696 irq_flags, preempt_count());
4697 if (!event) {
4698 /* Ring buffer disabled, return as if not open for write */
4699 written = -EBADF;
4700 goto out_unlock;
5bf9a1ee 4701 }
d696b58c
SR
4702
4703 entry = ring_buffer_event_data(event);
4704 entry->ip = _THIS_IP_;
4705
4706 if (nr_pages == 2) {
4707 len = PAGE_SIZE - offset;
6edb2a8a
SR
4708 memcpy(&entry->buf, map_page[0] + offset, len);
4709 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 4710 } else
6edb2a8a 4711 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 4712
d696b58c
SR
4713 if (entry->buf[cnt - 1] != '\n') {
4714 entry->buf[cnt] = '\n';
4715 entry->buf[cnt + 1] = '\0';
4716 } else
4717 entry->buf[cnt] = '\0';
4718
7ffbd48d 4719 __buffer_unlock_commit(buffer, event);
5bf9a1ee 4720
d696b58c 4721 written = cnt;
5bf9a1ee 4722
d696b58c 4723 *fpos += written;
1aa54bca 4724
d696b58c 4725 out_unlock:
6edb2a8a
SR
4726 for (i = 0; i < nr_pages; i++){
4727 kunmap_atomic(map_page[i]);
4728 put_page(pages[i]);
4729 }
d696b58c 4730 out:
1aa54bca 4731 return written;
5bf9a1ee
PP
4732}
4733
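/*
 * A minimal userspace sketch (illustrative, not part of this file)
 * exercising the page-pinning path above; the debugfs mount point is an
 * assumption:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/trace_marker",
 *			      O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		// becomes one TRACE_PRINT entry in the ring buffer
 *		write(fd, "hello from userspace\n", 21);
 *		return close(fd);
 *	}
 */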
13f16d20 4734static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 4735{
2b6080f2 4736 struct trace_array *tr = m->private;
5079f326
Z
4737 int i;
4738
4739 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 4740 seq_printf(m,
5079f326 4741 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
4742 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4743 i == tr->clock_id ? "]" : "");
13f16d20 4744 seq_putc(m, '\n');
5079f326 4745
13f16d20 4746 return 0;
5079f326
Z
4747}
4748
4749static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4750 size_t cnt, loff_t *fpos)
4751{
2b6080f2
SR
4752 struct seq_file *m = filp->private_data;
4753 struct trace_array *tr = m->private;
5079f326
Z
4754 char buf[64];
4755 const char *clockstr;
4756 int i;
4757
4758 if (cnt >= sizeof(buf))
4759 return -EINVAL;
4760
4761 if (copy_from_user(&buf, ubuf, cnt))
4762 return -EFAULT;
4763
4764 buf[cnt] = 0;
4765
4766 clockstr = strstrip(buf);
4767
4768 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4769 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4770 break;
4771 }
4772 if (i == ARRAY_SIZE(trace_clocks))
4773 return -EINVAL;
4774
5079f326
Z
4775 mutex_lock(&trace_types_lock);
4776
2b6080f2
SR
4777 tr->clock_id = i;
4778
12883efb 4779 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 4780
60303ed3
DS
4781 /*
4782 * New clock may not be consistent with the previous clock.
4783 * Reset the buffer so that it doesn't have incomparable timestamps.
4784 */
9457158b 4785 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
4786
4787#ifdef CONFIG_TRACER_MAX_TRACE
4788 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4789 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 4790 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 4791#endif
60303ed3 4792
5079f326
Z
4793 mutex_unlock(&trace_types_lock);
4794
4795 *fpos += cnt;
4796
4797 return cnt;
4798}
4799
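/*
 * Illustrative clock switch through the writer above; note the buffer
 * reset on change (the clock list varies by config):
 *
 *	# cat trace_clock
 *	[local] global counter uptime perf
 *	# echo global > trace_clock	<- timestamps comparable across CPUs,
 *	# cat trace_clock		   existing events are discarded
 *	local [global] counter uptime perf
 */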
13f16d20
LZ
4800static int tracing_clock_open(struct inode *inode, struct file *file)
4801{
7b85af63
SRRH
4802 struct trace_array *tr = inode->i_private;
4803 int ret;
4804
13f16d20
LZ
4805 if (tracing_disabled)
4806 return -ENODEV;
2b6080f2 4807
7b85af63
SRRH
4808 if (trace_array_get(tr))
4809 return -ENODEV;
4810
4811 ret = single_open(file, tracing_clock_show, inode->i_private);
4812 if (ret < 0)
4813 trace_array_put(tr);
4814
4815 return ret;
13f16d20
LZ
4816}
4817
6de58e62
SRRH
4818struct ftrace_buffer_info {
4819 struct trace_iterator iter;
4820 void *spare;
4821 unsigned int read;
4822};
4823
debdd57f
HT
4824#ifdef CONFIG_TRACER_SNAPSHOT
4825static int tracing_snapshot_open(struct inode *inode, struct file *file)
4826{
6484c71c 4827 struct trace_array *tr = inode->i_private;
debdd57f 4828 struct trace_iterator *iter;
2b6080f2 4829 struct seq_file *m;
debdd57f
HT
4830 int ret = 0;
4831
ff451961
SRRH
4832 if (trace_array_get(tr) < 0)
4833 return -ENODEV;
4834
debdd57f 4835 if (file->f_mode & FMODE_READ) {
6484c71c 4836 iter = __tracing_open(inode, file, true);
debdd57f
HT
4837 if (IS_ERR(iter))
4838 ret = PTR_ERR(iter);
2b6080f2
SR
4839 } else {
4840 /* Writes still need the seq_file to hold the private data */
f77d09a3 4841 ret = -ENOMEM;
2b6080f2
SR
4842 m = kzalloc(sizeof(*m), GFP_KERNEL);
4843 if (!m)
f77d09a3 4844 goto out;
2b6080f2
SR
4845 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4846 if (!iter) {
4847 kfree(m);
f77d09a3 4848 goto out;
2b6080f2 4849 }
f77d09a3
AL
4850 ret = 0;
4851
ff451961 4852 iter->tr = tr;
6484c71c
ON
4853 iter->trace_buffer = &tr->max_buffer;
4854 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
4855 m->private = iter;
4856 file->private_data = m;
debdd57f 4857 }
f77d09a3 4858out:
ff451961
SRRH
4859 if (ret < 0)
4860 trace_array_put(tr);
4861
debdd57f
HT
4862 return ret;
4863}
4864
4865static ssize_t
4866tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4867 loff_t *ppos)
4868{
2b6080f2
SR
4869 struct seq_file *m = filp->private_data;
4870 struct trace_iterator *iter = m->private;
4871 struct trace_array *tr = iter->tr;
debdd57f
HT
4872 unsigned long val;
4873 int ret;
4874
4875 ret = tracing_update_buffers();
4876 if (ret < 0)
4877 return ret;
4878
4879 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4880 if (ret)
4881 return ret;
4882
4883 mutex_lock(&trace_types_lock);
4884
2b6080f2 4885 if (tr->current_trace->use_max_tr) {
debdd57f
HT
4886 ret = -EBUSY;
4887 goto out;
4888 }
4889
4890 switch (val) {
4891 case 0:
f1affcaa
SRRH
4892 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4893 ret = -EINVAL;
4894 break;
debdd57f 4895 }
3209cff4
SRRH
4896 if (tr->allocated_snapshot)
4897 free_snapshot(tr);
debdd57f
HT
4898 break;
4899 case 1:
f1affcaa
SRRH
4900/* Only allow per-cpu swap if the ring buffer supports it */
4901#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4902 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4903 ret = -EINVAL;
4904 break;
4905 }
4906#endif
45ad21ca 4907 if (!tr->allocated_snapshot) {
3209cff4 4908 ret = alloc_snapshot(tr);
debdd57f
HT
4909 if (ret < 0)
4910 break;
debdd57f 4911 }
debdd57f
HT
4912 local_irq_disable();
4913 /* Now, we're going to swap */
f1affcaa 4914 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 4915 update_max_tr(tr, current, smp_processor_id());
f1affcaa 4916 else
ce9bae55 4917 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
4918 local_irq_enable();
4919 break;
4920 default:
45ad21ca 4921 if (tr->allocated_snapshot) {
f1affcaa
SRRH
4922 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4923 tracing_reset_online_cpus(&tr->max_buffer);
4924 else
4925 tracing_reset(&tr->max_buffer, iter->cpu_file);
4926 }
debdd57f
HT
4927 break;
4928 }
4929
4930 if (ret >= 0) {
4931 *ppos += cnt;
4932 ret = cnt;
4933 }
4934out:
4935 mutex_unlock(&trace_types_lock);
4936 return ret;
4937}
2b6080f2
SR
4938
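/*
 * Illustrative snapshot workflow driven by the val switch above:
 *
 *	# echo 1 > snapshot	<- case 1: allocate (if needed) and swap
 *	# cat snapshot		<- read the frozen copy; tracing continues
 *	# echo 2 > snapshot	<- default case: clear the snapshot buffer
 *	# echo 0 > snapshot	<- case 0: free the snapshot buffer
 */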
4939static int tracing_snapshot_release(struct inode *inode, struct file *file)
4940{
4941 struct seq_file *m = file->private_data;
ff451961
SRRH
4942 int ret;
4943
4944 ret = tracing_release(inode, file);
2b6080f2
SR
4945
4946 if (file->f_mode & FMODE_READ)
ff451961 4947 return ret;
2b6080f2
SR
4948
4949 /* If write only, the seq_file is just a stub */
4950 if (m)
4951 kfree(m->private);
4952 kfree(m);
4953
4954 return 0;
4955}
4956
6de58e62
SRRH
4957static int tracing_buffers_open(struct inode *inode, struct file *filp);
4958static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4959 size_t count, loff_t *ppos);
4960static int tracing_buffers_release(struct inode *inode, struct file *file);
4961static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4962 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4963
4964static int snapshot_raw_open(struct inode *inode, struct file *filp)
4965{
4966 struct ftrace_buffer_info *info;
4967 int ret;
4968
4969 ret = tracing_buffers_open(inode, filp);
4970 if (ret < 0)
4971 return ret;
4972
4973 info = filp->private_data;
4974
4975 if (info->iter.trace->use_max_tr) {
4976 tracing_buffers_release(inode, filp);
4977 return -EBUSY;
4978 }
4979
4980 info->iter.snapshot = true;
4981 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4982
4983 return ret;
4984}
4985
debdd57f
HT
4986#endif /* CONFIG_TRACER_SNAPSHOT */
4987
4988
5e2336a0 4989static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
4990 .open = tracing_open_generic,
4991 .read = tracing_max_lat_read,
4992 .write = tracing_max_lat_write,
b444786f 4993 .llseek = generic_file_llseek,
bc0c38d1
SR
4994};
4995
5e2336a0 4996static const struct file_operations set_tracer_fops = {
4bf39a94
IM
4997 .open = tracing_open_generic,
4998 .read = tracing_set_trace_read,
4999 .write = tracing_set_trace_write,
b444786f 5000 .llseek = generic_file_llseek,
bc0c38d1
SR
5001};
5002
5e2336a0 5003static const struct file_operations tracing_pipe_fops = {
4bf39a94 5004 .open = tracing_open_pipe,
2a2cc8f7 5005 .poll = tracing_poll_pipe,
4bf39a94 5006 .read = tracing_read_pipe,
3c56819b 5007 .splice_read = tracing_splice_read_pipe,
4bf39a94 5008 .release = tracing_release_pipe,
b444786f 5009 .llseek = no_llseek,
b3806b43
SR
5010};
5011
5e2336a0 5012static const struct file_operations tracing_entries_fops = {
0bc392ee 5013 .open = tracing_open_generic_tr,
a98a3c3f
SR
5014 .read = tracing_entries_read,
5015 .write = tracing_entries_write,
b444786f 5016 .llseek = generic_file_llseek,
0bc392ee 5017 .release = tracing_release_generic_tr,
a98a3c3f
SR
5018};
5019
f81ab074 5020static const struct file_operations tracing_total_entries_fops = {
7b85af63 5021 .open = tracing_open_generic_tr,
f81ab074
VN
5022 .read = tracing_total_entries_read,
5023 .llseek = generic_file_llseek,
7b85af63 5024 .release = tracing_release_generic_tr,
f81ab074
VN
5025};
5026
4f271a2a 5027static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5028 .open = tracing_open_generic_tr,
4f271a2a
VN
5029 .write = tracing_free_buffer_write,
5030 .release = tracing_free_buffer_release,
5031};
5032
5e2336a0 5033static const struct file_operations tracing_mark_fops = {
7b85af63 5034 .open = tracing_open_generic_tr,
5bf9a1ee 5035 .write = tracing_mark_write,
b444786f 5036 .llseek = generic_file_llseek,
7b85af63 5037 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5038};
5039
5079f326 5040static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5041 .open = tracing_clock_open,
5042 .read = seq_read,
5043 .llseek = seq_lseek,
7b85af63 5044 .release = tracing_single_release_tr,
5079f326
Z
5045 .write = tracing_clock_write,
5046};
5047
debdd57f
HT
5048#ifdef CONFIG_TRACER_SNAPSHOT
5049static const struct file_operations snapshot_fops = {
5050 .open = tracing_snapshot_open,
5051 .read = seq_read,
5052 .write = tracing_snapshot_write,
098c879e 5053 .llseek = tracing_lseek,
2b6080f2 5054 .release = tracing_snapshot_release,
debdd57f 5055};
debdd57f 5056
6de58e62
SRRH
5057static const struct file_operations snapshot_raw_fops = {
5058 .open = snapshot_raw_open,
5059 .read = tracing_buffers_read,
5060 .release = tracing_buffers_release,
5061 .splice_read = tracing_buffers_splice_read,
5062 .llseek = no_llseek,
2cadf913
SR
5063};
5064
6de58e62
SRRH
5065#endif /* CONFIG_TRACER_SNAPSHOT */
5066
2cadf913
SR
5067static int tracing_buffers_open(struct inode *inode, struct file *filp)
5068{
46ef2be0 5069 struct trace_array *tr = inode->i_private;
2cadf913 5070 struct ftrace_buffer_info *info;
7b85af63 5071 int ret;
2cadf913
SR
5072
5073 if (tracing_disabled)
5074 return -ENODEV;
5075
7b85af63
SRRH
5076 if (trace_array_get(tr) < 0)
5077 return -ENODEV;
5078
2cadf913 5079 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5080 if (!info) {
5081 trace_array_put(tr);
2cadf913 5082 return -ENOMEM;
7b85af63 5083 }
2cadf913 5084
a695cb58
SRRH
5085 mutex_lock(&trace_types_lock);
5086
cc60cdc9 5087 info->iter.tr = tr;
46ef2be0 5088 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5089 info->iter.trace = tr->current_trace;
12883efb 5090 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5091 info->spare = NULL;
2cadf913 5092 /* Force reading ring buffer for first read */
cc60cdc9 5093 info->read = (unsigned int)-1;
2cadf913
SR
5094
5095 filp->private_data = info;
5096
a695cb58
SRRH
5097 mutex_unlock(&trace_types_lock);
5098
7b85af63
SRRH
5099 ret = nonseekable_open(inode, filp);
5100 if (ret < 0)
5101 trace_array_put(tr);
5102
5103 return ret;
2cadf913
SR
5104}
5105
cc60cdc9
SR
5106static unsigned int
5107tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5108{
5109 struct ftrace_buffer_info *info = filp->private_data;
5110 struct trace_iterator *iter = &info->iter;
5111
5112 return trace_poll(iter, filp, poll_table);
5113}
5114
2cadf913
SR
5115static ssize_t
5116tracing_buffers_read(struct file *filp, char __user *ubuf,
5117 size_t count, loff_t *ppos)
5118{
5119 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5120 struct trace_iterator *iter = &info->iter;
2cadf913 5121 ssize_t ret;
6de58e62 5122 ssize_t size;
2cadf913 5123
2dc5d12b
SR
5124 if (!count)
5125 return 0;
5126
6de58e62
SRRH
5127 mutex_lock(&trace_types_lock);
5128
5129#ifdef CONFIG_TRACER_MAX_TRACE
5130 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5131 size = -EBUSY;
5132 goto out_unlock;
5133 }
5134#endif
5135
ddd538f3 5136 if (!info->spare)
12883efb
SRRH
5137 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5138 iter->cpu_file);
6de58e62 5139 size = -ENOMEM;
ddd538f3 5140 if (!info->spare)
6de58e62 5141 goto out_unlock;
ddd538f3 5142
2cadf913
SR
5143 /* Do we have previous read data to read? */
5144 if (info->read < PAGE_SIZE)
5145 goto read;
5146
b627344f 5147 again:
cc60cdc9 5148 trace_access_lock(iter->cpu_file);
12883efb 5149 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5150 &info->spare,
5151 count,
cc60cdc9
SR
5152 iter->cpu_file, 0);
5153 trace_access_unlock(iter->cpu_file);
2cadf913 5154
b627344f
SR
5155 if (ret < 0) {
5156 if (trace_empty(iter)) {
6de58e62
SRRH
5157 if ((filp->f_flags & O_NONBLOCK)) {
5158 size = -EAGAIN;
5159 goto out_unlock;
5160 }
5161 mutex_unlock(&trace_types_lock);
b627344f 5162 iter->trace->wait_pipe(iter);
6de58e62
SRRH
5163 mutex_lock(&trace_types_lock);
5164 if (signal_pending(current)) {
5165 size = -EINTR;
5166 goto out_unlock;
5167 }
b627344f
SR
5168 goto again;
5169 }
6de58e62
SRRH
5170 size = 0;
5171 goto out_unlock;
b627344f 5172 }
436fc280 5173
436fc280 5174 info->read = 0;
b627344f 5175 read:
2cadf913
SR
5176 size = PAGE_SIZE - info->read;
5177 if (size > count)
5178 size = count;
5179
5180 ret = copy_to_user(ubuf, info->spare + info->read, size);
6de58e62
SRRH
5181 if (ret == size) {
5182 size = -EFAULT;
5183 goto out_unlock;
5184 }
2dc5d12b
SR
5185 size -= ret;
5186
2cadf913
SR
5187 *ppos += size;
5188 info->read += size;
5189
6de58e62
SRRH
5190 out_unlock:
5191 mutex_unlock(&trace_types_lock);
5192
2cadf913
SR
5193 return size;
5194}
5195
5196static int tracing_buffers_release(struct inode *inode, struct file *file)
5197{
5198 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5199 struct trace_iterator *iter = &info->iter;
2cadf913 5200
a695cb58
SRRH
5201 mutex_lock(&trace_types_lock);
5202
ff451961 5203 __trace_array_put(iter->tr);
2cadf913 5204
ddd538f3 5205 if (info->spare)
12883efb 5206 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5207 kfree(info);
5208
a695cb58
SRRH
5209 mutex_unlock(&trace_types_lock);
5210
2cadf913
SR
5211 return 0;
5212}
5213
5214struct buffer_ref {
5215 struct ring_buffer *buffer;
5216 void *page;
5217 int ref;
5218};
5219
5220static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5221 struct pipe_buffer *buf)
5222{
5223 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5224
5225 if (--ref->ref)
5226 return;
5227
5228 ring_buffer_free_read_page(ref->buffer, ref->page);
5229 kfree(ref);
5230 buf->private = 0;
5231}
5232
2cadf913
SR
5233static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5234 struct pipe_buffer *buf)
5235{
5236 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5237
5238 ref->ref++;
5239}
5240
5241/* Pipe buffer operations for a buffer. */
28dfef8f 5242static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913
SR
5243 .can_merge = 0,
5244 .map = generic_pipe_buf_map,
5245 .unmap = generic_pipe_buf_unmap,
5246 .confirm = generic_pipe_buf_confirm,
5247 .release = buffer_pipe_buf_release,
d55cb6cf 5248 .steal = generic_pipe_buf_steal,
2cadf913
SR
5249 .get = buffer_pipe_buf_get,
5250};
5251
5252/*
5253 * Callback from splice_to_pipe(), if we need to release some pages
5254 * at the end of the spd in case we error'ed out in filling the pipe.
5255 */
5256static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5257{
5258 struct buffer_ref *ref =
5259 (struct buffer_ref *)spd->partial[i].private;
5260
5261 if (--ref->ref)
5262 return;
5263
5264 ring_buffer_free_read_page(ref->buffer, ref->page);
5265 kfree(ref);
5266 spd->partial[i].private = 0;
5267}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
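
/*
 * Illustrative user-space consumer of trace_pipe_raw, which is backed
 * by these fops (see tracing_init_debugfs_percpu() below); offsets and
 * lengths must be page aligned, as the checks above enforce:
 *
 *	int fds[2], tfd;
 *
 *	pipe(fds);
 *	tfd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	splice(tfd, NULL, fds[1], NULL, 16 * 4096, SPLICE_F_NONBLOCK);
 */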

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
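
/*
 * Example contents of per_cpu/cpuN/stats, following the trace_seq_printf()
 * calls above (the values shown are illustrative):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 65440
 *	oldest event ts:  5264.291208
 *	now ts:  5278.909696
 *	dropped events: 0
 *	read events: 512
 */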

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func		= ftrace_snapshot,
	.print		= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func		= ftrace_count_snapshot,
	.print		= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}
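
/*
 * Illustrative usage of the resulting "snapshot" command through
 * set_ftrace_filter (the function name here is only an example):
 *
 *	echo 'do_fork:snapshot' > set_ftrace_filter
 *	echo 'do_fork:snapshot:5' > set_ftrace_filter
 *	echo '!do_fork:snapshot' > set_ftrace_filter
 */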

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name		= "snapshot",
	.func		= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#endif
}
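
/*
 * Resulting per-cpu layout under the tracing debugfs directory
 * (illustrative, for cpu 0):
 *
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 */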

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt	*opt;
	struct tracer_flags	*flags;
	struct trace_array	*tr;
	struct dentry		*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};
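
/*
 * The files created from the trace_options table map 1:1 onto bits in
 * trace_flags, so each one is toggled with a 0/1 write (the option name
 * below is illustrative):
 *
 *	echo 0 > options/print-parent
 *	echo 1 > options/print-parent
 */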

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}
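
/*
 * Note: a failure here is reported but deliberately not treated as
 * fatal; tracing keeps running without the affected control file.
 */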

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
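
/*
 * These fops back the per-instance "tracing_on" file (created in
 * init_tracer_debugfs() below), so recording is toggled from user
 * space with a 0/1 write:
 *
 *	echo 0 > tracing_on
 *	echo 1 > tracing_on
 */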

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
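
/*
 * Note the asymmetry above: unless a snapshot was requested on the
 * kernel command line, max_buffer starts out at a single page and is
 * only grown to the working size when a snapshot is first taken.
 */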

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
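
/*
 * With the hijacked inode operations in place, instances are managed
 * entirely with mkdir/rmdir from user space (illustrative; the mount
 * point may differ):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 */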

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
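
/*
 * ftrace_dump() is reached from the panic/die notifiers above whenever
 * ftrace_dump_on_oops is set (e.g. via the ftrace_dump_on_oops boot
 * parameter), and can also be triggered manually with sysrq-z, as the
 * comment inside the function notes.
 */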

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);