tracing: Apply tracer specific options from kernel command line.
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest looks into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could have
 * occurred at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and is set back to zero only if the
 * initialization of the tracer succeeds; nothing else clears it.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It defaults to off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 *   Set it to 1 if you want to dump the buffers of all CPUs
 *   Set it to 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
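/*
 * Example (documented "ftrace=" kernel parameter): booting with
 *
 *	ftrace=function_graph
 *
 * selects the function_graph tracer before user space is up, which is
 * handy for debugging early boot. The tracer name must match one that
 * is built into the kernel.
 */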

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
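/*
 * Example: "ftrace_dump_on_oops" alone dumps every CPU's buffer when an
 * oops hits, while "ftrace_dump_on_oops=orig_cpu" dumps only the buffer
 * of the CPU that triggered it.
 */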

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
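/*
 * Example: booting with "traceoff_on_warning" stops the ring buffer on
 * the first WARN*(), preserving the events that led up to the warning;
 * "traceoff_on_warning=0" or "=off" leaves the behavior disabled.
 */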

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
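/*
 * Example: "alloc_snapshot" on the kernel command line pre-allocates
 * the snapshot (max) buffer at boot, so an early tracing_snapshot()
 * call does not find the buffer missing and turn tracing off.
 */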

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
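/*
 * Example: "trace_options=stacktrace,sym-offset" applies those trace
 * options at boot, just as if they had been echoed into the tracefs
 * "trace_options" file after boot. Tracer-specific options given here
 * are applied once their tracer registers (see register_tracer()).
 */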

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
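/*
 * Example: "trace_clock=global" selects the global trace clock at
 * boot. Valid names are the ones listed in the trace_clocks[] table
 * below (local, global, counter, uptime, perf, mono, mono_raw).
 */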

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
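/*
 * Example: adding "tp_printk" (typically together with "trace_event=..."
 * to enable some events) routes enabled tracepoints to printk, which is
 * useful when the console is the only output that survives a crash.
 */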

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
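/*
 * Rounding example: ns2usecs(1499) == 1, ns2usecs(1500) == 2; adding
 * 500 before the divide rounds to the nearest microsecond instead of
 * truncating.
 */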

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (returned by ring_buffer_peek(), etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
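/*
 * Example (the tracefs interface backed by these helpers):
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot   # allocate + snapshot
 *	cat /sys/kernel/debug/tracing/snapshot        # read the frozen copy
 */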

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
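/*
 * Example: "trace_buf_size=1M" requests a one-megabyte per-cpu buffer;
 * memparse() understands the usual K/M/G suffixes, and the size is
 * rounded up to full pages when the buffer is allocated.
 */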

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
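/*
 * Example: "tracing_thresh=100" sets the latency-tracer threshold to
 * 100 microseconds; the value is given in usecs on the command line
 * and stored internally in nanoseconds (hence the * 1000 above).
 */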

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};
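/*
 * The same names select the clock at run time through tracefs, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is fastest but not ordered across CPUs; "global" is ordered
 * across CPUs at some extra cost.
 */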

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
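/*
 * Usage sketch (hypothetical caller, for illustration only): a tracefs
 * write handler would typically drive the parser like this:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, size))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_word(parser.buffer);	(one NUL-terminated word)
 *	trace_parser_put(&parser);
 *
 * trace_parser_loaded() and trace_parser_clear() live in trace.h;
 * handle_word() is a stand-in for the caller's own logic.
 */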

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
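/*
 * Registration sketch (hypothetical tracer, for illustration only):
 * since register_tracer() is __init, a built-in tracer registers
 * itself from its own init code, roughly:
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *		.reset	= mytracer_reset,
 *	};
 *
 *	static __init int init_mytracer(void)
 *	{
 *		return register_tracer(&mytracer);
 *	}
 *	core_initcall(init_mytracer);
 *
 * The names above are made up; see the existing tracers (e.g.
 * trace_functions.c) for real examples.
 */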

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1793
c0a0d0d3 1794#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1795
1796#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1797struct ftrace_stack {
1798 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1799};
1800
1801static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1802static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1803
e77405ad 1804static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1805 unsigned long flags,
1fd8df2c 1806 int skip, int pc, struct pt_regs *regs)
86387f7e 1807{
2425bcb9 1808 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 1809 struct ring_buffer_event *event;
777e208d 1810 struct stack_entry *entry;
86387f7e 1811 struct stack_trace trace;
4a9bd3f1
SR
1812 int use_stack;
1813 int size = FTRACE_STACK_ENTRIES;
1814
1815 trace.nr_entries = 0;
1816 trace.skip = skip;
1817
1818 /*
1819 * Since events can happen in NMIs there's no safe way to
1820 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1821 * or NMI comes in, it will just have to use the default
1822	 * FTRACE_STACK_ENTRIES.
1823 */
1824 preempt_disable_notrace();
1825
82146529 1826 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1827 /*
1828 * We don't need any atomic variables, just a barrier.
1829 * If an interrupt comes in, we don't care, because it would
1830 * have exited and put the counter back to what we want.
1831 * We just need a barrier to keep gcc from moving things
1832 * around.
1833 */
1834 barrier();
1835 if (use_stack == 1) {
bdffd893 1836 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1837 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1838
1839 if (regs)
1840 save_stack_trace_regs(regs, &trace);
1841 else
1842 save_stack_trace(&trace);
1843
1844 if (trace.nr_entries > size)
1845 size = trace.nr_entries;
1846 } else
1847 /* From now on, use_stack is a boolean */
1848 use_stack = 0;
1849
1850 size *= sizeof(unsigned long);
86387f7e 1851
e77405ad 1852 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1853 sizeof(*entry) + size, flags, pc);
3928a8a2 1854 if (!event)
4a9bd3f1
SR
1855 goto out;
1856 entry = ring_buffer_event_data(event);
86387f7e 1857
4a9bd3f1
SR
1858 memset(&entry->caller, 0, size);
1859
1860 if (use_stack)
1861 memcpy(&entry->caller, trace.entries,
1862 trace.nr_entries * sizeof(unsigned long));
1863 else {
1864 trace.max_entries = FTRACE_STACK_ENTRIES;
1865 trace.entries = entry->caller;
1866 if (regs)
1867 save_stack_trace_regs(regs, &trace);
1868 else
1869 save_stack_trace(&trace);
1870 }
1871
1872 entry->size = trace.nr_entries;
86387f7e 1873
f306cc82 1874 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1875 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1876
1877 out:
1878 /* Again, don't let gcc optimize things here */
1879 barrier();
82146529 1880 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1881 preempt_enable_notrace();
1882
f0a920d5
IM
1883}
1884
2d34f489
SRRH
1885static inline void ftrace_trace_stack(struct trace_array *tr,
1886 struct ring_buffer *buffer,
73dddbb5
SRRH
1887 unsigned long flags,
1888 int skip, int pc, struct pt_regs *regs)
53614991 1889{
2d34f489 1890 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
1891 return;
1892
73dddbb5 1893 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
1894}
1895
c0a0d0d3
FW
1896void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1897 int pc)
38697053 1898{
12883efb 1899 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1900}
1901
03889384
SR
1902/**
1903 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1904 * @skip: Number of functions to skip (helper handlers)
03889384 1905 */
c142be8e 1906void trace_dump_stack(int skip)
03889384
SR
1907{
1908 unsigned long flags;
1909
1910 if (tracing_disabled || tracing_selftest_running)
e36c5458 1911 return;
03889384
SR
1912
1913 local_save_flags(flags);
1914
c142be8e
SRRH
1915 /*
1916	 * Skip 3 more, which seems to get us to the caller of
1917	 * this function.
1918 */
1919 skip += 3;
1920 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1921 flags, skip, preempt_count(), NULL);
03889384
SR
1922}
1923
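/*
 * Usage sketch (illustrative, hypothetical caller): record the stack
 * leading into a suspect code path. A non-zero skip drops that many
 * helper frames from the top of the dump:
 *
 *	trace_dump_stack(0);
 */
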
91e86e56
SR
1924static DEFINE_PER_CPU(int, user_stack_count);
1925
e77405ad
SR
1926void
1927ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1928{
2425bcb9 1929 struct trace_event_call *call = &event_user_stack;
8d7c6a96 1930 struct ring_buffer_event *event;
02b67518
TE
1931 struct userstack_entry *entry;
1932 struct stack_trace trace;
02b67518 1933
983f938a 1934 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
1935 return;
1936
b6345879
SR
1937 /*
1938	 * NMIs cannot handle page faults, even with fixups.
1939	 * Saving the user stack can (and often does) fault.
1940 */
1941 if (unlikely(in_nmi()))
1942 return;
02b67518 1943
91e86e56
SR
1944 /*
1945 * prevent recursion, since the user stack tracing may
1946 * trigger other kernel events.
1947 */
1948 preempt_disable();
1949 if (__this_cpu_read(user_stack_count))
1950 goto out;
1951
1952 __this_cpu_inc(user_stack_count);
1953
e77405ad 1954 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1955 sizeof(*entry), flags, pc);
02b67518 1956 if (!event)
1dbd1951 1957 goto out_drop_count;
02b67518 1958 entry = ring_buffer_event_data(event);
02b67518 1959
48659d31 1960 entry->tgid = current->tgid;
02b67518
TE
1961 memset(&entry->caller, 0, sizeof(entry->caller));
1962
1963 trace.nr_entries = 0;
1964 trace.max_entries = FTRACE_STACK_ENTRIES;
1965 trace.skip = 0;
1966 trace.entries = entry->caller;
1967
1968 save_stack_trace_user(&trace);
f306cc82 1969 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1970 __buffer_unlock_commit(buffer, event);
91e86e56 1971
1dbd1951 1972 out_drop_count:
91e86e56 1973 __this_cpu_dec(user_stack_count);
91e86e56
SR
1974 out:
1975 preempt_enable();
02b67518
TE
1976}
1977
4fd27358
HE
1978#ifdef UNUSED
1979static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1980{
7be42151 1981 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1982}
4fd27358 1983#endif /* UNUSED */
02b67518 1984
c0a0d0d3
FW
1985#endif /* CONFIG_STACKTRACE */
1986
07d777fe
SR
1987/* created for use with alloc_percpu */
1988struct trace_buffer_struct {
1989 char buffer[TRACE_BUF_SIZE];
1990};
1991
1992static struct trace_buffer_struct *trace_percpu_buffer;
1993static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1994static struct trace_buffer_struct *trace_percpu_irq_buffer;
1995static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996
1997/*
1998 * The buffer used is dependent on the context. There is a per cpu
1999 * buffer for normal context, softirq context, hard irq context and
2000 * for NMI context. This allows for lockless recording.
2001 *
2002 * Note, if the buffers fail to be allocated, then this returns NULL.
2003 */
2004static char *get_trace_buf(void)
2005{
2006 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
2007
2008 /*
2009 * If we have allocated per cpu buffers, then we do not
2010 * need to do any locking.
2011 */
2012 if (in_nmi())
2013 percpu_buffer = trace_percpu_nmi_buffer;
2014 else if (in_irq())
2015 percpu_buffer = trace_percpu_irq_buffer;
2016 else if (in_softirq())
2017 percpu_buffer = trace_percpu_sirq_buffer;
2018 else
2019 percpu_buffer = trace_percpu_buffer;
2020
2021 if (!percpu_buffer)
2022 return NULL;
2023
d8a0349c 2024 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2025}
2026
2027static int alloc_percpu_trace_buffer(void)
2028{
2029 struct trace_buffer_struct *buffers;
2030 struct trace_buffer_struct *sirq_buffers;
2031 struct trace_buffer_struct *irq_buffers;
2032 struct trace_buffer_struct *nmi_buffers;
2033
2034 buffers = alloc_percpu(struct trace_buffer_struct);
2035 if (!buffers)
2036 goto err_warn;
2037
2038 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2039 if (!sirq_buffers)
2040 goto err_sirq;
2041
2042 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2043 if (!irq_buffers)
2044 goto err_irq;
2045
2046 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2047 if (!nmi_buffers)
2048 goto err_nmi;
2049
2050 trace_percpu_buffer = buffers;
2051 trace_percpu_sirq_buffer = sirq_buffers;
2052 trace_percpu_irq_buffer = irq_buffers;
2053 trace_percpu_nmi_buffer = nmi_buffers;
2054
2055 return 0;
2056
2057 err_nmi:
2058 free_percpu(irq_buffers);
2059 err_irq:
2060 free_percpu(sirq_buffers);
2061 err_sirq:
2062 free_percpu(buffers);
2063 err_warn:
2064 WARN(1, "Could not allocate percpu trace_printk buffer");
2065 return -ENOMEM;
2066}
2067
81698831
SR
2068static int buffers_allocated;
2069
07d777fe
SR
2070void trace_printk_init_buffers(void)
2071{
07d777fe
SR
2072 if (buffers_allocated)
2073 return;
2074
2075 if (alloc_percpu_trace_buffer())
2076 return;
2077
2184db46
SR
2078 /* trace_printk() is for debug use only. Don't use it in production. */
2079
69a1c994
BP
2080 pr_warning("\n");
2081 pr_warning("**********************************************************\n");
2184db46
SR
2082 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2083 pr_warning("** **\n");
2084 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2085 pr_warning("** **\n");
2086 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2087 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2088 pr_warning("** **\n");
2089 pr_warning("** If you see this message and you are not debugging **\n");
2090 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2091 pr_warning("** **\n");
2092 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2093 pr_warning("**********************************************************\n");
07d777fe 2094
b382ede6
SR
2095 /* Expand the buffers to set size */
2096 tracing_update_buffers();
2097
07d777fe 2098 buffers_allocated = 1;
81698831
SR
2099
2100 /*
2101 * trace_printk_init_buffers() can be called by modules.
2102 * If that happens, then we need to start cmdline recording
2103 * directly here. If the global_trace.buffer is already
2104 * allocated here, then this was called by module code.
2105 */
12883efb 2106 if (global_trace.trace_buffer.buffer)
81698831
SR
2107 tracing_start_cmdline_record();
2108}
2109
2110void trace_printk_start_comm(void)
2111{
2112 /* Start tracing comms if trace printk is set */
2113 if (!buffers_allocated)
2114 return;
2115 tracing_start_cmdline_record();
2116}
2117
2118static void trace_printk_start_stop_comm(int enabled)
2119{
2120 if (!buffers_allocated)
2121 return;
2122
2123 if (enabled)
2124 tracing_start_cmdline_record();
2125 else
2126 tracing_stop_cmdline_record();
07d777fe
SR
2127}
2128
769b0441 2129/**
48ead020 2130 * trace_vbprintk - write a binary message to the tracing buffer
769b0441
FW
2131 *
2132 */
40ce74f1 2133int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2134{
2425bcb9 2135 struct trace_event_call *call = &event_bprint;
769b0441 2136 struct ring_buffer_event *event;
e77405ad 2137 struct ring_buffer *buffer;
769b0441 2138 struct trace_array *tr = &global_trace;
48ead020 2139 struct bprint_entry *entry;
769b0441 2140 unsigned long flags;
07d777fe
SR
2141 char *tbuffer;
2142 int len = 0, size, pc;
769b0441
FW
2143
2144 if (unlikely(tracing_selftest_running || tracing_disabled))
2145 return 0;
2146
2147 /* Don't pollute graph traces with trace_vprintk internals */
2148 pause_graph_tracing();
2149
2150 pc = preempt_count();
5168ae50 2151 preempt_disable_notrace();
769b0441 2152
07d777fe
SR
2153 tbuffer = get_trace_buf();
2154 if (!tbuffer) {
2155 len = 0;
769b0441 2156 goto out;
07d777fe 2157 }
769b0441 2158
07d777fe 2159 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2160
07d777fe
SR
2161 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2162 goto out;
769b0441 2163
07d777fe 2164 local_save_flags(flags);
769b0441 2165 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2166 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2167 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2168 flags, pc);
769b0441 2169 if (!event)
07d777fe 2170 goto out;
769b0441
FW
2171 entry = ring_buffer_event_data(event);
2172 entry->ip = ip;
769b0441
FW
2173 entry->fmt = fmt;
2174
07d777fe 2175 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2176 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2177 __buffer_unlock_commit(buffer, event);
2d34f489 2178 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2179 }
769b0441 2180
769b0441 2181out:
5168ae50 2182 preempt_enable_notrace();
769b0441
FW
2183 unpause_graph_tracing();
2184
2185 return len;
2186}
48ead020
FW
2187EXPORT_SYMBOL_GPL(trace_vbprintk);
2188
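/*
 * Caller-side sketch (illustrative): a trace_printk() with arguments
 * typically lands here. Only the format pointer plus the
 * vbin_printf()-packed arguments are stored; the string is rendered
 * when the buffer is read. E.g., from any kernel code:
 *
 *	trace_printk("x=%d y=%d\n", x, y);
 */
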
12883efb
SRRH
2189static int
2190__trace_array_vprintk(struct ring_buffer *buffer,
2191 unsigned long ip, const char *fmt, va_list args)
48ead020 2192{
2425bcb9 2193 struct trace_event_call *call = &event_print;
48ead020 2194 struct ring_buffer_event *event;
07d777fe 2195 int len = 0, size, pc;
48ead020 2196 struct print_entry *entry;
07d777fe
SR
2197 unsigned long flags;
2198 char *tbuffer;
48ead020
FW
2199
2200 if (tracing_disabled || tracing_selftest_running)
2201 return 0;
2202
07d777fe
SR
2203 /* Don't pollute graph traces with trace_vprintk internals */
2204 pause_graph_tracing();
2205
48ead020
FW
2206 pc = preempt_count();
2207 preempt_disable_notrace();
48ead020 2208
07d777fe
SR
2209
2210 tbuffer = get_trace_buf();
2211 if (!tbuffer) {
2212 len = 0;
48ead020 2213 goto out;
07d777fe 2214 }
48ead020 2215
3558a5ac 2216 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2217
07d777fe 2218 local_save_flags(flags);
48ead020 2219 size = sizeof(*entry) + len + 1;
e77405ad 2220 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2221 flags, pc);
48ead020 2222 if (!event)
07d777fe 2223 goto out;
48ead020 2224 entry = ring_buffer_event_data(event);
c13d2f7c 2225 entry->ip = ip;
48ead020 2226
3558a5ac 2227 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2228 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2229 __buffer_unlock_commit(buffer, event);
2d34f489 2230 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2231 }
48ead020
FW
2232 out:
2233 preempt_enable_notrace();
07d777fe 2234 unpause_graph_tracing();
48ead020
FW
2235
2236 return len;
2237}
659372d3 2238
12883efb
SRRH
2239int trace_array_vprintk(struct trace_array *tr,
2240 unsigned long ip, const char *fmt, va_list args)
2241{
2242 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2243}
2244
2245int trace_array_printk(struct trace_array *tr,
2246 unsigned long ip, const char *fmt, ...)
2247{
2248 int ret;
2249 va_list ap;
2250
983f938a 2251 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2252 return 0;
2253
2254 va_start(ap, fmt);
2255 ret = trace_array_vprintk(tr, ip, fmt, ap);
2256 va_end(ap);
2257 return ret;
2258}
2259
2260int trace_array_printk_buf(struct ring_buffer *buffer,
2261 unsigned long ip, const char *fmt, ...)
2262{
2263 int ret;
2264 va_list ap;
2265
983f938a 2266 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2267 return 0;
2268
2269 va_start(ap, fmt);
2270 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2271 va_end(ap);
2272 return ret;
2273}
2274
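/*
 * Usage sketch (illustrative): write into a specific trace instance
 * rather than the global buffer. Obtaining the instance pointer "tr"
 * is out of scope here; _THIS_IP_ records the call site:
 *
 *	trace_array_printk(tr, _THIS_IP_, "widget %d reset\n", id);
 */
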
659372d3
SR
2275int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2276{
a813a159 2277 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2278}
769b0441
FW
2279EXPORT_SYMBOL_GPL(trace_vprintk);
2280
e2ac8ef5 2281static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2282{
6d158a81
SR
2283 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2284
5a90f577 2285 iter->idx++;
6d158a81
SR
2286 if (buf_iter)
2287 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2288}
2289
e309b41d 2290static struct trace_entry *
bc21b478
SR
2291peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292 unsigned long *lost_events)
dd0e545f 2293{
3928a8a2 2294 struct ring_buffer_event *event;
6d158a81 2295 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2296
d769041f
SR
2297 if (buf_iter)
2298 event = ring_buffer_iter_peek(buf_iter, ts);
2299 else
12883efb 2300 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2301 lost_events);
d769041f 2302
4a9bd3f1
SR
2303 if (event) {
2304 iter->ent_size = ring_buffer_event_length(event);
2305 return ring_buffer_event_data(event);
2306 }
2307 iter->ent_size = 0;
2308 return NULL;
dd0e545f 2309}
d769041f 2310
dd0e545f 2311static struct trace_entry *
bc21b478
SR
2312__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2313 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2314{
12883efb 2315 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2316 struct trace_entry *ent, *next = NULL;
aa27497c 2317 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2318 int cpu_file = iter->cpu_file;
3928a8a2 2319 u64 next_ts = 0, ts;
bc0c38d1 2320 int next_cpu = -1;
12b5da34 2321 int next_size = 0;
bc0c38d1
SR
2322 int cpu;
2323
b04cc6b1
FW
2324 /*
2325	 * If we are in a per_cpu trace file, don't bother iterating over
2326	 * all CPUs; peek at that one directly.
2327 */
ae3b5093 2328 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2329 if (ring_buffer_empty_cpu(buffer, cpu_file))
2330 return NULL;
bc21b478 2331 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2332 if (ent_cpu)
2333 *ent_cpu = cpu_file;
2334
2335 return ent;
2336 }
2337
ab46428c 2338 for_each_tracing_cpu(cpu) {
dd0e545f 2339
3928a8a2
SR
2340 if (ring_buffer_empty_cpu(buffer, cpu))
2341 continue;
dd0e545f 2342
bc21b478 2343 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2344
cdd31cd2
IM
2345 /*
2346 * Pick the entry with the smallest timestamp:
2347 */
3928a8a2 2348 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2349 next = ent;
2350 next_cpu = cpu;
3928a8a2 2351 next_ts = ts;
bc21b478 2352 next_lost = lost_events;
12b5da34 2353 next_size = iter->ent_size;
bc0c38d1
SR
2354 }
2355 }
2356
12b5da34
SR
2357 iter->ent_size = next_size;
2358
bc0c38d1
SR
2359 if (ent_cpu)
2360 *ent_cpu = next_cpu;
2361
3928a8a2
SR
2362 if (ent_ts)
2363 *ent_ts = next_ts;
2364
bc21b478
SR
2365 if (missing_events)
2366 *missing_events = next_lost;
2367
bc0c38d1
SR
2368 return next;
2369}
2370
dd0e545f 2371/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2372struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2373 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2374{
bc21b478 2375 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2376}
2377
2378/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2379void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2380{
bc21b478
SR
2381 iter->ent = __find_next_entry(iter, &iter->cpu,
2382 &iter->lost_events, &iter->ts);
dd0e545f 2383
3928a8a2 2384 if (iter->ent)
e2ac8ef5 2385 trace_iterator_increment(iter);
dd0e545f 2386
3928a8a2 2387 return iter->ent ? iter : NULL;
b3806b43 2388}
bc0c38d1 2389
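/*
 * Illustrative reader loop (sketch): this is essentially how the
 * seq_file path below walks the merged buffers. Each call returns the
 * entry with the oldest timestamp across all per-cpu buffers and
 * advances past it:
 *
 *	while (trace_find_next_entry_inc(iter))
 *		print_trace_line(iter);
 */
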
e309b41d 2390static void trace_consume(struct trace_iterator *iter)
b3806b43 2391{
12883efb 2392 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2393 &iter->lost_events);
bc0c38d1
SR
2394}
2395
e309b41d 2396static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2397{
2398 struct trace_iterator *iter = m->private;
bc0c38d1 2399 int i = (int)*pos;
4e3c3333 2400 void *ent;
bc0c38d1 2401
a63ce5b3
SR
2402 WARN_ON_ONCE(iter->leftover);
2403
bc0c38d1
SR
2404 (*pos)++;
2405
2406 /* can't go backwards */
2407 if (iter->idx > i)
2408 return NULL;
2409
2410 if (iter->idx < 0)
955b61e5 2411 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2412 else
2413 ent = iter;
2414
2415 while (ent && iter->idx < i)
955b61e5 2416 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2417
2418 iter->pos = *pos;
2419
bc0c38d1
SR
2420 return ent;
2421}
2422
955b61e5 2423void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2424{
2f26ebd5
SR
2425 struct ring_buffer_event *event;
2426 struct ring_buffer_iter *buf_iter;
2427 unsigned long entries = 0;
2428 u64 ts;
2429
12883efb 2430 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2431
6d158a81
SR
2432 buf_iter = trace_buffer_iter(iter, cpu);
2433 if (!buf_iter)
2f26ebd5
SR
2434 return;
2435
2f26ebd5
SR
2436 ring_buffer_iter_reset(buf_iter);
2437
2438 /*
2439	 * With the max latency tracers, it is possible that a reset
2440	 * never took place on a cpu. This is evident when the
2441	 * timestamp is before the start of the buffer.
2442 */
2443 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2444 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2445 break;
2446 entries++;
2447 ring_buffer_read(buf_iter, NULL);
2448 }
2449
12883efb 2450 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2451}
2452
d7350c3f 2453/*
d7350c3f
FW
2454 * The current tracer is copied to avoid global locking
2455 * all around.
2456 */
bc0c38d1
SR
2457static void *s_start(struct seq_file *m, loff_t *pos)
2458{
2459 struct trace_iterator *iter = m->private;
2b6080f2 2460 struct trace_array *tr = iter->tr;
b04cc6b1 2461 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2462 void *p = NULL;
2463 loff_t l = 0;
3928a8a2 2464 int cpu;
bc0c38d1 2465
2fd196ec
HT
2466 /*
2467 * copy the tracer to avoid using a global lock all around.
2468 * iter->trace is a copy of current_trace, the pointer to the
2469 * name may be used instead of a strcmp(), as iter->trace->name
2470 * will point to the same string as current_trace->name.
2471 */
bc0c38d1 2472 mutex_lock(&trace_types_lock);
2b6080f2
SR
2473 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2474 *iter->trace = *tr->current_trace;
d7350c3f 2475 mutex_unlock(&trace_types_lock);
bc0c38d1 2476
12883efb 2477#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2478 if (iter->snapshot && iter->trace->use_max_tr)
2479 return ERR_PTR(-EBUSY);
12883efb 2480#endif
debdd57f
HT
2481
2482 if (!iter->snapshot)
2483 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2484
bc0c38d1
SR
2485 if (*pos != iter->pos) {
2486 iter->ent = NULL;
2487 iter->cpu = 0;
2488 iter->idx = -1;
2489
ae3b5093 2490 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2491 for_each_tracing_cpu(cpu)
2f26ebd5 2492 tracing_iter_reset(iter, cpu);
b04cc6b1 2493 } else
2f26ebd5 2494 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2495
ac91d854 2496 iter->leftover = 0;
bc0c38d1
SR
2497 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2498 ;
2499
2500 } else {
a63ce5b3
SR
2501 /*
2502 * If we overflowed the seq_file before, then we want
2503 * to just reuse the trace_seq buffer again.
2504 */
2505 if (iter->leftover)
2506 p = iter;
2507 else {
2508 l = *pos - 1;
2509 p = s_next(m, p, &l);
2510 }
bc0c38d1
SR
2511 }
2512
4f535968 2513 trace_event_read_lock();
7e53bd42 2514 trace_access_lock(cpu_file);
bc0c38d1
SR
2515 return p;
2516}
2517
2518static void s_stop(struct seq_file *m, void *p)
2519{
7e53bd42
LJ
2520 struct trace_iterator *iter = m->private;
2521
12883efb 2522#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2523 if (iter->snapshot && iter->trace->use_max_tr)
2524 return;
12883efb 2525#endif
debdd57f
HT
2526
2527 if (!iter->snapshot)
2528 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2529
7e53bd42 2530 trace_access_unlock(iter->cpu_file);
4f535968 2531 trace_event_read_unlock();
bc0c38d1
SR
2532}
2533
39eaf7ef 2534static void
12883efb
SRRH
2535get_total_entries(struct trace_buffer *buf,
2536 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2537{
2538 unsigned long count;
2539 int cpu;
2540
2541 *total = 0;
2542 *entries = 0;
2543
2544 for_each_tracing_cpu(cpu) {
12883efb 2545 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2546 /*
2547 * If this buffer has skipped entries, then we hold all
2548 * entries for the trace and we need to ignore the
2549 * ones before the time stamp.
2550 */
12883efb
SRRH
2551 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2552 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2553 /* total is the same as the entries */
2554 *total += count;
2555 } else
2556 *total += count +
12883efb 2557 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2558 *entries += count;
2559 }
2560}
2561
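/*
 * Worked example (illustrative): if a cpu buffer currently holds 1000
 * entries and 250 older events were overwritten, *entries accounts for
 * the 1000 still present while *total also counts the 250 overruns
 * (1250). When entries were deliberately skipped (latency tracers),
 * total is reported equal to entries instead.
 */
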
e309b41d 2562static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2563{
d79ac28f
RV
2564 seq_puts(m, "# _------=> CPU# \n"
2565 "# / _-----=> irqs-off \n"
2566 "# | / _----=> need-resched \n"
2567 "# || / _---=> hardirq/softirq \n"
2568 "# ||| / _--=> preempt-depth \n"
2569 "# |||| / delay \n"
2570 "# cmd pid ||||| time | caller \n"
2571 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2572}
2573
12883efb 2574static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2575{
39eaf7ef
SR
2576 unsigned long total;
2577 unsigned long entries;
2578
12883efb 2579 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2580 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2581 entries, total, num_online_cpus());
2582 seq_puts(m, "#\n");
2583}
2584
12883efb 2585static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2586{
12883efb 2587 print_event_info(buf, m);
d79ac28f
RV
2588 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2589 "# | | | | |\n");
bc0c38d1
SR
2590}
2591
12883efb 2592static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2593{
12883efb 2594 print_event_info(buf, m);
d79ac28f
RV
2595 seq_puts(m, "# _-----=> irqs-off\n"
2596 "# / _----=> need-resched\n"
2597 "# | / _---=> hardirq/softirq\n"
2598 "# || / _--=> preempt-depth\n"
2599 "# ||| / delay\n"
2600 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2601 "# | | | |||| | |\n");
77271ce4 2602}
bc0c38d1 2603
62b915f1 2604void
bc0c38d1
SR
2605print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2606{
983f938a 2607 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2608 struct trace_buffer *buf = iter->trace_buffer;
2609 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2610 struct tracer *type = iter->trace;
39eaf7ef
SR
2611 unsigned long entries;
2612 unsigned long total;
bc0c38d1
SR
2613 const char *name = "preemption";
2614
d840f718 2615 name = type->name;
bc0c38d1 2616
12883efb 2617 get_total_entries(buf, &total, &entries);
bc0c38d1 2618
888b55dc 2619 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2620 name, UTS_RELEASE);
888b55dc 2621 seq_puts(m, "# -----------------------------------"
bc0c38d1 2622 "---------------------------------\n");
888b55dc 2623 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2624 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2625 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2626 entries,
4c11d7ae 2627 total,
12883efb 2628 buf->cpu,
bc0c38d1
SR
2629#if defined(CONFIG_PREEMPT_NONE)
2630 "server",
2631#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2632 "desktop",
b5c21b45 2633#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2634 "preempt",
2635#else
2636 "unknown",
2637#endif
2638 /* These are reserved for later use */
2639 0, 0, 0, 0);
2640#ifdef CONFIG_SMP
2641 seq_printf(m, " #P:%d)\n", num_online_cpus());
2642#else
2643 seq_puts(m, ")\n");
2644#endif
888b55dc
KM
2645 seq_puts(m, "# -----------------\n");
2646 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2647 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2648 data->comm, data->pid,
2649 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2650 data->policy, data->rt_priority);
888b55dc 2651 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2652
2653 if (data->critical_start) {
888b55dc 2654 seq_puts(m, "# => started at: ");
214023c3
SR
2655 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2656 trace_print_seq(m, &iter->seq);
888b55dc 2657 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2658 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2659 trace_print_seq(m, &iter->seq);
8248ac05 2660 seq_puts(m, "\n#\n");
bc0c38d1
SR
2661 }
2662
888b55dc 2663 seq_puts(m, "#\n");
bc0c38d1
SR
2664}
2665
a309720c
SR
2666static void test_cpu_buff_start(struct trace_iterator *iter)
2667{
2668 struct trace_seq *s = &iter->seq;
983f938a 2669 struct trace_array *tr = iter->tr;
a309720c 2670
983f938a 2671 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
2672 return;
2673
2674 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2675 return;
2676
919cd979 2677 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2678 return;
2679
12883efb 2680 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2681 return;
2682
919cd979
SL
2683 if (iter->started)
2684 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2685
2686 /* Don't print started cpu buffer for the first entry of the trace */
2687 if (iter->idx > 1)
2688 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2689 iter->cpu);
a309720c
SR
2690}
2691
2c4f035f 2692static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2693{
983f938a 2694 struct trace_array *tr = iter->tr;
214023c3 2695 struct trace_seq *s = &iter->seq;
983f938a 2696 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2697 struct trace_entry *entry;
f633cef0 2698 struct trace_event *event;
bc0c38d1 2699
4e3c3333 2700 entry = iter->ent;
dd0e545f 2701
a309720c
SR
2702 test_cpu_buff_start(iter);
2703
c4a8e8be 2704 event = ftrace_find_event(entry->type);
bc0c38d1 2705
983f938a 2706 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2707 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2708 trace_print_lat_context(iter);
2709 else
2710 trace_print_context(iter);
c4a8e8be 2711 }
bc0c38d1 2712
19a7fe20
SRRH
2713 if (trace_seq_has_overflowed(s))
2714 return TRACE_TYPE_PARTIAL_LINE;
2715
268ccda0 2716 if (event)
a9a57763 2717 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2718
19a7fe20 2719 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2720
19a7fe20 2721 return trace_handle_return(s);
bc0c38d1
SR
2722}
2723
2c4f035f 2724static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 2725{
983f938a 2726 struct trace_array *tr = iter->tr;
f9896bf3
IM
2727 struct trace_seq *s = &iter->seq;
2728 struct trace_entry *entry;
f633cef0 2729 struct trace_event *event;
f9896bf3
IM
2730
2731 entry = iter->ent;
dd0e545f 2732
983f938a 2733 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
2734 trace_seq_printf(s, "%d %d %llu ",
2735 entry->pid, iter->cpu, iter->ts);
2736
2737 if (trace_seq_has_overflowed(s))
2738 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2739
f633cef0 2740 event = ftrace_find_event(entry->type);
268ccda0 2741 if (event)
a9a57763 2742 return event->funcs->raw(iter, 0, event);
d9793bd8 2743
19a7fe20 2744 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2745
19a7fe20 2746 return trace_handle_return(s);
f9896bf3
IM
2747}
2748
2c4f035f 2749static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 2750{
983f938a 2751 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
2752 struct trace_seq *s = &iter->seq;
2753 unsigned char newline = '\n';
2754 struct trace_entry *entry;
f633cef0 2755 struct trace_event *event;
5e3ca0ec
IM
2756
2757 entry = iter->ent;
dd0e545f 2758
983f938a 2759 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2760 SEQ_PUT_HEX_FIELD(s, entry->pid);
2761 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2762 SEQ_PUT_HEX_FIELD(s, iter->ts);
2763 if (trace_seq_has_overflowed(s))
2764 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2765 }
5e3ca0ec 2766
f633cef0 2767 event = ftrace_find_event(entry->type);
268ccda0 2768 if (event) {
a9a57763 2769 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2770 if (ret != TRACE_TYPE_HANDLED)
2771 return ret;
2772 }
7104f300 2773
19a7fe20 2774 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2775
19a7fe20 2776 return trace_handle_return(s);
5e3ca0ec
IM
2777}
2778
2c4f035f 2779static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 2780{
983f938a 2781 struct trace_array *tr = iter->tr;
cb0f12aa
IM
2782 struct trace_seq *s = &iter->seq;
2783 struct trace_entry *entry;
f633cef0 2784 struct trace_event *event;
cb0f12aa
IM
2785
2786 entry = iter->ent;
dd0e545f 2787
983f938a 2788 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2789 SEQ_PUT_FIELD(s, entry->pid);
2790 SEQ_PUT_FIELD(s, iter->cpu);
2791 SEQ_PUT_FIELD(s, iter->ts);
2792 if (trace_seq_has_overflowed(s))
2793 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2794 }
cb0f12aa 2795
f633cef0 2796 event = ftrace_find_event(entry->type);
a9a57763
SR
2797 return event ? event->funcs->binary(iter, 0, event) :
2798 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2799}
2800
62b915f1 2801int trace_empty(struct trace_iterator *iter)
bc0c38d1 2802{
6d158a81 2803 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2804 int cpu;
2805
9aba60fe 2806 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2807 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2808 cpu = iter->cpu_file;
6d158a81
SR
2809 buf_iter = trace_buffer_iter(iter, cpu);
2810 if (buf_iter) {
2811 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2812 return 0;
2813 } else {
12883efb 2814 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2815 return 0;
2816 }
2817 return 1;
2818 }
2819
ab46428c 2820 for_each_tracing_cpu(cpu) {
6d158a81
SR
2821 buf_iter = trace_buffer_iter(iter, cpu);
2822 if (buf_iter) {
2823 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2824 return 0;
2825 } else {
12883efb 2826 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2827 return 0;
2828 }
bc0c38d1 2829 }
d769041f 2830
797d3712 2831 return 1;
bc0c38d1
SR
2832}
2833
4f535968 2834/* Called with trace_event_read_lock() held. */
955b61e5 2835enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2836{
983f938a
SRRH
2837 struct trace_array *tr = iter->tr;
2838 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
2839 enum print_line_t ret;
2840
19a7fe20
SRRH
2841 if (iter->lost_events) {
2842 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2843 iter->cpu, iter->lost_events);
2844 if (trace_seq_has_overflowed(&iter->seq))
2845 return TRACE_TYPE_PARTIAL_LINE;
2846 }
bc21b478 2847
2c4f035f
FW
2848 if (iter->trace && iter->trace->print_line) {
2849 ret = iter->trace->print_line(iter);
2850 if (ret != TRACE_TYPE_UNHANDLED)
2851 return ret;
2852 }
72829bc3 2853
09ae7234
SRRH
2854 if (iter->ent->type == TRACE_BPUTS &&
2855 trace_flags & TRACE_ITER_PRINTK &&
2856 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2857 return trace_print_bputs_msg_only(iter);
2858
48ead020
FW
2859 if (iter->ent->type == TRACE_BPRINT &&
2860 trace_flags & TRACE_ITER_PRINTK &&
2861 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2862 return trace_print_bprintk_msg_only(iter);
48ead020 2863
66896a85
FW
2864 if (iter->ent->type == TRACE_PRINT &&
2865 trace_flags & TRACE_ITER_PRINTK &&
2866 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2867 return trace_print_printk_msg_only(iter);
66896a85 2868
cb0f12aa
IM
2869 if (trace_flags & TRACE_ITER_BIN)
2870 return print_bin_fmt(iter);
2871
5e3ca0ec
IM
2872 if (trace_flags & TRACE_ITER_HEX)
2873 return print_hex_fmt(iter);
2874
f9896bf3
IM
2875 if (trace_flags & TRACE_ITER_RAW)
2876 return print_raw_fmt(iter);
2877
f9896bf3
IM
2878 return print_trace_fmt(iter);
2879}
2880
7e9a49ef
JO
2881void trace_latency_header(struct seq_file *m)
2882{
2883 struct trace_iterator *iter = m->private;
983f938a 2884 struct trace_array *tr = iter->tr;
7e9a49ef
JO
2885
2886 /* print nothing if the buffers are empty */
2887 if (trace_empty(iter))
2888 return;
2889
2890 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2891 print_trace_header(m, iter);
2892
983f938a 2893 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
2894 print_lat_help_header(m);
2895}
2896
62b915f1
JO
2897void trace_default_header(struct seq_file *m)
2898{
2899 struct trace_iterator *iter = m->private;
983f938a
SRRH
2900 struct trace_array *tr = iter->tr;
2901 unsigned long trace_flags = tr->trace_flags;
62b915f1 2902
f56e7f8e
JO
2903 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2904 return;
2905
62b915f1
JO
2906 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2907 /* print nothing if the buffers are empty */
2908 if (trace_empty(iter))
2909 return;
2910 print_trace_header(m, iter);
2911 if (!(trace_flags & TRACE_ITER_VERBOSE))
2912 print_lat_help_header(m);
2913 } else {
77271ce4
SR
2914 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2915 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2916 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2917 else
12883efb 2918 print_func_help_header(iter->trace_buffer, m);
77271ce4 2919 }
62b915f1
JO
2920 }
2921}
2922
e0a413f6
SR
2923static void test_ftrace_alive(struct seq_file *m)
2924{
2925 if (!ftrace_is_dead())
2926 return;
d79ac28f
RV
2927 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2928 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2929}
2930
d8741e2e 2931#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2932static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2933{
d79ac28f
RV
2934 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2935 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2936 "# Takes a snapshot of the main buffer.\n"
2937 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2938 "# (Doesn't have to be '2' works with any number that\n"
2939 "# is not a '0' or '1')\n");
d8741e2e 2940}
f1affcaa
SRRH
2941
2942static void show_snapshot_percpu_help(struct seq_file *m)
2943{
fa6f0cc7 2944 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2945#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2946 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2947 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2948#else
d79ac28f
RV
2949 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2950 "# Must use main snapshot file to allocate.\n");
f1affcaa 2951#endif
d79ac28f
RV
2952 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2953 "# (Doesn't have to be '2' works with any number that\n"
2954 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2955}
2956
d8741e2e
SRRH
2957static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2958{
45ad21ca 2959 if (iter->tr->allocated_snapshot)
fa6f0cc7 2960 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2961 else
fa6f0cc7 2962 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2963
fa6f0cc7 2964 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2965 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2966 show_snapshot_main_help(m);
2967 else
2968 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2969}
2970#else
2971/* Should never be called */
2972static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2973#endif
2974
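/*
 * User-space sketch (illustrative): the snapshot workflow the help
 * text above describes, from a shell (tracefs assumed mounted at
 * /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot		(allocate + take)
 *	# cat /sys/kernel/tracing/snapshot
 *	# echo 0 > /sys/kernel/tracing/snapshot		(free)
 */
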
bc0c38d1
SR
2975static int s_show(struct seq_file *m, void *v)
2976{
2977 struct trace_iterator *iter = v;
a63ce5b3 2978 int ret;
bc0c38d1
SR
2979
2980 if (iter->ent == NULL) {
2981 if (iter->tr) {
2982 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2983 seq_puts(m, "#\n");
e0a413f6 2984 test_ftrace_alive(m);
bc0c38d1 2985 }
d8741e2e
SRRH
2986 if (iter->snapshot && trace_empty(iter))
2987 print_snapshot_help(m, iter);
2988 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2989 iter->trace->print_header(m);
62b915f1
JO
2990 else
2991 trace_default_header(m);
2992
a63ce5b3
SR
2993 } else if (iter->leftover) {
2994 /*
2995 * If we filled the seq_file buffer earlier, we
2996 * want to just show it now.
2997 */
2998 ret = trace_print_seq(m, &iter->seq);
2999
3000 /* ret should this time be zero, but you never know */
3001 iter->leftover = ret;
3002
bc0c38d1 3003 } else {
f9896bf3 3004 print_trace_line(iter);
a63ce5b3
SR
3005 ret = trace_print_seq(m, &iter->seq);
3006 /*
3007 * If we overflow the seq_file buffer, then it will
3008 * ask us for this data again at start up.
3009 * Use that instead.
3010 * ret is 0 if seq_file write succeeded.
3011 * -1 otherwise.
3012 */
3013 iter->leftover = ret;
bc0c38d1
SR
3014 }
3015
3016 return 0;
3017}
3018
649e9c70
ON
3019/*
3020 * Should be used after trace_array_get(), trace_types_lock
3021 * ensures that i_cdev was already initialized.
3022 */
3023static inline int tracing_get_cpu(struct inode *inode)
3024{
3025 if (inode->i_cdev) /* See trace_create_cpu_file() */
3026 return (long)inode->i_cdev - 1;
3027 return RING_BUFFER_ALL_CPUS;
3028}
3029
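/*
 * Illustrative counterpart (assumption: this mirrors what
 * trace_create_cpu_file() does elsewhere in this file): the cpu number
 * is stored biased by one so that a NULL i_cdev means all CPUs.
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);
 *	...
 *	cpu = (long)inode->i_cdev - 1;
 */
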
88e9d34c 3030static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3031 .start = s_start,
3032 .next = s_next,
3033 .stop = s_stop,
3034 .show = s_show,
bc0c38d1
SR
3035};
3036
e309b41d 3037static struct trace_iterator *
6484c71c 3038__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3039{
6484c71c 3040 struct trace_array *tr = inode->i_private;
bc0c38d1 3041 struct trace_iterator *iter;
50e18b94 3042 int cpu;
bc0c38d1 3043
85a2f9b4
SR
3044 if (tracing_disabled)
3045 return ERR_PTR(-ENODEV);
60a11774 3046
50e18b94 3047 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3048 if (!iter)
3049 return ERR_PTR(-ENOMEM);
bc0c38d1 3050
72917235 3051 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3052 GFP_KERNEL);
93574fcc
DC
3053 if (!iter->buffer_iter)
3054 goto release;
3055
d7350c3f
FW
3056 /*
3057 * We make a copy of the current tracer to avoid concurrent
3058 * changes on it while we are reading.
3059 */
bc0c38d1 3060 mutex_lock(&trace_types_lock);
d7350c3f 3061 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3062 if (!iter->trace)
d7350c3f 3063 goto fail;
85a2f9b4 3064
2b6080f2 3065 *iter->trace = *tr->current_trace;
d7350c3f 3066
79f55997 3067 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3068 goto fail;
3069
12883efb
SRRH
3070 iter->tr = tr;
3071
3072#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3073 /* Currently only the top directory has a snapshot */
3074 if (tr->current_trace->print_max || snapshot)
12883efb 3075 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3076 else
12883efb
SRRH
3077#endif
3078 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3079 iter->snapshot = snapshot;
bc0c38d1 3080 iter->pos = -1;
6484c71c 3081 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3082 mutex_init(&iter->mutex);
bc0c38d1 3083
8bba1bf5
MM
3084 /* Notify the tracer early; before we stop tracing. */
3085 if (iter->trace && iter->trace->open)
a93751ca 3086 iter->trace->open(iter);
8bba1bf5 3087
12ef7d44 3088 /* Annotate start of buffers if we had overruns */
12883efb 3089 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3090 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3091
8be0709f 3092 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3093 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3094 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3095
debdd57f
HT
3096 /* stop the trace while dumping if we are not opening "snapshot" */
3097 if (!iter->snapshot)
2b6080f2 3098 tracing_stop_tr(tr);
2f26ebd5 3099
ae3b5093 3100 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3101 for_each_tracing_cpu(cpu) {
b04cc6b1 3102 iter->buffer_iter[cpu] =
12883efb 3103 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3104 }
3105 ring_buffer_read_prepare_sync();
3106 for_each_tracing_cpu(cpu) {
3107 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3108 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3109 }
3110 } else {
3111 cpu = iter->cpu_file;
3928a8a2 3112 iter->buffer_iter[cpu] =
12883efb 3113 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3114 ring_buffer_read_prepare_sync();
3115 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3116 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3117 }
3118
bc0c38d1
SR
3119 mutex_unlock(&trace_types_lock);
3120
bc0c38d1 3121 return iter;
3928a8a2 3122
d7350c3f 3123 fail:
3928a8a2 3124 mutex_unlock(&trace_types_lock);
d7350c3f 3125 kfree(iter->trace);
6d158a81 3126 kfree(iter->buffer_iter);
93574fcc 3127release:
50e18b94
JO
3128 seq_release_private(inode, file);
3129 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3130}
3131
3132int tracing_open_generic(struct inode *inode, struct file *filp)
3133{
60a11774
SR
3134 if (tracing_disabled)
3135 return -ENODEV;
3136
bc0c38d1
SR
3137 filp->private_data = inode->i_private;
3138 return 0;
3139}
3140
2e86421d
GB
3141bool tracing_is_disabled(void)
3142{
3143	return (tracing_disabled) ? true : false;
3144}
3145
7b85af63
SRRH
3146/*
3147 * Open and update trace_array ref count.
3148 * Must have the current trace_array passed to it.
3149 */
dcc30223 3150static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3151{
3152 struct trace_array *tr = inode->i_private;
3153
3154 if (tracing_disabled)
3155 return -ENODEV;
3156
3157 if (trace_array_get(tr) < 0)
3158 return -ENODEV;
3159
3160 filp->private_data = inode->i_private;
3161
3162 return 0;
7b85af63
SRRH
3163}
3164
4fd27358 3165static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3166{
6484c71c 3167 struct trace_array *tr = inode->i_private;
907f2784 3168 struct seq_file *m = file->private_data;
4acd4d00 3169 struct trace_iterator *iter;
3928a8a2 3170 int cpu;
bc0c38d1 3171
ff451961 3172 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3173 trace_array_put(tr);
4acd4d00 3174 return 0;
ff451961 3175 }
4acd4d00 3176
6484c71c 3177 /* Writes do not use seq_file */
4acd4d00 3178 iter = m->private;
bc0c38d1 3179 mutex_lock(&trace_types_lock);
a695cb58 3180
3928a8a2
SR
3181 for_each_tracing_cpu(cpu) {
3182 if (iter->buffer_iter[cpu])
3183 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3184 }
3185
bc0c38d1
SR
3186 if (iter->trace && iter->trace->close)
3187 iter->trace->close(iter);
3188
debdd57f
HT
3189 if (!iter->snapshot)
3190 /* reenable tracing if it was previously enabled */
2b6080f2 3191 tracing_start_tr(tr);
f77d09a3
AL
3192
3193 __trace_array_put(tr);
3194
bc0c38d1
SR
3195 mutex_unlock(&trace_types_lock);
3196
d7350c3f 3197 mutex_destroy(&iter->mutex);
b0dfa978 3198 free_cpumask_var(iter->started);
d7350c3f 3199 kfree(iter->trace);
6d158a81 3200 kfree(iter->buffer_iter);
50e18b94 3201 seq_release_private(inode, file);
ff451961 3202
bc0c38d1
SR
3203 return 0;
3204}
3205
7b85af63
SRRH
3206static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3207{
3208 struct trace_array *tr = inode->i_private;
3209
3210 trace_array_put(tr);
bc0c38d1
SR
3211 return 0;
3212}
3213
7b85af63
SRRH
3214static int tracing_single_release_tr(struct inode *inode, struct file *file)
3215{
3216 struct trace_array *tr = inode->i_private;
3217
3218 trace_array_put(tr);
3219
3220 return single_release(inode, file);
3221}
3222
bc0c38d1
SR
3223static int tracing_open(struct inode *inode, struct file *file)
3224{
6484c71c 3225 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3226 struct trace_iterator *iter;
3227 int ret = 0;
bc0c38d1 3228
ff451961
SRRH
3229 if (trace_array_get(tr) < 0)
3230 return -ENODEV;
3231
4acd4d00 3232 /* If this file was open for write, then erase contents */
6484c71c
ON
3233 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3234 int cpu = tracing_get_cpu(inode);
3235
3236 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3237 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3238 else
6484c71c 3239 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3240 }
bc0c38d1 3241
4acd4d00 3242 if (file->f_mode & FMODE_READ) {
6484c71c 3243 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3244 if (IS_ERR(iter))
3245 ret = PTR_ERR(iter);
983f938a 3246 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
3247 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3248 }
ff451961
SRRH
3249
3250 if (ret < 0)
3251 trace_array_put(tr);
3252
bc0c38d1
SR
3253 return ret;
3254}
3255
607e2ea1
SRRH
3256/*
3257 * Some tracers are not suitable for instance buffers.
3258 * A tracer is always available for the global array (toplevel)
3259 * or if it explicitly states that it is.
3260 */
3261static bool
3262trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3263{
3264 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3265}
3266
3267/* Find the next tracer that this trace array may use */
3268static struct tracer *
3269get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3270{
3271 while (t && !trace_ok_for_array(t, tr))
3272 t = t->next;
3273
3274 return t;
3275}
3276
e309b41d 3277static void *
bc0c38d1
SR
3278t_next(struct seq_file *m, void *v, loff_t *pos)
3279{
607e2ea1 3280 struct trace_array *tr = m->private;
f129e965 3281 struct tracer *t = v;
bc0c38d1
SR
3282
3283 (*pos)++;
3284
3285 if (t)
607e2ea1 3286 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3287
bc0c38d1
SR
3288 return t;
3289}
3290
3291static void *t_start(struct seq_file *m, loff_t *pos)
3292{
607e2ea1 3293 struct trace_array *tr = m->private;
f129e965 3294 struct tracer *t;
bc0c38d1
SR
3295 loff_t l = 0;
3296
3297 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3298
3299 t = get_tracer_for_array(tr, trace_types);
3300 for (; t && l < *pos; t = t_next(m, t, &l))
3301 ;
bc0c38d1
SR
3302
3303 return t;
3304}
3305
3306static void t_stop(struct seq_file *m, void *p)
3307{
3308 mutex_unlock(&trace_types_lock);
3309}
3310
3311static int t_show(struct seq_file *m, void *v)
3312{
3313 struct tracer *t = v;
3314
3315 if (!t)
3316 return 0;
3317
fa6f0cc7 3318 seq_puts(m, t->name);
bc0c38d1
SR
3319 if (t->next)
3320 seq_putc(m, ' ');
3321 else
3322 seq_putc(m, '\n');
3323
3324 return 0;
3325}
3326
88e9d34c 3327static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3328 .start = t_start,
3329 .next = t_next,
3330 .stop = t_stop,
3331 .show = t_show,
bc0c38d1
SR
3332};
3333
3334static int show_traces_open(struct inode *inode, struct file *file)
3335{
607e2ea1
SRRH
3336 struct trace_array *tr = inode->i_private;
3337 struct seq_file *m;
3338 int ret;
3339
60a11774
SR
3340 if (tracing_disabled)
3341 return -ENODEV;
3342
607e2ea1
SRRH
3343 ret = seq_open(file, &show_traces_seq_ops);
3344 if (ret)
3345 return ret;
3346
3347 m = file->private_data;
3348 m->private = tr;
3349
3350 return 0;
bc0c38d1
SR
3351}
3352
4acd4d00
SR
3353static ssize_t
3354tracing_write_stub(struct file *filp, const char __user *ubuf,
3355 size_t count, loff_t *ppos)
3356{
3357 return count;
3358}
3359
098c879e 3360loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3361{
098c879e
SRRH
3362 int ret;
3363
364829b1 3364 if (file->f_mode & FMODE_READ)
098c879e 3365 ret = seq_lseek(file, offset, whence);
364829b1 3366 else
098c879e
SRRH
3367 file->f_pos = ret = 0;
3368
3369 return ret;
364829b1
SP
3370}
3371
5e2336a0 3372static const struct file_operations tracing_fops = {
4bf39a94
IM
3373 .open = tracing_open,
3374 .read = seq_read,
4acd4d00 3375 .write = tracing_write_stub,
098c879e 3376 .llseek = tracing_lseek,
4bf39a94 3377 .release = tracing_release,
bc0c38d1
SR
3378};
3379
5e2336a0 3380static const struct file_operations show_traces_fops = {
c7078de1
IM
3381 .open = show_traces_open,
3382 .read = seq_read,
3383 .release = seq_release,
b444786f 3384 .llseek = seq_lseek,
c7078de1
IM
3385};
3386
36dfe925
IM
3387/*
3388 * The tracer itself will not take this lock, but still we want
3389 * to provide a consistent cpumask to user-space:
3390 */
3391static DEFINE_MUTEX(tracing_cpumask_update_lock);
3392
3393/*
3394 * Temporary storage for the character representation of the
3395 * CPU bitmask (and one more byte for the newline):
3396 */
3397static char mask_str[NR_CPUS + 1];
3398
c7078de1
IM
3399static ssize_t
3400tracing_cpumask_read(struct file *filp, char __user *ubuf,
3401 size_t count, loff_t *ppos)
3402{
ccfe9e42 3403 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3404 int len;
c7078de1
IM
3405
3406 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3407
1a40243b
TH
3408 len = snprintf(mask_str, count, "%*pb\n",
3409 cpumask_pr_args(tr->tracing_cpumask));
3410 if (len >= count) {
36dfe925
IM
3411 count = -EINVAL;
3412 goto out_err;
3413 }
36dfe925
IM
3414 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3415
3416out_err:
c7078de1
IM
3417 mutex_unlock(&tracing_cpumask_update_lock);
3418
3419 return count;
3420}
3421
3422static ssize_t
3423tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3424 size_t count, loff_t *ppos)
3425{
ccfe9e42 3426 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3427 cpumask_var_t tracing_cpumask_new;
2b6080f2 3428 int err, cpu;
9e01c1b7
RR
3429
3430 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3431 return -ENOMEM;
c7078de1 3432
9e01c1b7 3433 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3434 if (err)
36dfe925
IM
3435 goto err_unlock;
3436
215368e8
LZ
3437 mutex_lock(&tracing_cpumask_update_lock);
3438
a5e25883 3439 local_irq_disable();
0b9b12c1 3440 arch_spin_lock(&tr->max_lock);
ab46428c 3441 for_each_tracing_cpu(cpu) {
36dfe925
IM
3442 /*
3443 * Increase/decrease the disabled counter if we are
3444 * about to flip a bit in the cpumask:
3445 */
ccfe9e42 3446 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3447 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3448 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3449 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3450 }
ccfe9e42 3451 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3452 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3453 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3454 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3455 }
3456 }
0b9b12c1 3457 arch_spin_unlock(&tr->max_lock);
a5e25883 3458 local_irq_enable();
36dfe925 3459
ccfe9e42 3460 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3461
3462 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3463 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3464
3465 return count;
36dfe925
IM
3466
3467err_unlock:
215368e8 3468 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3469
3470 return err;
c7078de1
IM
3471}
3472
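/*
 * User-space sketch (illustrative): restrict tracing to CPUs 0 and 1
 * through the tracefs file backed by the handlers above (tracefs
 * assumed mounted at /sys/kernel/tracing):
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *	# cat /sys/kernel/tracing/tracing_cpumask
 *	3
 */
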
5e2336a0 3473static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3474 .open = tracing_open_generic_tr,
c7078de1
IM
3475 .read = tracing_cpumask_read,
3476 .write = tracing_cpumask_write,
ccfe9e42 3477 .release = tracing_release_generic_tr,
b444786f 3478 .llseek = generic_file_llseek,
bc0c38d1
SR
3479};
3480
fdb372ed 3481static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3482{
d8e83d26 3483 struct tracer_opt *trace_opts;
2b6080f2 3484 struct trace_array *tr = m->private;
d8e83d26 3485 u32 tracer_flags;
d8e83d26 3486 int i;
adf9f195 3487
d8e83d26 3488 mutex_lock(&trace_types_lock);
2b6080f2
SR
3489 tracer_flags = tr->current_trace->flags->val;
3490 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3491
bc0c38d1 3492 for (i = 0; trace_options[i]; i++) {
983f938a 3493 if (tr->trace_flags & (1 << i))
fdb372ed 3494 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3495 else
fdb372ed 3496 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3497 }
3498
adf9f195
FW
3499 for (i = 0; trace_opts[i].name; i++) {
3500 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3501 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3502 else
fdb372ed 3503 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3504 }
d8e83d26 3505 mutex_unlock(&trace_types_lock);
adf9f195 3506
fdb372ed 3507 return 0;
bc0c38d1 3508}
bc0c38d1 3509
8c1a49ae 3510static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3511 struct tracer_flags *tracer_flags,
3512 struct tracer_opt *opts, int neg)
3513{
8c1a49ae 3514 struct tracer *trace = tr->current_trace;
8d18eaaf 3515 int ret;
bc0c38d1 3516
8c1a49ae 3517 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3518 if (ret)
3519 return ret;
3520
3521 if (neg)
3522 tracer_flags->val &= ~opts->bit;
3523 else
3524 tracer_flags->val |= opts->bit;
3525 return 0;
bc0c38d1
SR
3526}
3527
adf9f195 3528/* Try to assign a tracer specific option */
8c1a49ae 3529static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3530{
8c1a49ae 3531 struct tracer *trace = tr->current_trace;
7770841e 3532 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3533 struct tracer_opt *opts = NULL;
8d18eaaf 3534 int i;
adf9f195 3535
7770841e
Z
3536 for (i = 0; tracer_flags->opts[i].name; i++) {
3537 opts = &tracer_flags->opts[i];
adf9f195 3538
8d18eaaf 3539 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3540 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3541 }
adf9f195 3542
8d18eaaf 3543 return -EINVAL;
adf9f195
FW
3544}
3545
613f04a0
SRRH
3546/* Some tracers require overwrite to stay enabled */
3547int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3548{
3549 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3550 return -1;
3551
3552 return 0;
3553}
3554
2b6080f2 3555int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3556{
3557 /* do nothing if flag is already set */
983f938a 3558 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
3559 return 0;
3560
3561 /* Give the tracer a chance to approve the change */
2b6080f2 3562 if (tr->current_trace->flag_changed)
bf6065b5 3563 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3564 return -EINVAL;
af4617bd
SR
3565
3566 if (enabled)
983f938a 3567 tr->trace_flags |= mask;
af4617bd 3568 else
983f938a 3569 tr->trace_flags &= ~mask;
e870e9a1
LZ
3570
3571 if (mask == TRACE_ITER_RECORD_CMD)
3572 trace_event_enable_cmd_record(enabled);
750912fa 3573
80902822 3574 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3575 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3576#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3577 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3578#endif
3579 }
81698831 3580
b9f9108c 3581 if (mask == TRACE_ITER_PRINTK) {
81698831 3582 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
3583 trace_printk_control(enabled);
3584 }
613f04a0
SRRH
3585
3586 return 0;
af4617bd
SR
3587}
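/*
 * Minimal call sketch (illustrative): flags are toggled one bit at a
 * time, and callers such as trace_set_options() below hold
 * trace_types_lock around the call:
 *
 *	mutex_lock(&trace_types_lock);
 *	ret = set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0);
 *	mutex_unlock(&trace_types_lock);
 */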
3588
2b6080f2 3589static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3590{
8d18eaaf 3591 char *cmp;
bc0c38d1 3592 int neg = 0;
613f04a0 3593 int ret = -ENODEV;
bc0c38d1 3594 int i;
a4d1e688 3595 size_t orig_len = strlen(option);
bc0c38d1 3596
7bcfaf54 3597 cmp = strstrip(option);
bc0c38d1 3598
8d18eaaf 3599 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3600 neg = 1;
3601 cmp += 2;
3602 }
3603
69d34da2
SRRH
3604 mutex_lock(&trace_types_lock);
3605
bc0c38d1 3606 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3607 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3608 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3609 break;
3610 }
3611 }
adf9f195
FW
3612
3613 /* If no option could be set, test the specific tracer options */
69d34da2 3614 if (!trace_options[i])
8c1a49ae 3615 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3616
3617 mutex_unlock(&trace_types_lock);
bc0c38d1 3618
a4d1e688
JW
3619 /*
3620 * If the first trailing whitespace is replaced with '\0' by strstrip,
3621 * turn it back into a space.
3622 */
3623 if (orig_len > strlen(option))
3624 option[strlen(option)] = ' ';
3625
7bcfaf54
SR
3626 return ret;
3627}
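/*
 * Example (illustrative): trace_set_options() understands a "no"
 * prefix, so both forms below reach set_tracer_flag() with the same
 * mask and opposite 'enabled' values:
 *
 *	# echo overwrite > trace_options
 *	# echo nooverwrite > trace_options
 */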
3628
a4d1e688
JW
3629static void __init apply_trace_boot_options(void)
3630{
3631 char *buf = trace_boot_options_buf;
3632 char *option;
3633
3634 while (true) {
3635 option = strsep(&buf, ",");
3636
3637 if (!option)
3638 break;
3639 if (!*option)
3640 continue;
3641
3642 trace_set_options(&global_trace, option);
3643
3644 /* Put back the comma to allow this to be called again */
3645 if (buf)
3646 *(buf - 1) = ',';
3647 }
3648}
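/*
 * Example (illustrative): the loop above replays the comma-separated
 * trace_options= boot parameter option by option, which lets tracer
 * specific options be given on the kernel command line as well,
 * e.g. together with a boot-time tracer:
 *
 *	ftrace=function trace_options=func_stack_trace
 */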
3649
7bcfaf54
SR
3650static ssize_t
3651tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3652 size_t cnt, loff_t *ppos)
3653{
2b6080f2
SR
3654 struct seq_file *m = filp->private_data;
3655 struct trace_array *tr = m->private;
7bcfaf54 3656 char buf[64];
613f04a0 3657 int ret;
7bcfaf54
SR
3658
3659 if (cnt >= sizeof(buf))
3660 return -EINVAL;
3661
3662 if (copy_from_user(&buf, ubuf, cnt))
3663 return -EFAULT;
3664
a8dd2176
SR
3665 buf[cnt] = 0;
3666
2b6080f2 3667 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3668 if (ret < 0)
3669 return ret;
7bcfaf54 3670
cf8517cf 3671 *ppos += cnt;
bc0c38d1
SR
3672
3673 return cnt;
3674}
3675
fdb372ed
LZ
3676static int tracing_trace_options_open(struct inode *inode, struct file *file)
3677{
7b85af63 3678 struct trace_array *tr = inode->i_private;
f77d09a3 3679 int ret;
7b85af63 3680
fdb372ed
LZ
3681 if (tracing_disabled)
3682 return -ENODEV;
2b6080f2 3683
7b85af63
SRRH
3684 if (trace_array_get(tr) < 0)
3685 return -ENODEV;
3686
f77d09a3
AL
3687 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3688 if (ret < 0)
3689 trace_array_put(tr);
3690
3691 return ret;
fdb372ed
LZ
3692}
3693
5e2336a0 3694static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3695 .open = tracing_trace_options_open,
3696 .read = seq_read,
3697 .llseek = seq_lseek,
7b85af63 3698 .release = tracing_single_release_tr,
ee6bce52 3699 .write = tracing_trace_options_write,
bc0c38d1
SR
3700};
3701
7bd2f24c
IM
3702static const char readme_msg[] =
3703 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3704 "# echo 0 > tracing_on : quick way to disable tracing\n"
3705 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3706 " Important files:\n"
3707 " trace\t\t\t- The static contents of the buffer\n"
3708 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3709 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3710 " current_tracer\t- function and latency tracers\n"
3711 " available_tracers\t- list of configured tracers for current_tracer\n"
3712 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3713 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3714 " trace_clock\t\t-change the clock used to order events\n"
3715 " local: Per cpu clock but may not be synced across CPUs\n"
3716 " global: Synced across CPUs but slows tracing down.\n"
3717 " counter: Not a clock, but just an increment\n"
3718 " uptime: Jiffy counter from time of boot\n"
3719 " perf: Same clock that perf events use\n"
3720#ifdef CONFIG_X86_64
3721 " x86-tsc: TSC cycle counter\n"
3722#endif
3723 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3724 " tracing_cpumask\t- Limit which CPUs to trace\n"
3725 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3726 "\t\t\t Remove sub-buffer with rmdir\n"
3727 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3728 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3729 "\t\t\t option name\n"
939c7a4f 3730 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3731#ifdef CONFIG_DYNAMIC_FTRACE
3732 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3733 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3734 "\t\t\t functions\n"
3735 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3736 "\t modules: Can select a group via module\n"
3737 "\t Format: :mod:<module-name>\n"
3738 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3739 "\t triggers: a command to perform when function is hit\n"
3740 "\t Format: <function>:<trigger>[:count]\n"
3741 "\t trigger: traceon, traceoff\n"
3742 "\t\t enable_event:<system>:<event>\n"
3743 "\t\t disable_event:<system>:<event>\n"
22f45649 3744#ifdef CONFIG_STACKTRACE
71485c45 3745 "\t\t stacktrace\n"
22f45649
SRRH
3746#endif
3747#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3748 "\t\t snapshot\n"
22f45649 3749#endif
17a280ea
SRRH
3750 "\t\t dump\n"
3751 "\t\t cpudump\n"
71485c45
SRRH
3752 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3753 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3754 "\t The first one will disable tracing every time do_fault is hit\n"
3755 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3756 "\t The first time do trap is hit and it disables tracing, the\n"
3757 "\t counter will decrement to 2. If tracing is already disabled,\n"
3758 "\t the counter will not decrement. It only decrements when the\n"
3759 "\t trigger did work\n"
3760 "\t To remove trigger without count:\n"
3761 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3762 "\t To remove trigger with a count:\n"
3763 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3764 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3765 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3766 "\t modules: Can select a group via module command :mod:\n"
3767 "\t Does not accept triggers\n"
22f45649
SRRH
3768#endif /* CONFIG_DYNAMIC_FTRACE */
3769#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3770 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3771 "\t\t (function)\n"
22f45649
SRRH
3772#endif
3773#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3774 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3775 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3776 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3777#endif
3778#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3779 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3780 "\t\t\t snapshot buffer. Read the contents for more\n"
3781 "\t\t\t information\n"
22f45649 3782#endif
991821c8 3783#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3784 " stack_trace\t\t- Shows the max stack trace when active\n"
3785 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3786 "\t\t\t Write into this file to reset the max size (trigger a\n"
3787 "\t\t\t new trace)\n"
22f45649 3788#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3789 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3790 "\t\t\t traces\n"
22f45649 3791#endif
991821c8 3792#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3793 " events/\t\t- Directory containing all trace event subsystems:\n"
3794 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3795 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3796 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3797 "\t\t\t events\n"
26f25564 3798 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3799 " events/<system>/<event>/\t- Directory containing control files for\n"
3800 "\t\t\t <event>:\n"
26f25564
TZ
3801 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3802 " filter\t\t- If set, only events passing filter are traced\n"
3803 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3804 "\t Format: <trigger>[:count][if <filter>]\n"
3805 "\t trigger: traceon, traceoff\n"
3806 "\t enable_event:<system>:<event>\n"
3807 "\t disable_event:<system>:<event>\n"
26f25564 3808#ifdef CONFIG_STACKTRACE
71485c45 3809 "\t\t stacktrace\n"
26f25564
TZ
3810#endif
3811#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3812 "\t\t snapshot\n"
26f25564 3813#endif
71485c45
SRRH
3814 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3815 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3816 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3817 "\t events/block/block_unplug/trigger\n"
3818 "\t The first disables tracing every time block_unplug is hit.\n"
3819 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3820 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3821 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3822 "\t Like function triggers, the counter is only decremented if it\n"
3823 "\t enabled or disabled tracing.\n"
3824 "\t To remove a trigger without a count:\n"
3825 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3826 "\t To remove a trigger with a count:\n"
3827 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3828 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3829;
3830
3831static ssize_t
3832tracing_readme_read(struct file *filp, char __user *ubuf,
3833 size_t cnt, loff_t *ppos)
3834{
3835 return simple_read_from_buffer(ubuf, cnt, ppos,
3836 readme_msg, strlen(readme_msg));
3837}
3838
5e2336a0 3839static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3840 .open = tracing_open_generic,
3841 .read = tracing_readme_read,
b444786f 3842 .llseek = generic_file_llseek,
7bd2f24c
IM
3843};
3844
42584c81
YY
3845static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3846{
3847 unsigned int *ptr = v;
69abe6a5 3848
42584c81
YY
3849 if (*pos || m->count)
3850 ptr++;
69abe6a5 3851
42584c81 3852 (*pos)++;
69abe6a5 3853
939c7a4f
YY
3854 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3855 ptr++) {
42584c81
YY
3856 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3857 continue;
69abe6a5 3858
42584c81
YY
3859 return ptr;
3860 }
69abe6a5 3861
42584c81
YY
3862 return NULL;
3863}
3864
3865static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3866{
3867 void *v;
3868 loff_t l = 0;
69abe6a5 3869
4c27e756
SRRH
3870 preempt_disable();
3871 arch_spin_lock(&trace_cmdline_lock);
3872
939c7a4f 3873 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3874 while (l <= *pos) {
3875 v = saved_cmdlines_next(m, v, &l);
3876 if (!v)
3877 return NULL;
69abe6a5
AP
3878 }
3879
42584c81
YY
3880 return v;
3881}
3882
3883static void saved_cmdlines_stop(struct seq_file *m, void *v)
3884{
4c27e756
SRRH
3885 arch_spin_unlock(&trace_cmdline_lock);
3886 preempt_enable();
42584c81 3887}
69abe6a5 3888
42584c81
YY
3889static int saved_cmdlines_show(struct seq_file *m, void *v)
3890{
3891 char buf[TASK_COMM_LEN];
3892 unsigned int *pid = v;
69abe6a5 3893
4c27e756 3894 __trace_find_cmdline(*pid, buf);
42584c81
YY
3895 seq_printf(m, "%d %s\n", *pid, buf);
3896 return 0;
3897}
3898
3899static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3900 .start = saved_cmdlines_start,
3901 .next = saved_cmdlines_next,
3902 .stop = saved_cmdlines_stop,
3903 .show = saved_cmdlines_show,
3904};
3905
3906static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3907{
3908 if (tracing_disabled)
3909 return -ENODEV;
3910
3911 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3912}
3913
3914static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3915 .open = tracing_saved_cmdlines_open,
3916 .read = seq_read,
3917 .llseek = seq_lseek,
3918 .release = seq_release,
69abe6a5
AP
3919};
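/*
 * Illustrative output of saved_cmdlines as produced by
 * saved_cmdlines_show() above, one "<pid> <comm>" pair per line:
 *
 *	1 systemd
 *	842 sshd
 *	1337 bash
 */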
3920
939c7a4f
YY
3921static ssize_t
3922tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3923 size_t cnt, loff_t *ppos)
3924{
3925 char buf[64];
3926 int r;
3927
3928 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3929 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3930 arch_spin_unlock(&trace_cmdline_lock);
3931
3932 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3933}
3934
3935static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3936{
3937 kfree(s->saved_cmdlines);
3938 kfree(s->map_cmdline_to_pid);
3939 kfree(s);
3940}
3941
3942static int tracing_resize_saved_cmdlines(unsigned int val)
3943{
3944 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3945
a6af8fbf 3946 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3947 if (!s)
3948 return -ENOMEM;
3949
3950 if (allocate_cmdlines_buffer(val, s) < 0) {
3951 kfree(s);
3952 return -ENOMEM;
3953 }
3954
3955 arch_spin_lock(&trace_cmdline_lock);
3956 savedcmd_temp = savedcmd;
3957 savedcmd = s;
3958 arch_spin_unlock(&trace_cmdline_lock);
3959 free_saved_cmdlines_buffer(savedcmd_temp);
3960
3961 return 0;
3962}
3963
3964static ssize_t
3965tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3966 size_t cnt, loff_t *ppos)
3967{
3968 unsigned long val;
3969 int ret;
3970
3971 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3972 if (ret)
3973 return ret;
3974
 3975 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
3976 if (!val || val > PID_MAX_DEFAULT)
3977 return -EINVAL;
3978
3979 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3980 if (ret < 0)
3981 return ret;
3982
3983 *ppos += cnt;
3984
3985 return cnt;
3986}
3987
3988static const struct file_operations tracing_saved_cmdlines_size_fops = {
3989 .open = tracing_open_generic,
3990 .read = tracing_saved_cmdlines_size_read,
3991 .write = tracing_saved_cmdlines_size_write,
3992};
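/*
 * Example (illustrative): resizing the cached comm list; the write
 * handler above accepts values from 1 to PID_MAX_DEFAULT:
 *
 *	# echo 4096 > saved_cmdlines_size
 *	# cat saved_cmdlines_size
 *	4096
 */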
3993
9828413d
SRRH
3994#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3995static union trace_enum_map_item *
3996update_enum_map(union trace_enum_map_item *ptr)
3997{
3998 if (!ptr->map.enum_string) {
3999 if (ptr->tail.next) {
4000 ptr = ptr->tail.next;
4001 /* Set ptr to the next real item (skip head) */
4002 ptr++;
4003 } else
4004 return NULL;
4005 }
4006 return ptr;
4007}
4008
4009static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4010{
4011 union trace_enum_map_item *ptr = v;
4012
4013 /*
4014 * Paranoid! If ptr points to end, we don't want to increment past it.
4015 * This really should never happen.
4016 */
4017 ptr = update_enum_map(ptr);
4018 if (WARN_ON_ONCE(!ptr))
4019 return NULL;
4020
4021 ptr++;
4022
4023 (*pos)++;
4024
4025 ptr = update_enum_map(ptr);
4026
4027 return ptr;
4028}
4029
4030static void *enum_map_start(struct seq_file *m, loff_t *pos)
4031{
4032 union trace_enum_map_item *v;
4033 loff_t l = 0;
4034
4035 mutex_lock(&trace_enum_mutex);
4036
4037 v = trace_enum_maps;
4038 if (v)
4039 v++;
4040
4041 while (v && l < *pos) {
4042 v = enum_map_next(m, v, &l);
4043 }
4044
4045 return v;
4046}
4047
4048static void enum_map_stop(struct seq_file *m, void *v)
4049{
4050 mutex_unlock(&trace_enum_mutex);
4051}
4052
4053static int enum_map_show(struct seq_file *m, void *v)
4054{
4055 union trace_enum_map_item *ptr = v;
4056
4057 seq_printf(m, "%s %ld (%s)\n",
4058 ptr->map.enum_string, ptr->map.enum_value,
4059 ptr->map.system);
4060
4061 return 0;
4062}
4063
4064static const struct seq_operations tracing_enum_map_seq_ops = {
4065 .start = enum_map_start,
4066 .next = enum_map_next,
4067 .stop = enum_map_stop,
4068 .show = enum_map_show,
4069};
4070
4071static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4072{
4073 if (tracing_disabled)
4074 return -ENODEV;
4075
4076 return seq_open(filp, &tracing_enum_map_seq_ops);
4077}
4078
4079static const struct file_operations tracing_enum_map_fops = {
4080 .open = tracing_enum_map_open,
4081 .read = seq_read,
4082 .llseek = seq_lseek,
4083 .release = seq_release,
4084};
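/*
 * Illustrative enum_map line as printed by enum_map_show() above,
 * in the form "<enum-string> <value> (<system>)"; the entry shown
 * here is made up:
 *
 *	HI_SOFTIRQ 0 (irq)
 */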
4085
4086static inline union trace_enum_map_item *
4087trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4088{
4089 /* Return tail of array given the head */
4090 return ptr + ptr->head.length + 1;
4091}
4092
4093static void
4094trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4095 int len)
4096{
4097 struct trace_enum_map **stop;
4098 struct trace_enum_map **map;
4099 union trace_enum_map_item *map_array;
4100 union trace_enum_map_item *ptr;
4101
4102 stop = start + len;
4103
4104 /*
4105 * The trace_enum_maps contains the map plus a head and tail item,
4106 * where the head holds the module and length of array, and the
4107 * tail holds a pointer to the next list.
4108 */
4109 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4110 if (!map_array) {
4111 pr_warning("Unable to allocate trace enum mapping\n");
4112 return;
4113 }
4114
4115 mutex_lock(&trace_enum_mutex);
4116
4117 if (!trace_enum_maps)
4118 trace_enum_maps = map_array;
4119 else {
4120 ptr = trace_enum_maps;
4121 for (;;) {
4122 ptr = trace_enum_jmp_to_tail(ptr);
4123 if (!ptr->tail.next)
4124 break;
4125 ptr = ptr->tail.next;
4126
4127 }
4128 ptr->tail.next = map_array;
4129 }
4130 map_array->head.mod = mod;
4131 map_array->head.length = len;
4132 map_array++;
4133
4134 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4135 map_array->map = **map;
4136 map_array++;
4137 }
4138 memset(map_array, 0, sizeof(*map_array));
4139
4140 mutex_unlock(&trace_enum_mutex);
4141}
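/*
 * Illustrative layout of one chunk built above for a module that
 * exports len == 2 enum maps (head and tail are the two extra items
 * allocated):
 *
 *	[0] head: { .mod = mod, .length = 2 }
 *	[1] map:  first trace_enum_map
 *	[2] map:  second trace_enum_map
 *	[3] tail: zeroed by the memset, so .next == NULL
 */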
4142
4143static void trace_create_enum_file(struct dentry *d_tracer)
4144{
4145 trace_create_file("enum_map", 0444, d_tracer,
4146 NULL, &tracing_enum_map_fops);
4147}
4148
4149#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4150static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4151static inline void trace_insert_enum_map_file(struct module *mod,
4152 struct trace_enum_map **start, int len) { }
4153#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4154
4155static void trace_insert_enum_map(struct module *mod,
4156 struct trace_enum_map **start, int len)
0c564a53
SRRH
4157{
4158 struct trace_enum_map **map;
0c564a53
SRRH
4159
4160 if (len <= 0)
4161 return;
4162
4163 map = start;
4164
4165 trace_event_enum_update(map, len);
9828413d
SRRH
4166
4167 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4168}
4169
bc0c38d1
SR
4170static ssize_t
4171tracing_set_trace_read(struct file *filp, char __user *ubuf,
4172 size_t cnt, loff_t *ppos)
4173{
2b6080f2 4174 struct trace_array *tr = filp->private_data;
ee6c2c1b 4175 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4176 int r;
4177
4178 mutex_lock(&trace_types_lock);
2b6080f2 4179 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4180 mutex_unlock(&trace_types_lock);
4181
4bf39a94 4182 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4183}
4184
b6f11df2
ACM
4185int tracer_init(struct tracer *t, struct trace_array *tr)
4186{
12883efb 4187 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4188 return t->init(tr);
4189}
4190
12883efb 4191static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4192{
4193 int cpu;
737223fb 4194
438ced17 4195 for_each_tracing_cpu(cpu)
12883efb 4196 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4197}
4198
12883efb 4199#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4200/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4201static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4202 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4203{
4204 int cpu, ret = 0;
4205
4206 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4207 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4208 ret = ring_buffer_resize(trace_buf->buffer,
4209 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4210 if (ret < 0)
4211 break;
12883efb
SRRH
4212 per_cpu_ptr(trace_buf->data, cpu)->entries =
4213 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4214 }
4215 } else {
12883efb
SRRH
4216 ret = ring_buffer_resize(trace_buf->buffer,
4217 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4218 if (ret == 0)
12883efb
SRRH
4219 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4220 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4221 }
4222
4223 return ret;
4224}
12883efb 4225#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4226
2b6080f2
SR
4227static int __tracing_resize_ring_buffer(struct trace_array *tr,
4228 unsigned long size, int cpu)
73c5162a
SR
4229{
4230 int ret;
4231
4232 /*
4233 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4234 * we use the size that was given, and we can forget about
4235 * expanding it later.
73c5162a 4236 */
55034cd6 4237 ring_buffer_expanded = true;
73c5162a 4238
b382ede6 4239 /* May be called before buffers are initialized */
12883efb 4240 if (!tr->trace_buffer.buffer)
b382ede6
SR
4241 return 0;
4242
12883efb 4243 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4244 if (ret < 0)
4245 return ret;
4246
12883efb 4247#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4248 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4249 !tr->current_trace->use_max_tr)
ef710e10
KM
4250 goto out;
4251
12883efb 4252 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4253 if (ret < 0) {
12883efb
SRRH
4254 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4255 &tr->trace_buffer, cpu);
73c5162a 4256 if (r < 0) {
a123c52b
SR
4257 /*
4258 * AARGH! We are left with different
4259 * size max buffer!!!!
4260 * The max buffer is our "snapshot" buffer.
4261 * When a tracer needs a snapshot (one of the
 4262 * latency tracers), it swaps the max buffer
 4263 * with the saved snapshot. We succeeded in
 4264 * updating the size of the main buffer, but failed to
4265 * update the size of the max buffer. But when we tried
4266 * to reset the main buffer to the original size, we
4267 * failed there too. This is very unlikely to
4268 * happen, but if it does, warn and kill all
4269 * tracing.
4270 */
73c5162a
SR
4271 WARN_ON(1);
4272 tracing_disabled = 1;
4273 }
4274 return ret;
4275 }
4276
438ced17 4277 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4278 set_buffer_entries(&tr->max_buffer, size);
438ced17 4279 else
12883efb 4280 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4281
ef710e10 4282 out:
12883efb
SRRH
4283#endif /* CONFIG_TRACER_MAX_TRACE */
4284
438ced17 4285 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4286 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4287 else
12883efb 4288 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4289
4290 return ret;
4291}
4292
2b6080f2
SR
4293static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4294 unsigned long size, int cpu_id)
4f271a2a 4295{
83f40318 4296 int ret = size;
4f271a2a
VN
4297
4298 mutex_lock(&trace_types_lock);
4299
438ced17
VN
4300 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4301 /* make sure, this cpu is enabled in the mask */
4302 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4303 ret = -EINVAL;
4304 goto out;
4305 }
4306 }
4f271a2a 4307
2b6080f2 4308 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4309 if (ret < 0)
4310 ret = -ENOMEM;
4311
438ced17 4312out:
4f271a2a
VN
4313 mutex_unlock(&trace_types_lock);
4314
4315 return ret;
4316}
4317
ef710e10 4318
1852fcce
SR
4319/**
4320 * tracing_update_buffers - used by tracing facility to expand ring buffers
4321 *
 4322 * To save memory when tracing is never used on a system that has it
 4323 * configured in, the ring buffers are set to a minimum size. But once
 4324 * a user starts to use the tracing facility, they need to grow
 4325 * to their default size.
4326 *
4327 * This function is to be called when a tracer is about to be used.
4328 */
4329int tracing_update_buffers(void)
4330{
4331 int ret = 0;
4332
1027fcb2 4333 mutex_lock(&trace_types_lock);
1852fcce 4334 if (!ring_buffer_expanded)
2b6080f2 4335 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4336 RING_BUFFER_ALL_CPUS);
1027fcb2 4337 mutex_unlock(&trace_types_lock);
1852fcce
SR
4338
4339 return ret;
4340}
4341
577b785f
SR
4342struct trace_option_dentry;
4343
37aea98b 4344static void
2b6080f2 4345create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4346
6b450d25
SRRH
4347/*
4348 * Used to clear out the tracer before deletion of an instance.
4349 * Must have trace_types_lock held.
4350 */
4351static void tracing_set_nop(struct trace_array *tr)
4352{
4353 if (tr->current_trace == &nop_trace)
4354 return;
4355
50512ab5 4356 tr->current_trace->enabled--;
6b450d25
SRRH
4357
4358 if (tr->current_trace->reset)
4359 tr->current_trace->reset(tr);
4360
4361 tr->current_trace = &nop_trace;
4362}
4363
41d9c0be 4364static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4365{
09d23a1d
SRRH
4366 /* Only enable if the directory has been created already. */
4367 if (!tr->dir)
4368 return;
4369
37aea98b 4370 create_trace_option_files(tr, t);
09d23a1d
SRRH
4371}
4372
4373static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4374{
bc0c38d1 4375 struct tracer *t;
12883efb 4376#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4377 bool had_max_tr;
12883efb 4378#endif
d9e54076 4379 int ret = 0;
bc0c38d1 4380
1027fcb2
SR
4381 mutex_lock(&trace_types_lock);
4382
73c5162a 4383 if (!ring_buffer_expanded) {
2b6080f2 4384 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4385 RING_BUFFER_ALL_CPUS);
73c5162a 4386 if (ret < 0)
59f586db 4387 goto out;
73c5162a
SR
4388 ret = 0;
4389 }
4390
bc0c38d1
SR
4391 for (t = trace_types; t; t = t->next) {
4392 if (strcmp(t->name, buf) == 0)
4393 break;
4394 }
c2931e05
FW
4395 if (!t) {
4396 ret = -EINVAL;
4397 goto out;
4398 }
2b6080f2 4399 if (t == tr->current_trace)
bc0c38d1
SR
4400 goto out;
4401
607e2ea1
SRRH
4402 /* Some tracers are only allowed for the top level buffer */
4403 if (!trace_ok_for_array(t, tr)) {
4404 ret = -EINVAL;
4405 goto out;
4406 }
4407
cf6ab6d9
SRRH
4408 /* If trace pipe files are being read, we can't change the tracer */
4409 if (tr->current_trace->ref) {
4410 ret = -EBUSY;
4411 goto out;
4412 }
4413
9f029e83 4414 trace_branch_disable();
613f04a0 4415
50512ab5 4416 tr->current_trace->enabled--;
613f04a0 4417
2b6080f2
SR
4418 if (tr->current_trace->reset)
4419 tr->current_trace->reset(tr);
34600f0e 4420
12883efb 4421 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4422 tr->current_trace = &nop_trace;
34600f0e 4423
45ad21ca
SRRH
4424#ifdef CONFIG_TRACER_MAX_TRACE
4425 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4426
4427 if (had_max_tr && !t->use_max_tr) {
4428 /*
4429 * We need to make sure that the update_max_tr sees that
4430 * current_trace changed to nop_trace to keep it from
4431 * swapping the buffers after we resize it.
4432 * The update_max_tr is called from interrupts disabled
4433 * so a synchronized_sched() is sufficient.
4434 */
4435 synchronize_sched();
3209cff4 4436 free_snapshot(tr);
ef710e10 4437 }
12883efb 4438#endif
12883efb
SRRH
4439
4440#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4441 if (t->use_max_tr && !had_max_tr) {
3209cff4 4442 ret = alloc_snapshot(tr);
d60da506
HT
4443 if (ret < 0)
4444 goto out;
ef710e10 4445 }
12883efb 4446#endif
577b785f 4447
1c80025a 4448 if (t->init) {
b6f11df2 4449 ret = tracer_init(t, tr);
1c80025a
FW
4450 if (ret)
4451 goto out;
4452 }
bc0c38d1 4453
2b6080f2 4454 tr->current_trace = t;
50512ab5 4455 tr->current_trace->enabled++;
9f029e83 4456 trace_branch_enable(tr);
bc0c38d1
SR
4457 out:
4458 mutex_unlock(&trace_types_lock);
4459
d9e54076
PZ
4460 return ret;
4461}
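/*
 * Example (illustrative): switching tracers from userspace ends up
 * in tracing_set_tracer() above; the names come from the trace_types
 * list, and the output below is just a sample:
 *
 *	# cat available_tracers
 *	function_graph function nop
 *	# echo function_graph > current_tracer
 */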
4462
4463static ssize_t
4464tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4465 size_t cnt, loff_t *ppos)
4466{
607e2ea1 4467 struct trace_array *tr = filp->private_data;
ee6c2c1b 4468 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4469 int i;
4470 size_t ret;
e6e7a65a
FW
4471 int err;
4472
4473 ret = cnt;
d9e54076 4474
ee6c2c1b
LZ
4475 if (cnt > MAX_TRACER_SIZE)
4476 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4477
4478 if (copy_from_user(&buf, ubuf, cnt))
4479 return -EFAULT;
4480
4481 buf[cnt] = 0;
4482
4483 /* strip ending whitespace. */
4484 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4485 buf[i] = 0;
4486
607e2ea1 4487 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4488 if (err)
4489 return err;
d9e54076 4490
cf8517cf 4491 *ppos += ret;
bc0c38d1 4492
c2931e05 4493 return ret;
bc0c38d1
SR
4494}
4495
4496static ssize_t
6508fa76
SF
4497tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4498 size_t cnt, loff_t *ppos)
bc0c38d1 4499{
bc0c38d1
SR
4500 char buf[64];
4501 int r;
4502
cffae437 4503 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4504 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4505 if (r > sizeof(buf))
4506 r = sizeof(buf);
4bf39a94 4507 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4508}
4509
4510static ssize_t
6508fa76
SF
4511tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4512 size_t cnt, loff_t *ppos)
bc0c38d1 4513{
5e39841c 4514 unsigned long val;
c6caeeb1 4515 int ret;
bc0c38d1 4516
22fe9b54
PH
4517 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4518 if (ret)
c6caeeb1 4519 return ret;
bc0c38d1
SR
4520
4521 *ptr = val * 1000;
4522
4523 return cnt;
4524}
4525
6508fa76
SF
4526static ssize_t
4527tracing_thresh_read(struct file *filp, char __user *ubuf,
4528 size_t cnt, loff_t *ppos)
4529{
4530 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4531}
4532
4533static ssize_t
4534tracing_thresh_write(struct file *filp, const char __user *ubuf,
4535 size_t cnt, loff_t *ppos)
4536{
4537 struct trace_array *tr = filp->private_data;
4538 int ret;
4539
4540 mutex_lock(&trace_types_lock);
4541 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4542 if (ret < 0)
4543 goto out;
4544
4545 if (tr->current_trace->update_thresh) {
4546 ret = tr->current_trace->update_thresh(tr);
4547 if (ret < 0)
4548 goto out;
4549 }
4550
4551 ret = cnt;
4552out:
4553 mutex_unlock(&trace_types_lock);
4554
4555 return ret;
4556}
4557
4558static ssize_t
4559tracing_max_lat_read(struct file *filp, char __user *ubuf,
4560 size_t cnt, loff_t *ppos)
4561{
4562 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4563}
4564
4565static ssize_t
4566tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4567 size_t cnt, loff_t *ppos)
4568{
4569 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4570}
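/*
 * Example (illustrative): the nsecs helpers above expose the value
 * in microseconds, so writing 100 stores 100000 nanoseconds:
 *
 *	# echo 100 > tracing_thresh
 *	# cat tracing_thresh
 *	100
 */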
4571
b3806b43
SR
4572static int tracing_open_pipe(struct inode *inode, struct file *filp)
4573{
15544209 4574 struct trace_array *tr = inode->i_private;
b3806b43 4575 struct trace_iterator *iter;
b04cc6b1 4576 int ret = 0;
b3806b43
SR
4577
4578 if (tracing_disabled)
4579 return -ENODEV;
4580
7b85af63
SRRH
4581 if (trace_array_get(tr) < 0)
4582 return -ENODEV;
4583
b04cc6b1
FW
4584 mutex_lock(&trace_types_lock);
4585
b3806b43
SR
4586 /* create a buffer to store the information to pass to userspace */
4587 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4588 if (!iter) {
4589 ret = -ENOMEM;
f77d09a3 4590 __trace_array_put(tr);
b04cc6b1
FW
4591 goto out;
4592 }
b3806b43 4593
3a161d99 4594 trace_seq_init(&iter->seq);
d716ff71 4595 iter->trace = tr->current_trace;
d7350c3f 4596
4462344e 4597 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4598 ret = -ENOMEM;
d7350c3f 4599 goto fail;
4462344e
RR
4600 }
4601
a309720c 4602 /* trace pipe does not show start of buffer */
4462344e 4603 cpumask_setall(iter->started);
a309720c 4604
983f938a 4605 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
4606 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4607
8be0709f 4608 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4609 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4610 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4611
15544209
ON
4612 iter->tr = tr;
4613 iter->trace_buffer = &tr->trace_buffer;
4614 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4615 mutex_init(&iter->mutex);
b3806b43
SR
4616 filp->private_data = iter;
4617
107bad8b
SR
4618 if (iter->trace->pipe_open)
4619 iter->trace->pipe_open(iter);
107bad8b 4620
b444786f 4621 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4622
4623 tr->current_trace->ref++;
b04cc6b1
FW
4624out:
4625 mutex_unlock(&trace_types_lock);
4626 return ret;
d7350c3f
FW
4627
4628fail:
 4629 /* do not free iter->trace: it points at tr->current_trace */
4630 kfree(iter);
7b85af63 4631 __trace_array_put(tr);
d7350c3f
FW
4632 mutex_unlock(&trace_types_lock);
4633 return ret;
b3806b43
SR
4634}
4635
4636static int tracing_release_pipe(struct inode *inode, struct file *file)
4637{
4638 struct trace_iterator *iter = file->private_data;
15544209 4639 struct trace_array *tr = inode->i_private;
b3806b43 4640
b04cc6b1
FW
4641 mutex_lock(&trace_types_lock);
4642
cf6ab6d9
SRRH
4643 tr->current_trace->ref--;
4644
29bf4a5e 4645 if (iter->trace->pipe_close)
c521efd1
SR
4646 iter->trace->pipe_close(iter);
4647
b04cc6b1
FW
4648 mutex_unlock(&trace_types_lock);
4649
4462344e 4650 free_cpumask_var(iter->started);
d7350c3f 4651 mutex_destroy(&iter->mutex);
b3806b43 4652 kfree(iter);
b3806b43 4653
7b85af63
SRRH
4654 trace_array_put(tr);
4655
b3806b43
SR
4656 return 0;
4657}
4658
2a2cc8f7 4659static unsigned int
cc60cdc9 4660trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4661{
983f938a
SRRH
4662 struct trace_array *tr = iter->tr;
4663
15693458
SRRH
4664 /* Iterators are static, they should be filled or empty */
4665 if (trace_buffer_iter(iter, iter->cpu_file))
4666 return POLLIN | POLLRDNORM;
2a2cc8f7 4667
983f938a 4668 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4669 /*
4670 * Always select as readable when in blocking mode
4671 */
4672 return POLLIN | POLLRDNORM;
15693458 4673 else
12883efb 4674 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4675 filp, poll_table);
2a2cc8f7 4676}
2a2cc8f7 4677
cc60cdc9
SR
4678static unsigned int
4679tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4680{
4681 struct trace_iterator *iter = filp->private_data;
4682
4683 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4684}
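/*
 * Illustrative userspace sketch (not part of this file): blocking on
 * trace_pipe with poll(), which is serviced by trace_poll() above.
 * The tracefs path is an assumption.
 */
#if 0	/* example only: build as a normal userspace program */
#include <poll.h>
#include <fcntl.h>

static int wait_for_trace_data(void)
{
	struct pollfd pfd;

	pfd.fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	if (pfd.fd < 0)
		return -1;
	pfd.events = POLLIN | POLLRDNORM;

	/* blocks until the ring buffer has readable entries */
	return poll(&pfd, 1, -1);
}
#endif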
4685
d716ff71 4686/* Must be called with iter->mutex held. */
ff98781b 4687static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4688{
4689 struct trace_iterator *iter = filp->private_data;
8b8b3683 4690 int ret;
b3806b43 4691
b3806b43 4692 while (trace_empty(iter)) {
2dc8f095 4693
107bad8b 4694 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4695 return -EAGAIN;
107bad8b 4696 }
2dc8f095 4697
b3806b43 4698 /*
250bfd3d 4699 * We block until we read something and tracing is disabled.
b3806b43
SR
4700 * We still block if tracing is disabled, but we have never
4701 * read anything. This allows a user to cat this file, and
4702 * then enable tracing. But after we have read something,
4703 * we give an EOF when tracing is again disabled.
4704 *
4705 * iter->pos will be 0 if we haven't read anything.
4706 */
10246fa3 4707 if (!tracing_is_on() && iter->pos)
b3806b43 4708 break;
f4874261
SRRH
4709
4710 mutex_unlock(&iter->mutex);
4711
e30f53aa 4712 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4713
4714 mutex_lock(&iter->mutex);
4715
8b8b3683
SRRH
4716 if (ret)
4717 return ret;
b3806b43
SR
4718 }
4719
ff98781b
EGM
4720 return 1;
4721}
4722
4723/*
4724 * Consumer reader.
4725 */
4726static ssize_t
4727tracing_read_pipe(struct file *filp, char __user *ubuf,
4728 size_t cnt, loff_t *ppos)
4729{
4730 struct trace_iterator *iter = filp->private_data;
4731 ssize_t sret;
4732
4733 /* return any leftover data */
4734 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4735 if (sret != -EBUSY)
4736 return sret;
4737
f9520750 4738 trace_seq_init(&iter->seq);
ff98781b 4739
d7350c3f
FW
4740 /*
4741 * Avoid more than one consumer on a single file descriptor
4742 * This is just a matter of traces coherency, the ring buffer itself
4743 * is protected.
4744 */
4745 mutex_lock(&iter->mutex);
ff98781b
EGM
4746 if (iter->trace->read) {
4747 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4748 if (sret)
4749 goto out;
4750 }
4751
4752waitagain:
4753 sret = tracing_wait_pipe(filp);
4754 if (sret <= 0)
4755 goto out;
4756
b3806b43 4757 /* stop when tracing is finished */
ff98781b
EGM
4758 if (trace_empty(iter)) {
4759 sret = 0;
107bad8b 4760 goto out;
ff98781b 4761 }
b3806b43
SR
4762
4763 if (cnt >= PAGE_SIZE)
4764 cnt = PAGE_SIZE - 1;
4765
53d0aa77 4766 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4767 memset(&iter->seq, 0,
4768 sizeof(struct trace_iterator) -
4769 offsetof(struct trace_iterator, seq));
ed5467da 4770 cpumask_clear(iter->started);
4823ed7e 4771 iter->pos = -1;
b3806b43 4772
4f535968 4773 trace_event_read_lock();
7e53bd42 4774 trace_access_lock(iter->cpu_file);
955b61e5 4775 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4776 enum print_line_t ret;
5ac48378 4777 int save_len = iter->seq.seq.len;
088b1e42 4778
f9896bf3 4779 ret = print_trace_line(iter);
2c4f035f 4780 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4781 /* don't print partial lines */
5ac48378 4782 iter->seq.seq.len = save_len;
b3806b43 4783 break;
088b1e42 4784 }
b91facc3
FW
4785 if (ret != TRACE_TYPE_NO_CONSUME)
4786 trace_consume(iter);
b3806b43 4787
5ac48378 4788 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4789 break;
ee5e51f5
JO
4790
4791 /*
4792 * Setting the full flag means we reached the trace_seq buffer
4793 * size and we should leave by partial output condition above.
4794 * One of the trace_seq_* functions is not used properly.
4795 */
4796 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4797 iter->ent->type);
b3806b43 4798 }
7e53bd42 4799 trace_access_unlock(iter->cpu_file);
4f535968 4800 trace_event_read_unlock();
b3806b43 4801
b3806b43 4802 /* Now copy what we have to the user */
6c6c2796 4803 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4804 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4805 trace_seq_init(&iter->seq);
9ff4b974
PP
4806
4807 /*
25985edc 4808 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4809 * entries, go back to wait for more entries.
4810 */
6c6c2796 4811 if (sret == -EBUSY)
9ff4b974 4812 goto waitagain;
b3806b43 4813
107bad8b 4814out:
d7350c3f 4815 mutex_unlock(&iter->mutex);
107bad8b 4816
6c6c2796 4817 return sret;
b3806b43
SR
4818}
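/*
 * Example (illustrative): unlike the 'trace' file, trace_pipe is a
 * consuming read, so each entry is printed once and then removed:
 *
 *	# cat trace_pipe > /tmp/trace.log
 */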
4819
3c56819b
EGM
4820static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4821 unsigned int idx)
4822{
4823 __free_page(spd->pages[idx]);
4824}
4825
28dfef8f 4826static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4827 .can_merge = 0,
34cd4998 4828 .confirm = generic_pipe_buf_confirm,
92fdd98c 4829 .release = generic_pipe_buf_release,
34cd4998
SR
4830 .steal = generic_pipe_buf_steal,
4831 .get = generic_pipe_buf_get,
3c56819b
EGM
4832};
4833
34cd4998 4834static size_t
fa7c7f6e 4835tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4836{
4837 size_t count;
74f06bb7 4838 int save_len;
34cd4998
SR
4839 int ret;
4840
4841 /* Seq buffer is page-sized, exactly what we need. */
4842 for (;;) {
74f06bb7 4843 save_len = iter->seq.seq.len;
34cd4998 4844 ret = print_trace_line(iter);
74f06bb7
SRRH
4845
4846 if (trace_seq_has_overflowed(&iter->seq)) {
4847 iter->seq.seq.len = save_len;
34cd4998
SR
4848 break;
4849 }
74f06bb7
SRRH
4850
4851 /*
4852 * This should not be hit, because it should only
4853 * be set if the iter->seq overflowed. But check it
4854 * anyway to be safe.
4855 */
34cd4998 4856 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4857 iter->seq.seq.len = save_len;
4858 break;
4859 }
4860
5ac48378 4861 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4862 if (rem < count) {
4863 rem = 0;
4864 iter->seq.seq.len = save_len;
34cd4998
SR
4865 break;
4866 }
4867
74e7ff8c
LJ
4868 if (ret != TRACE_TYPE_NO_CONSUME)
4869 trace_consume(iter);
34cd4998 4870 rem -= count;
955b61e5 4871 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4872 rem = 0;
4873 iter->ent = NULL;
4874 break;
4875 }
4876 }
4877
4878 return rem;
4879}
4880
3c56819b
EGM
4881static ssize_t tracing_splice_read_pipe(struct file *filp,
4882 loff_t *ppos,
4883 struct pipe_inode_info *pipe,
4884 size_t len,
4885 unsigned int flags)
4886{
35f3d14d
JA
4887 struct page *pages_def[PIPE_DEF_BUFFERS];
4888 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4889 struct trace_iterator *iter = filp->private_data;
4890 struct splice_pipe_desc spd = {
35f3d14d
JA
4891 .pages = pages_def,
4892 .partial = partial_def,
34cd4998 4893 .nr_pages = 0, /* This gets updated below. */
047fe360 4894 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4895 .flags = flags,
4896 .ops = &tracing_pipe_buf_ops,
4897 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4898 };
4899 ssize_t ret;
34cd4998 4900 size_t rem;
3c56819b
EGM
4901 unsigned int i;
4902
35f3d14d
JA
4903 if (splice_grow_spd(pipe, &spd))
4904 return -ENOMEM;
4905
d7350c3f 4906 mutex_lock(&iter->mutex);
3c56819b
EGM
4907
4908 if (iter->trace->splice_read) {
4909 ret = iter->trace->splice_read(iter, filp,
4910 ppos, pipe, len, flags);
4911 if (ret)
34cd4998 4912 goto out_err;
3c56819b
EGM
4913 }
4914
4915 ret = tracing_wait_pipe(filp);
4916 if (ret <= 0)
34cd4998 4917 goto out_err;
3c56819b 4918
955b61e5 4919 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4920 ret = -EFAULT;
34cd4998 4921 goto out_err;
3c56819b
EGM
4922 }
4923
4f535968 4924 trace_event_read_lock();
7e53bd42 4925 trace_access_lock(iter->cpu_file);
4f535968 4926
3c56819b 4927 /* Fill as many pages as possible. */
a786c06d 4928 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4929 spd.pages[i] = alloc_page(GFP_KERNEL);
4930 if (!spd.pages[i])
34cd4998 4931 break;
3c56819b 4932
fa7c7f6e 4933 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4934
4935 /* Copy the data into the page, so we can start over. */
4936 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4937 page_address(spd.pages[i]),
5ac48378 4938 trace_seq_used(&iter->seq));
3c56819b 4939 if (ret < 0) {
35f3d14d 4940 __free_page(spd.pages[i]);
3c56819b
EGM
4941 break;
4942 }
35f3d14d 4943 spd.partial[i].offset = 0;
5ac48378 4944 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4945
f9520750 4946 trace_seq_init(&iter->seq);
3c56819b
EGM
4947 }
4948
7e53bd42 4949 trace_access_unlock(iter->cpu_file);
4f535968 4950 trace_event_read_unlock();
d7350c3f 4951 mutex_unlock(&iter->mutex);
3c56819b
EGM
4952
4953 spd.nr_pages = i;
4954
35f3d14d
JA
4955 ret = splice_to_pipe(pipe, &spd);
4956out:
047fe360 4957 splice_shrink_spd(&spd);
35f3d14d 4958 return ret;
3c56819b 4959
34cd4998 4960out_err:
d7350c3f 4961 mutex_unlock(&iter->mutex);
35f3d14d 4962 goto out;
3c56819b
EGM
4963}
4964
a98a3c3f
SR
4965static ssize_t
4966tracing_entries_read(struct file *filp, char __user *ubuf,
4967 size_t cnt, loff_t *ppos)
4968{
0bc392ee
ON
4969 struct inode *inode = file_inode(filp);
4970 struct trace_array *tr = inode->i_private;
4971 int cpu = tracing_get_cpu(inode);
438ced17
VN
4972 char buf[64];
4973 int r = 0;
4974 ssize_t ret;
a98a3c3f 4975
db526ca3 4976 mutex_lock(&trace_types_lock);
438ced17 4977
0bc392ee 4978 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4979 int cpu, buf_size_same;
4980 unsigned long size;
4981
4982 size = 0;
4983 buf_size_same = 1;
4984 /* check if all cpu sizes are same */
4985 for_each_tracing_cpu(cpu) {
4986 /* fill in the size from first enabled cpu */
4987 if (size == 0)
12883efb
SRRH
4988 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4989 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4990 buf_size_same = 0;
4991 break;
4992 }
4993 }
4994
4995 if (buf_size_same) {
4996 if (!ring_buffer_expanded)
4997 r = sprintf(buf, "%lu (expanded: %lu)\n",
4998 size >> 10,
4999 trace_buf_size >> 10);
5000 else
5001 r = sprintf(buf, "%lu\n", size >> 10);
5002 } else
5003 r = sprintf(buf, "X\n");
5004 } else
0bc392ee 5005 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5006
db526ca3
SR
5007 mutex_unlock(&trace_types_lock);
5008
438ced17
VN
5009 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5010 return ret;
a98a3c3f
SR
5011}
5012
5013static ssize_t
5014tracing_entries_write(struct file *filp, const char __user *ubuf,
5015 size_t cnt, loff_t *ppos)
5016{
0bc392ee
ON
5017 struct inode *inode = file_inode(filp);
5018 struct trace_array *tr = inode->i_private;
a98a3c3f 5019 unsigned long val;
4f271a2a 5020 int ret;
a98a3c3f 5021
22fe9b54
PH
5022 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5023 if (ret)
c6caeeb1 5024 return ret;
a98a3c3f
SR
5025
5026 /* must have at least 1 entry */
5027 if (!val)
5028 return -EINVAL;
5029
1696b2b0
SR
5030 /* value is in KB */
5031 val <<= 10;
0bc392ee 5032 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5033 if (ret < 0)
5034 return ret;
a98a3c3f 5035
cf8517cf 5036 *ppos += cnt;
a98a3c3f 5037
4f271a2a
VN
5038 return cnt;
5039}
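/*
 * Example (illustrative): the value written above is taken in KB and
 * applied per cpu, so this gives every per-cpu buffer 4MB:
 *
 *	# echo 4096 > buffer_size_kb
 */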
bf5e6519 5040
f81ab074
VN
5041static ssize_t
5042tracing_total_entries_read(struct file *filp, char __user *ubuf,
5043 size_t cnt, loff_t *ppos)
5044{
5045 struct trace_array *tr = filp->private_data;
5046 char buf[64];
5047 int r, cpu;
5048 unsigned long size = 0, expanded_size = 0;
5049
5050 mutex_lock(&trace_types_lock);
5051 for_each_tracing_cpu(cpu) {
12883efb 5052 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5053 if (!ring_buffer_expanded)
5054 expanded_size += trace_buf_size >> 10;
5055 }
5056 if (ring_buffer_expanded)
5057 r = sprintf(buf, "%lu\n", size);
5058 else
5059 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5060 mutex_unlock(&trace_types_lock);
5061
5062 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5063}
5064
4f271a2a
VN
5065static ssize_t
5066tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5067 size_t cnt, loff_t *ppos)
5068{
5069 /*
 5070 * There is no need to read what the user has written; this function
 5071 * exists just so that writing with "echo" does not return an error
5072 */
5073
5074 *ppos += cnt;
a98a3c3f
SR
5075
5076 return cnt;
5077}
5078
4f271a2a
VN
5079static int
5080tracing_free_buffer_release(struct inode *inode, struct file *filp)
5081{
2b6080f2
SR
5082 struct trace_array *tr = inode->i_private;
5083
cf30cf67 5084 /* disable tracing ? */
983f938a 5085 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5086 tracer_tracing_off(tr);
4f271a2a 5087 /* resize the ring buffer to 0 */
2b6080f2 5088 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5089
7b85af63
SRRH
5090 trace_array_put(tr);
5091
4f271a2a
VN
5092 return 0;
5093}
5094
5bf9a1ee
PP
5095static ssize_t
5096tracing_mark_write(struct file *filp, const char __user *ubuf,
5097 size_t cnt, loff_t *fpos)
5098{
d696b58c 5099 unsigned long addr = (unsigned long)ubuf;
2d71619c 5100 struct trace_array *tr = filp->private_data;
d696b58c
SR
5101 struct ring_buffer_event *event;
5102 struct ring_buffer *buffer;
5103 struct print_entry *entry;
5104 unsigned long irq_flags;
5105 struct page *pages[2];
6edb2a8a 5106 void *map_page[2];
d696b58c
SR
5107 int nr_pages = 1;
5108 ssize_t written;
d696b58c
SR
5109 int offset;
5110 int size;
5111 int len;
5112 int ret;
6edb2a8a 5113 int i;
5bf9a1ee 5114
c76f0694 5115 if (tracing_disabled)
5bf9a1ee
PP
5116 return -EINVAL;
5117
983f938a 5118 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5119 return -EINVAL;
5120
5bf9a1ee
PP
5121 if (cnt > TRACE_BUF_SIZE)
5122 cnt = TRACE_BUF_SIZE;
5123
d696b58c
SR
5124 /*
5125 * Userspace is injecting traces into the kernel trace buffer.
 5126 * We want to be as non-intrusive as possible.
5127 * To do so, we do not want to allocate any special buffers
5128 * or take any locks, but instead write the userspace data
5129 * straight into the ring buffer.
5130 *
5131 * First we need to pin the userspace buffer into memory,
 5132 * which most likely it already is, because the caller just referenced it.
5133 * But there's no guarantee that it is. By using get_user_pages_fast()
5134 * and kmap_atomic/kunmap_atomic() we can get access to the
5135 * pages directly. We then write the data directly into the
5136 * ring buffer.
5137 */
5138 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5139
d696b58c
SR
5140 /* check if we cross pages */
5141 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5142 nr_pages = 2;
5143
5144 offset = addr & (PAGE_SIZE - 1);
5145 addr &= PAGE_MASK;
5146
5147 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5148 if (ret < nr_pages) {
5149 while (--ret >= 0)
5150 put_page(pages[ret]);
5151 written = -EFAULT;
5152 goto out;
5bf9a1ee 5153 }
d696b58c 5154
6edb2a8a
SR
5155 for (i = 0; i < nr_pages; i++)
5156 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5157
5158 local_save_flags(irq_flags);
5159 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5160 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5161 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5162 irq_flags, preempt_count());
5163 if (!event) {
5164 /* Ring buffer disabled, return as if not open for write */
5165 written = -EBADF;
5166 goto out_unlock;
5bf9a1ee 5167 }
d696b58c
SR
5168
5169 entry = ring_buffer_event_data(event);
5170 entry->ip = _THIS_IP_;
5171
5172 if (nr_pages == 2) {
5173 len = PAGE_SIZE - offset;
6edb2a8a
SR
5174 memcpy(&entry->buf, map_page[0] + offset, len);
5175 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5176 } else
6edb2a8a 5177 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5178
d696b58c
SR
5179 if (entry->buf[cnt - 1] != '\n') {
5180 entry->buf[cnt] = '\n';
5181 entry->buf[cnt + 1] = '\0';
5182 } else
5183 entry->buf[cnt] = '\0';
5184
7ffbd48d 5185 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5186
d696b58c 5187 written = cnt;
5bf9a1ee 5188
d696b58c 5189 *fpos += written;
1aa54bca 5190
d696b58c 5191 out_unlock:
7215853e 5192 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5193 kunmap_atomic(map_page[i]);
5194 put_page(pages[i]);
5195 }
d696b58c 5196 out:
1aa54bca 5197 return written;
5bf9a1ee
PP
5198}
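/*
 * Illustrative userspace sketch (not part of this file): annotating
 * the trace from an application; each write() lands in
 * tracing_mark_write() above as one marker.  The tracefs path is an
 * assumption.
 */
#if 0	/* example only: build as a normal userspace program */
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

static void trace_mark(const char *msg)
{
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return;
	if (write(fd, msg, strlen(msg)) < 0)
		; /* best effort, nothing to do on failure */
	close(fd);
}
#endif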
5199
13f16d20 5200static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5201{
2b6080f2 5202 struct trace_array *tr = m->private;
5079f326
Z
5203 int i;
5204
5205 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5206 seq_printf(m,
5079f326 5207 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5208 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5209 i == tr->clock_id ? "]" : "");
13f16d20 5210 seq_putc(m, '\n');
5079f326 5211
13f16d20 5212 return 0;
5079f326
Z
5213}
5214
e1e232ca 5215static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5216{
5079f326
Z
5217 int i;
5218
5079f326
Z
5219 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5220 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5221 break;
5222 }
5223 if (i == ARRAY_SIZE(trace_clocks))
5224 return -EINVAL;
5225
5079f326
Z
5226 mutex_lock(&trace_types_lock);
5227
2b6080f2
SR
5228 tr->clock_id = i;
5229
12883efb 5230 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5231
60303ed3
DS
5232 /*
5233 * New clock may not be consistent with the previous clock.
5234 * Reset the buffer so that it doesn't have incomparable timestamps.
5235 */
9457158b 5236 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5237
5238#ifdef CONFIG_TRACER_MAX_TRACE
5239 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5240 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5241 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5242#endif
60303ed3 5243
5079f326
Z
5244 mutex_unlock(&trace_types_lock);
5245
e1e232ca
SR
5246 return 0;
5247}
5248
5249static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5250 size_t cnt, loff_t *fpos)
5251{
5252 struct seq_file *m = filp->private_data;
5253 struct trace_array *tr = m->private;
5254 char buf[64];
5255 const char *clockstr;
5256 int ret;
5257
5258 if (cnt >= sizeof(buf))
5259 return -EINVAL;
5260
5261 if (copy_from_user(&buf, ubuf, cnt))
5262 return -EFAULT;
5263
5264 buf[cnt] = 0;
5265
5266 clockstr = strstrip(buf);
5267
5268 ret = tracing_set_clock(tr, clockstr);
5269 if (ret)
5270 return ret;
5271
5079f326
Z
5272 *fpos += cnt;
5273
5274 return cnt;
5275}
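/*
 * Example (illustrative): the show function above brackets the
 * active clock; the exact list depends on the architecture:
 *
 *	# cat trace_clock
 *	[local] global counter uptime perf
 *	# echo global > trace_clock
 */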
5276
13f16d20
LZ
5277static int tracing_clock_open(struct inode *inode, struct file *file)
5278{
7b85af63
SRRH
5279 struct trace_array *tr = inode->i_private;
5280 int ret;
5281
13f16d20
LZ
5282 if (tracing_disabled)
5283 return -ENODEV;
2b6080f2 5284
7b85af63
SRRH
5285 if (trace_array_get(tr))
5286 return -ENODEV;
5287
5288 ret = single_open(file, tracing_clock_show, inode->i_private);
5289 if (ret < 0)
5290 trace_array_put(tr);
5291
5292 return ret;
13f16d20
LZ
5293}
5294
6de58e62
SRRH
5295struct ftrace_buffer_info {
5296 struct trace_iterator iter;
5297 void *spare;
5298 unsigned int read;
5299};
5300
debdd57f
HT
5301#ifdef CONFIG_TRACER_SNAPSHOT
5302static int tracing_snapshot_open(struct inode *inode, struct file *file)
5303{
6484c71c 5304 struct trace_array *tr = inode->i_private;
debdd57f 5305 struct trace_iterator *iter;
2b6080f2 5306 struct seq_file *m;
debdd57f
HT
5307 int ret = 0;
5308
ff451961
SRRH
5309 if (trace_array_get(tr) < 0)
5310 return -ENODEV;
5311
debdd57f 5312 if (file->f_mode & FMODE_READ) {
6484c71c 5313 iter = __tracing_open(inode, file, true);
debdd57f
HT
5314 if (IS_ERR(iter))
5315 ret = PTR_ERR(iter);
2b6080f2
SR
5316 } else {
5317 /* Writes still need the seq_file to hold the private data */
f77d09a3 5318 ret = -ENOMEM;
2b6080f2
SR
5319 m = kzalloc(sizeof(*m), GFP_KERNEL);
5320 if (!m)
f77d09a3 5321 goto out;
2b6080f2
SR
5322 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5323 if (!iter) {
5324 kfree(m);
f77d09a3 5325 goto out;
2b6080f2 5326 }
f77d09a3
AL
5327 ret = 0;
5328
ff451961 5329 iter->tr = tr;
6484c71c
ON
5330 iter->trace_buffer = &tr->max_buffer;
5331 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5332 m->private = iter;
5333 file->private_data = m;
debdd57f 5334 }
f77d09a3 5335out:
ff451961
SRRH
5336 if (ret < 0)
5337 trace_array_put(tr);
5338
debdd57f
HT
5339 return ret;
5340}
5341
5342static ssize_t
5343tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5344 loff_t *ppos)
5345{
2b6080f2
SR
5346 struct seq_file *m = filp->private_data;
5347 struct trace_iterator *iter = m->private;
5348 struct trace_array *tr = iter->tr;
debdd57f
HT
5349 unsigned long val;
5350 int ret;
5351
5352 ret = tracing_update_buffers();
5353 if (ret < 0)
5354 return ret;
5355
5356 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5357 if (ret)
5358 return ret;
5359
5360 mutex_lock(&trace_types_lock);
5361
2b6080f2 5362 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5363 ret = -EBUSY;
5364 goto out;
5365 }
5366
5367 switch (val) {
5368 case 0:
f1affcaa
SRRH
5369 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5370 ret = -EINVAL;
5371 break;
debdd57f 5372 }
3209cff4
SRRH
5373 if (tr->allocated_snapshot)
5374 free_snapshot(tr);
debdd57f
HT
5375 break;
5376 case 1:
f1affcaa
SRRH
5377/* Only allow per-cpu swap if the ring buffer supports it */
5378#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5379 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5380 ret = -EINVAL;
5381 break;
5382 }
5383#endif
45ad21ca 5384 if (!tr->allocated_snapshot) {
3209cff4 5385 ret = alloc_snapshot(tr);
debdd57f
HT
5386 if (ret < 0)
5387 break;
debdd57f 5388 }
debdd57f
HT
5389 local_irq_disable();
5390 /* Now, we're going to swap */
f1affcaa 5391 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5392 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5393 else
ce9bae55 5394 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5395 local_irq_enable();
5396 break;
5397 default:
45ad21ca 5398 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5399 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5400 tracing_reset_online_cpus(&tr->max_buffer);
5401 else
5402 tracing_reset(&tr->max_buffer, iter->cpu_file);
5403 }
debdd57f
HT
5404 break;
5405 }
5406
5407 if (ret >= 0) {
5408 *ppos += cnt;
5409 ret = cnt;
5410 }
5411out:
5412 mutex_unlock(&trace_types_lock);
5413 return ret;
5414}
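/*
 * Illustrative sketch (not part of trace.c): driving the snapshot file
 * from user space.  Writing "1" hits case 1 above (allocate and swap),
 * reading dumps the swapped-out max buffer, and writing "0" frees it.
 * Assumes CONFIG_TRACER_SNAPSHOT and a tracefs mount at /sys/kernel/tracing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/snapshot", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "1", 1);			/* swap live buffer into max_buffer */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* formatted snapshot contents */
	write(fd, "0", 1);			/* free the snapshot buffer again */
	close(fd);
	return 0;
}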
2b6080f2
SR
5415
5416static int tracing_snapshot_release(struct inode *inode, struct file *file)
5417{
5418 struct seq_file *m = file->private_data;
ff451961
SRRH
5419 int ret;
5420
5421 ret = tracing_release(inode, file);
2b6080f2
SR
5422
5423 if (file->f_mode & FMODE_READ)
ff451961 5424 return ret;
2b6080f2
SR
5425
5426 /* If write only, the seq_file is just a stub */
5427 if (m)
5428 kfree(m->private);
5429 kfree(m);
5430
5431 return 0;
5432}
5433
6de58e62
SRRH
5434static int tracing_buffers_open(struct inode *inode, struct file *filp);
5435static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5436 size_t count, loff_t *ppos);
5437static int tracing_buffers_release(struct inode *inode, struct file *file);
5438static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5439 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5440
5441static int snapshot_raw_open(struct inode *inode, struct file *filp)
5442{
5443 struct ftrace_buffer_info *info;
5444 int ret;
5445
5446 ret = tracing_buffers_open(inode, filp);
5447 if (ret < 0)
5448 return ret;
5449
5450 info = filp->private_data;
5451
5452 if (info->iter.trace->use_max_tr) {
5453 tracing_buffers_release(inode, filp);
5454 return -EBUSY;
5455 }
5456
5457 info->iter.snapshot = true;
5458 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5459
5460 return ret;
5461}
5462
debdd57f
HT
5463#endif /* CONFIG_TRACER_SNAPSHOT */
5464
5465
6508fa76
SF
5466static const struct file_operations tracing_thresh_fops = {
5467 .open = tracing_open_generic,
5468 .read = tracing_thresh_read,
5469 .write = tracing_thresh_write,
5470 .llseek = generic_file_llseek,
5471};
5472
5e2336a0 5473static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5474 .open = tracing_open_generic,
5475 .read = tracing_max_lat_read,
5476 .write = tracing_max_lat_write,
b444786f 5477 .llseek = generic_file_llseek,
bc0c38d1
SR
5478};
5479
5e2336a0 5480static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5481 .open = tracing_open_generic,
5482 .read = tracing_set_trace_read,
5483 .write = tracing_set_trace_write,
b444786f 5484 .llseek = generic_file_llseek,
bc0c38d1
SR
5485};
5486
5e2336a0 5487static const struct file_operations tracing_pipe_fops = {
4bf39a94 5488 .open = tracing_open_pipe,
2a2cc8f7 5489 .poll = tracing_poll_pipe,
4bf39a94 5490 .read = tracing_read_pipe,
3c56819b 5491 .splice_read = tracing_splice_read_pipe,
4bf39a94 5492 .release = tracing_release_pipe,
b444786f 5493 .llseek = no_llseek,
b3806b43
SR
5494};
5495
5e2336a0 5496static const struct file_operations tracing_entries_fops = {
0bc392ee 5497 .open = tracing_open_generic_tr,
a98a3c3f
SR
5498 .read = tracing_entries_read,
5499 .write = tracing_entries_write,
b444786f 5500 .llseek = generic_file_llseek,
0bc392ee 5501 .release = tracing_release_generic_tr,
a98a3c3f
SR
5502};
5503
f81ab074 5504static const struct file_operations tracing_total_entries_fops = {
7b85af63 5505 .open = tracing_open_generic_tr,
f81ab074
VN
5506 .read = tracing_total_entries_read,
5507 .llseek = generic_file_llseek,
7b85af63 5508 .release = tracing_release_generic_tr,
f81ab074
VN
5509};
5510
4f271a2a 5511static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5512 .open = tracing_open_generic_tr,
4f271a2a
VN
5513 .write = tracing_free_buffer_write,
5514 .release = tracing_free_buffer_release,
5515};
5516
5e2336a0 5517static const struct file_operations tracing_mark_fops = {
7b85af63 5518 .open = tracing_open_generic_tr,
5bf9a1ee 5519 .write = tracing_mark_write,
b444786f 5520 .llseek = generic_file_llseek,
7b85af63 5521 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5522};
5523
5079f326 5524static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5525 .open = tracing_clock_open,
5526 .read = seq_read,
5527 .llseek = seq_lseek,
7b85af63 5528 .release = tracing_single_release_tr,
5079f326
Z
5529 .write = tracing_clock_write,
5530};
5531
debdd57f
HT
5532#ifdef CONFIG_TRACER_SNAPSHOT
5533static const struct file_operations snapshot_fops = {
5534 .open = tracing_snapshot_open,
5535 .read = seq_read,
5536 .write = tracing_snapshot_write,
098c879e 5537 .llseek = tracing_lseek,
2b6080f2 5538 .release = tracing_snapshot_release,
debdd57f 5539};
debdd57f 5540
6de58e62
SRRH
5541static const struct file_operations snapshot_raw_fops = {
5542 .open = snapshot_raw_open,
5543 .read = tracing_buffers_read,
5544 .release = tracing_buffers_release,
5545 .splice_read = tracing_buffers_splice_read,
5546 .llseek = no_llseek,
2cadf913
SR
5547};
5548
6de58e62
SRRH
5549#endif /* CONFIG_TRACER_SNAPSHOT */
5550
2cadf913
SR
5551static int tracing_buffers_open(struct inode *inode, struct file *filp)
5552{
46ef2be0 5553 struct trace_array *tr = inode->i_private;
2cadf913 5554 struct ftrace_buffer_info *info;
7b85af63 5555 int ret;
2cadf913
SR
5556
5557 if (tracing_disabled)
5558 return -ENODEV;
5559
7b85af63
SRRH
5560 if (trace_array_get(tr) < 0)
5561 return -ENODEV;
5562
2cadf913 5563 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5564 if (!info) {
5565 trace_array_put(tr);
2cadf913 5566 return -ENOMEM;
7b85af63 5567 }
2cadf913 5568
a695cb58
SRRH
5569 mutex_lock(&trace_types_lock);
5570
cc60cdc9 5571 info->iter.tr = tr;
46ef2be0 5572 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5573 info->iter.trace = tr->current_trace;
12883efb 5574 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5575 info->spare = NULL;
2cadf913 5576 /* Force reading ring buffer for first read */
cc60cdc9 5577 info->read = (unsigned int)-1;
2cadf913
SR
5578
5579 filp->private_data = info;
5580
cf6ab6d9
SRRH
5581 tr->current_trace->ref++;
5582
a695cb58
SRRH
5583 mutex_unlock(&trace_types_lock);
5584
7b85af63
SRRH
5585 ret = nonseekable_open(inode, filp);
5586 if (ret < 0)
5587 trace_array_put(tr);
5588
5589 return ret;
2cadf913
SR
5590}
5591
cc60cdc9
SR
5592static unsigned int
5593tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5594{
5595 struct ftrace_buffer_info *info = filp->private_data;
5596 struct trace_iterator *iter = &info->iter;
5597
5598 return trace_poll(iter, filp, poll_table);
5599}
5600
2cadf913
SR
5601static ssize_t
5602tracing_buffers_read(struct file *filp, char __user *ubuf,
5603 size_t count, loff_t *ppos)
5604{
5605 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5606 struct trace_iterator *iter = &info->iter;
2cadf913 5607 ssize_t ret;
6de58e62 5608 ssize_t size;
2cadf913 5609
2dc5d12b
SR
5610 if (!count)
5611 return 0;
5612
6de58e62 5613#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5614 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5615 return -EBUSY;
6de58e62
SRRH
5616#endif
5617
ddd538f3 5618 if (!info->spare)
12883efb
SRRH
5619 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5620 iter->cpu_file);
ddd538f3 5621 if (!info->spare)
d716ff71 5622 return -ENOMEM;
ddd538f3 5623
2cadf913
SR
5624 /* Do we have previous read data to read? */
5625 if (info->read < PAGE_SIZE)
5626 goto read;
5627
b627344f 5628 again:
cc60cdc9 5629 trace_access_lock(iter->cpu_file);
12883efb 5630 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5631 &info->spare,
5632 count,
cc60cdc9
SR
5633 iter->cpu_file, 0);
5634 trace_access_unlock(iter->cpu_file);
2cadf913 5635
b627344f
SR
5636 if (ret < 0) {
5637 if (trace_empty(iter)) {
d716ff71
SRRH
5638 if ((filp->f_flags & O_NONBLOCK))
5639 return -EAGAIN;
5640
e30f53aa 5641 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5642 if (ret)
5643 return ret;
5644
b627344f
SR
5645 goto again;
5646 }
d716ff71 5647 return 0;
b627344f 5648 }
436fc280 5649
436fc280 5650 info->read = 0;
b627344f 5651 read:
2cadf913
SR
5652 size = PAGE_SIZE - info->read;
5653 if (size > count)
5654 size = count;
5655
5656 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5657 if (ret == size)
5658 return -EFAULT;
5659
2dc5d12b
SR
5660 size -= ret;
5661
2cadf913
SR
5662 *ppos += size;
5663 info->read += size;
5664
5665 return size;
5666}
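/*
 * Illustrative sketch (not part of trace.c): a non-blocking consumer of
 * trace_pipe_raw, pairing tracing_buffers_poll() with tracing_buffers_read().
 * The per-cpu path and the 4096-byte page size are assumptions about the
 * local system.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];	/* reads are handed out in ring-buffer pages */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		n = read(pfd.fd, page, sizeof(page));
		if (n > 0)
			fwrite(page, 1, n, stdout);	/* raw binary page */
		else if (n < 0)
			break;	/* e.g. EAGAIN if the buffer drained */
	}
	close(pfd.fd);
	return 0;
}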
5667
5668static int tracing_buffers_release(struct inode *inode, struct file *file)
5669{
5670 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5671 struct trace_iterator *iter = &info->iter;
2cadf913 5672
a695cb58
SRRH
5673 mutex_lock(&trace_types_lock);
5674
cf6ab6d9
SRRH
5675 iter->tr->current_trace->ref--;
5676
ff451961 5677 __trace_array_put(iter->tr);
2cadf913 5678
ddd538f3 5679 if (info->spare)
12883efb 5680 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5681 kfree(info);
5682
a695cb58
SRRH
5683 mutex_unlock(&trace_types_lock);
5684
2cadf913
SR
5685 return 0;
5686}
5687
5688struct buffer_ref {
5689 struct ring_buffer *buffer;
5690 void *page;
5691 int ref;
5692};
5693
5694static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5695 struct pipe_buffer *buf)
5696{
5697 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5698
5699 if (--ref->ref)
5700 return;
5701
5702 ring_buffer_free_read_page(ref->buffer, ref->page);
5703 kfree(ref);
5704 buf->private = 0;
5705}
5706
2cadf913
SR
5707static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5708 struct pipe_buffer *buf)
5709{
5710 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5711
5712 ref->ref++;
5713}
5714
5715/* Pipe buffer operations for a buffer. */
28dfef8f 5716static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5717 .can_merge = 0,
2cadf913
SR
5718 .confirm = generic_pipe_buf_confirm,
5719 .release = buffer_pipe_buf_release,
d55cb6cf 5720 .steal = generic_pipe_buf_steal,
2cadf913
SR
5721 .get = buffer_pipe_buf_get,
5722};
5723
5724/*
5725 * Callback from splice_to_pipe(); used to release any remaining pages
5726 * at the end of the spd in case we errored out while filling the pipe.
5727 */
5728static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5729{
5730 struct buffer_ref *ref =
5731 (struct buffer_ref *)spd->partial[i].private;
5732
5733 if (--ref->ref)
5734 return;
5735
5736 ring_buffer_free_read_page(ref->buffer, ref->page);
5737 kfree(ref);
5738 spd->partial[i].private = 0;
5739}
5740
5741static ssize_t
5742tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5743 struct pipe_inode_info *pipe, size_t len,
5744 unsigned int flags)
5745{
5746 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5747 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5748 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5749 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5750 struct splice_pipe_desc spd = {
35f3d14d
JA
5751 .pages = pages_def,
5752 .partial = partial_def,
047fe360 5753 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5754 .flags = flags,
5755 .ops = &buffer_pipe_buf_ops,
5756 .spd_release = buffer_spd_release,
5757 };
5758 struct buffer_ref *ref;
93459c6c 5759 int entries, size, i;
07906da7 5760 ssize_t ret = 0;
2cadf913 5761
6de58e62 5762#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5763 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5764 return -EBUSY;
6de58e62
SRRH
5765#endif
5766
d716ff71
SRRH
5767 if (splice_grow_spd(pipe, &spd))
5768 return -ENOMEM;
35f3d14d 5769
d716ff71
SRRH
5770 if (*ppos & (PAGE_SIZE - 1))
5771 return -EINVAL;
93cfb3c9
LJ
5772
5773 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5774 if (len < PAGE_SIZE)
5775 return -EINVAL;
93cfb3c9
LJ
5776 len &= PAGE_MASK;
5777 }
5778
cc60cdc9
SR
5779 again:
5780 trace_access_lock(iter->cpu_file);
12883efb 5781 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5782
a786c06d 5783 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5784 struct page *page;
5785 int r;
5786
5787 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5788 if (!ref) {
5789 ret = -ENOMEM;
2cadf913 5790 break;
07906da7 5791 }
2cadf913 5792
7267fa68 5793 ref->ref = 1;
12883efb 5794 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5795 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5796 if (!ref->page) {
07906da7 5797 ret = -ENOMEM;
2cadf913
SR
5798 kfree(ref);
5799 break;
5800 }
5801
5802 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5803 len, iter->cpu_file, 1);
2cadf913 5804 if (r < 0) {
7ea59064 5805 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5806 kfree(ref);
5807 break;
5808 }
5809
5810 /*
5811 * zero out any left-over data; this is going to
5812 * user land.
5813 */
5814 size = ring_buffer_page_len(ref->page);
5815 if (size < PAGE_SIZE)
5816 memset(ref->page + size, 0, PAGE_SIZE - size);
5817
5818 page = virt_to_page(ref->page);
5819
5820 spd.pages[i] = page;
5821 spd.partial[i].len = PAGE_SIZE;
5822 spd.partial[i].offset = 0;
5823 spd.partial[i].private = (unsigned long)ref;
5824 spd.nr_pages++;
93cfb3c9 5825 *ppos += PAGE_SIZE;
93459c6c 5826
12883efb 5827 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5828 }
5829
cc60cdc9 5830 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5831 spd.nr_pages = i;
5832
5833 /* did we read anything? */
5834 if (!spd.nr_pages) {
07906da7 5835 if (ret)
d716ff71
SRRH
5836 return ret;
5837
5838 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5839 return -EAGAIN;
07906da7 5840
e30f53aa 5841 ret = wait_on_pipe(iter, true);
8b8b3683 5842 if (ret)
d716ff71 5843 return ret;
e30f53aa 5844
cc60cdc9 5845 goto again;
2cadf913
SR
5846 }
5847
5848 ret = splice_to_pipe(pipe, &spd);
047fe360 5849 splice_shrink_spd(&spd);
6de58e62 5850
2cadf913
SR
5851 return ret;
5852}
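/*
 * Illustrative sketch (not part of trace.c): the zero-copy path that
 * tracing_buffers_splice_read() serves.  Ring-buffer pages are spliced
 * into a pipe and on to a file without copying through user memory.
 * Paths and the 4096-byte page size are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int p[2];
	ssize_t n;
	int in = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	int out = open("trace.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0 || pipe(p) < 0) {
		perror("setup");
		return 1;
	}
	/* len must be page aligned: see the PAGE_SIZE checks above */
	while ((n = splice(in, NULL, p[1], NULL, 4096, SPLICE_F_NONBLOCK)) > 0)
		splice(p[0], NULL, out, NULL, n, 0);
	return 0;
}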
5853
5854static const struct file_operations tracing_buffers_fops = {
5855 .open = tracing_buffers_open,
5856 .read = tracing_buffers_read,
cc60cdc9 5857 .poll = tracing_buffers_poll,
2cadf913
SR
5858 .release = tracing_buffers_release,
5859 .splice_read = tracing_buffers_splice_read,
5860 .llseek = no_llseek,
5861};
5862
c8d77183
SR
5863static ssize_t
5864tracing_stats_read(struct file *filp, char __user *ubuf,
5865 size_t count, loff_t *ppos)
5866{
4d3435b8
ON
5867 struct inode *inode = file_inode(filp);
5868 struct trace_array *tr = inode->i_private;
12883efb 5869 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5870 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5871 struct trace_seq *s;
5872 unsigned long cnt;
c64e148a
VN
5873 unsigned long long t;
5874 unsigned long usec_rem;
c8d77183 5875
e4f2d10f 5876 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5877 if (!s)
a646365c 5878 return -ENOMEM;
c8d77183
SR
5879
5880 trace_seq_init(s);
5881
12883efb 5882 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5883 trace_seq_printf(s, "entries: %ld\n", cnt);
5884
12883efb 5885 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5886 trace_seq_printf(s, "overrun: %ld\n", cnt);
5887
12883efb 5888 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5889 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5890
12883efb 5891 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5892 trace_seq_printf(s, "bytes: %ld\n", cnt);
5893
58e8eedf 5894 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5895 /* local or global for trace_clock */
12883efb 5896 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5897 usec_rem = do_div(t, USEC_PER_SEC);
5898 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5899 t, usec_rem);
5900
12883efb 5901 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5902 usec_rem = do_div(t, USEC_PER_SEC);
5903 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5904 } else {
5905 /* counter or tsc mode for trace_clock */
5906 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5907 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5908
11043d8b 5909 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5910 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5911 }
c64e148a 5912
12883efb 5913 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5914 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5915
12883efb 5916 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5917 trace_seq_printf(s, "read events: %ld\n", cnt);
5918
5ac48378
SRRH
5919 count = simple_read_from_buffer(ubuf, count, ppos,
5920 s->buffer, trace_seq_used(s));
c8d77183
SR
5921
5922 kfree(s);
5923
5924 return count;
5925}
5926
5927static const struct file_operations tracing_stats_fops = {
4d3435b8 5928 .open = tracing_open_generic_tr,
c8d77183 5929 .read = tracing_stats_read,
b444786f 5930 .llseek = generic_file_llseek,
4d3435b8 5931 .release = tracing_release_generic_tr,
c8d77183
SR
5932};
5933
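/*
 * Illustrative sketch (not part of trace.c): reading the per-cpu "stats"
 * file that tracing_stats_read() serves.  The output is the plain-text
 * "entries:" / "overrun:" / "commit overrun:" block built above.  The
 * path is an assumption about the tracefs mount.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/stats", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}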
bc0c38d1
SR
5934#ifdef CONFIG_DYNAMIC_FTRACE
5935
b807c3d0
SR
5936int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5937{
5938 return 0;
5939}
5940
bc0c38d1 5941static ssize_t
b807c3d0 5942tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5943 size_t cnt, loff_t *ppos)
5944{
a26a2a27
SR
5945 static char ftrace_dyn_info_buffer[1024];
5946 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5947 unsigned long *p = filp->private_data;
b807c3d0 5948 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5949 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5950 int r;
5951
b807c3d0
SR
5952 mutex_lock(&dyn_info_mutex);
5953 r = sprintf(buf, "%ld ", *p);
4bf39a94 5954
a26a2a27 5955 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5956 buf[r++] = '\n';
5957
5958 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5959
5960 mutex_unlock(&dyn_info_mutex);
5961
5962 return r;
bc0c38d1
SR
5963}
5964
5e2336a0 5965static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5966 .open = tracing_open_generic,
b807c3d0 5967 .read = tracing_read_dyn_info,
b444786f 5968 .llseek = generic_file_llseek,
bc0c38d1 5969};
77fd5c15 5970#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5971
77fd5c15
SRRH
5972#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5973static void
5974ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5975{
5976 tracing_snapshot();
5977}
bc0c38d1 5978
77fd5c15
SRRH
5979static void
5980ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5981{
77fd5c15
SRRH
5982 unsigned long *count = (long *)data;
5983
5984 if (!*count)
5985 return;
bc0c38d1 5986
77fd5c15
SRRH
5987 if (*count != -1)
5988 (*count)--;
5989
5990 tracing_snapshot();
5991}
5992
5993static int
5994ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5995 struct ftrace_probe_ops *ops, void *data)
5996{
5997 long count = (long)data;
5998
5999 seq_printf(m, "%ps:", (void *)ip);
6000
fa6f0cc7 6001 seq_puts(m, "snapshot");
77fd5c15
SRRH
6002
6003 if (count == -1)
fa6f0cc7 6004 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
6005 else
6006 seq_printf(m, ":count=%ld\n", count);
6007
6008 return 0;
6009}
6010
6011static struct ftrace_probe_ops snapshot_probe_ops = {
6012 .func = ftrace_snapshot,
6013 .print = ftrace_snapshot_print,
6014};
6015
6016static struct ftrace_probe_ops snapshot_count_probe_ops = {
6017 .func = ftrace_count_snapshot,
6018 .print = ftrace_snapshot_print,
6019};
6020
6021static int
6022ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6023 char *glob, char *cmd, char *param, int enable)
6024{
6025 struct ftrace_probe_ops *ops;
6026 void *count = (void *)-1;
6027 char *number;
6028 int ret;
6029
6030 /* hash funcs only work with set_ftrace_filter */
6031 if (!enable)
6032 return -EINVAL;
6033
6034 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6035
6036 if (glob[0] == '!') {
6037 unregister_ftrace_function_probe_func(glob+1, ops);
6038 return 0;
6039 }
6040
6041 if (!param)
6042 goto out_reg;
6043
6044 number = strsep(&param, ":");
6045
6046 if (!strlen(number))
6047 goto out_reg;
6048
6049 /*
6050 * We use the callback data field (which is a pointer)
6051 * as our counter.
6052 */
6053 ret = kstrtoul(number, 0, (unsigned long *)&count);
6054 if (ret)
6055 return ret;
6056
6057 out_reg:
6058 ret = register_ftrace_function_probe(glob, ops, count);
6059
6060 if (ret >= 0)
6061 alloc_snapshot(&global_trace);
6062
6063 return ret < 0 ? ret : 0;
6064}
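/*
 * Illustrative sketch (not part of trace.c): registering the "snapshot"
 * function command that ftrace_trace_snapshot_callback() parses.  The
 * format is "<function>:snapshot[:<count>]"; a leading '!' unregisters.
 * "schedule" is an arbitrary choice of a traceable symbol.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "schedule:snapshot:1";	/* one-shot snapshot */
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}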
6065
6066static struct ftrace_func_command ftrace_snapshot_cmd = {
6067 .name = "snapshot",
6068 .func = ftrace_trace_snapshot_callback,
6069};
6070
38de93ab 6071static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6072{
6073 return register_ftrace_command(&ftrace_snapshot_cmd);
6074}
6075#else
38de93ab 6076static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6077#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6078
7eeafbca 6079static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6080{
8434dc93
SRRH
6081 if (WARN_ON(!tr->dir))
6082 return ERR_PTR(-ENODEV);
6083
6084 /* Top directory uses NULL as the parent */
6085 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6086 return NULL;
6087
6088 /* All sub buffers have a descriptor */
2b6080f2 6089 return tr->dir;
bc0c38d1
SR
6090}
6091
2b6080f2 6092static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6093{
b04cc6b1
FW
6094 struct dentry *d_tracer;
6095
2b6080f2
SR
6096 if (tr->percpu_dir)
6097 return tr->percpu_dir;
b04cc6b1 6098
7eeafbca 6099 d_tracer = tracing_get_dentry(tr);
14a5ae40 6100 if (IS_ERR(d_tracer))
b04cc6b1
FW
6101 return NULL;
6102
8434dc93 6103 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6104
2b6080f2 6105 WARN_ONCE(!tr->percpu_dir,
8434dc93 6106 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6107
2b6080f2 6108 return tr->percpu_dir;
b04cc6b1
FW
6109}
6110
649e9c70
ON
6111static struct dentry *
6112trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6113 void *data, long cpu, const struct file_operations *fops)
6114{
6115 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6116
6117 if (ret) /* See tracing_get_cpu() */
7682c918 6118 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6119 return ret;
6120}
6121
2b6080f2 6122static void
8434dc93 6123tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6124{
2b6080f2 6125 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6126 struct dentry *d_cpu;
dd49a38c 6127 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6128
0a3d7ce7
NK
6129 if (!d_percpu)
6130 return;
6131
dd49a38c 6132 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6133 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6134 if (!d_cpu) {
8434dc93 6135 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6136 return;
6137 }
b04cc6b1 6138
8656e7a2 6139 /* per cpu trace_pipe */
649e9c70 6140 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6141 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6142
6143 /* per cpu trace */
649e9c70 6144 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6145 tr, cpu, &tracing_fops);
7f96f93f 6146
649e9c70 6147 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6148 tr, cpu, &tracing_buffers_fops);
7f96f93f 6149
649e9c70 6150 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6151 tr, cpu, &tracing_stats_fops);
438ced17 6152
649e9c70 6153 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6154 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6155
6156#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6157 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6158 tr, cpu, &snapshot_fops);
6de58e62 6159
649e9c70 6160 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6161 tr, cpu, &snapshot_raw_fops);
f1affcaa 6162#endif
b04cc6b1
FW
6163}
6164
60a11774
SR
6165#ifdef CONFIG_FTRACE_SELFTEST
6166/* Let selftest have access to static functions in this file */
6167#include "trace_selftest.c"
6168#endif
6169
577b785f
SR
6170static ssize_t
6171trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6172 loff_t *ppos)
6173{
6174 struct trace_option_dentry *topt = filp->private_data;
6175 char *buf;
6176
6177 if (topt->flags->val & topt->opt->bit)
6178 buf = "1\n";
6179 else
6180 buf = "0\n";
6181
6182 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6183}
6184
6185static ssize_t
6186trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6187 loff_t *ppos)
6188{
6189 struct trace_option_dentry *topt = filp->private_data;
6190 unsigned long val;
577b785f
SR
6191 int ret;
6192
22fe9b54
PH
6193 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6194 if (ret)
577b785f
SR
6195 return ret;
6196
8d18eaaf
LZ
6197 if (val != 0 && val != 1)
6198 return -EINVAL;
577b785f 6199
8d18eaaf 6200 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6201 mutex_lock(&trace_types_lock);
8c1a49ae 6202 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6203 topt->opt, !val);
577b785f
SR
6204 mutex_unlock(&trace_types_lock);
6205 if (ret)
6206 return ret;
577b785f
SR
6207 }
6208
6209 *ppos += cnt;
6210
6211 return cnt;
6212}
6213
6214
6215static const struct file_operations trace_options_fops = {
6216 .open = tracing_open_generic,
6217 .read = trace_options_read,
6218 .write = trace_options_write,
b444786f 6219 .llseek = generic_file_llseek,
577b785f
SR
6220};
6221
9a38a885
SRRH
6222/*
6223 * In order to pass in both the trace_array descriptor as well as the index
6224 * to the flag that the trace option file represents, the trace_array
6225 * has a character array of trace_flags_index[], which holds the index
6226 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6227 * The address of this character array is passed to the flag option file
6228 * read/write callbacks.
6229 *
6230 * In order to extract both the index and the trace_array descriptor,
6231 * get_tr_index() uses the following algorithm.
6232 *
6233 * idx = *ptr;
6234 *
6235 * As the pointer itself contains the address of the index (remember
6236 * index[1] == 1).
6237 *
6238 * Then to get the trace_array descriptor, by subtracting that index
6239 * from the ptr, we get to the start of the index itself.
6240 *
6241 * ptr - idx == &index[0]
6242 *
6243 * Then a simple container_of() from that pointer gets us to the
6244 * trace_array descriptor.
6245 */
6246static void get_tr_index(void *data, struct trace_array **ptr,
6247 unsigned int *pindex)
6248{
6249 *pindex = *(unsigned char *)data;
6250
6251 *ptr = container_of(data - *pindex, struct trace_array,
6252 trace_flags_index);
6253}
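/*
 * Illustrative sketch (not part of trace.c): the index-array trick that
 * get_tr_index() relies on, reduced to a stand-alone program.  The struct
 * below is a stand-in for trace_array; only the recovery arithmetic is
 * the point.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_trace_array {
	unsigned long trace_flags;
	unsigned char trace_flags_index[8];	/* index[i] == i */
};

int main(void)
{
	struct fake_trace_array tr = { .trace_flags = 0xf0 };
	struct fake_trace_array *rec;
	unsigned char *data;
	unsigned int idx, i;

	for (i = 0; i < 8; i++)
		tr.trace_flags_index[i] = i;

	data = &tr.trace_flags_index[5];	/* what one option file stores */
	idx = *data;				/* the bit index, 5 */
	/* data - idx == &trace_flags_index[0], so container_of finds tr */
	rec = container_of(data - idx, struct fake_trace_array,
			   trace_flags_index);
	printf("idx=%u, recovered trace_flags=%#lx\n", idx, rec->trace_flags);
	return 0;
}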
6254
a8259075
SR
6255static ssize_t
6256trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6257 loff_t *ppos)
6258{
9a38a885
SRRH
6259 void *tr_index = filp->private_data;
6260 struct trace_array *tr;
6261 unsigned int index;
a8259075
SR
6262 char *buf;
6263
9a38a885
SRRH
6264 get_tr_index(tr_index, &tr, &index);
6265
6266 if (tr->trace_flags & (1 << index))
a8259075
SR
6267 buf = "1\n";
6268 else
6269 buf = "0\n";
6270
6271 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6272}
6273
6274static ssize_t
6275trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6276 loff_t *ppos)
6277{
9a38a885
SRRH
6278 void *tr_index = filp->private_data;
6279 struct trace_array *tr;
6280 unsigned int index;
a8259075
SR
6281 unsigned long val;
6282 int ret;
6283
9a38a885
SRRH
6284 get_tr_index(tr_index, &tr, &index);
6285
22fe9b54
PH
6286 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6287 if (ret)
a8259075
SR
6288 return ret;
6289
f2d84b65 6290 if (val != 0 && val != 1)
a8259075 6291 return -EINVAL;
69d34da2
SRRH
6292
6293 mutex_lock(&trace_types_lock);
2b6080f2 6294 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6295 mutex_unlock(&trace_types_lock);
a8259075 6296
613f04a0
SRRH
6297 if (ret < 0)
6298 return ret;
6299
a8259075
SR
6300 *ppos += cnt;
6301
6302 return cnt;
6303}
6304
a8259075
SR
6305static const struct file_operations trace_options_core_fops = {
6306 .open = tracing_open_generic,
6307 .read = trace_options_core_read,
6308 .write = trace_options_core_write,
b444786f 6309 .llseek = generic_file_llseek,
a8259075
SR
6310};
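/*
 * Illustrative sketch (not part of trace.c): flipping one core option bit
 * through its file in the options/ directory, which lands in
 * trace_options_core_write() above.  "print-parent" is an assumption;
 * list options/ for the flags a given kernel exposes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/options/print-parent", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the handler accepts exactly "0" or "1"; anything else is EINVAL */
	if (write(fd, "0", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}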
6311
5452af66 6312struct dentry *trace_create_file(const char *name,
f4ae40a6 6313 umode_t mode,
5452af66
FW
6314 struct dentry *parent,
6315 void *data,
6316 const struct file_operations *fops)
6317{
6318 struct dentry *ret;
6319
8434dc93 6320 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6321 if (!ret)
8434dc93 6322 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6323
6324 return ret;
6325}
6326
6327
2b6080f2 6328static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6329{
6330 struct dentry *d_tracer;
a8259075 6331
2b6080f2
SR
6332 if (tr->options)
6333 return tr->options;
a8259075 6334
7eeafbca 6335 d_tracer = tracing_get_dentry(tr);
14a5ae40 6336 if (IS_ERR(d_tracer))
a8259075
SR
6337 return NULL;
6338
8434dc93 6339 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6340 if (!tr->options) {
8434dc93 6341 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6342 return NULL;
6343 }
6344
2b6080f2 6345 return tr->options;
a8259075
SR
6346}
6347
577b785f 6348static void
2b6080f2
SR
6349create_trace_option_file(struct trace_array *tr,
6350 struct trace_option_dentry *topt,
577b785f
SR
6351 struct tracer_flags *flags,
6352 struct tracer_opt *opt)
6353{
6354 struct dentry *t_options;
577b785f 6355
2b6080f2 6356 t_options = trace_options_init_dentry(tr);
577b785f
SR
6357 if (!t_options)
6358 return;
6359
6360 topt->flags = flags;
6361 topt->opt = opt;
2b6080f2 6362 topt->tr = tr;
577b785f 6363
5452af66 6364 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6365 &trace_options_fops);
6366
577b785f
SR
6367}
6368
37aea98b 6369static void
2b6080f2 6370create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6371{
6372 struct trace_option_dentry *topts;
37aea98b 6373 struct trace_options *tr_topts;
577b785f
SR
6374 struct tracer_flags *flags;
6375 struct tracer_opt *opts;
6376 int cnt;
37aea98b 6377 int i;
577b785f
SR
6378
6379 if (!tracer)
37aea98b 6380 return;
577b785f
SR
6381
6382 flags = tracer->flags;
6383
6384 if (!flags || !flags->opts)
37aea98b
SRRH
6385 return;
6386
6387 /*
6388 * If this is an instance, only create flags for tracers
6389 * the instance may have.
6390 */
6391 if (!trace_ok_for_array(tracer, tr))
6392 return;
6393
6394 for (i = 0; i < tr->nr_topts; i++) {
6395 /*
6396 * Check if these flags have already been added.
6397 * Some tracers share flags.
6398 */
6399 if (tr->topts[i].tracer->flags == tracer->flags)
6400 return;
6401 }
577b785f
SR
6402
6403 opts = flags->opts;
6404
6405 for (cnt = 0; opts[cnt].name; cnt++)
6406 ;
6407
0cfe8245 6408 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 6409 if (!topts)
37aea98b
SRRH
6410 return;
6411
6412 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6413 GFP_KERNEL);
6414 if (!tr_topts) {
6415 kfree(topts);
6416 return;
6417 }
6418
6419 tr->topts = tr_topts;
6420 tr->topts[tr->nr_topts].tracer = tracer;
6421 tr->topts[tr->nr_topts].topts = topts;
6422 tr->nr_topts++;
577b785f 6423
41d9c0be 6424 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 6425 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 6426 &opts[cnt]);
41d9c0be
SRRH
6427 WARN_ONCE(topts[cnt].entry == NULL,
6428 "Failed to create trace option: %s",
6429 opts[cnt].name);
6430 }
577b785f
SR
6431}
6432
a8259075 6433static struct dentry *
2b6080f2
SR
6434create_trace_option_core_file(struct trace_array *tr,
6435 const char *option, long index)
a8259075
SR
6436{
6437 struct dentry *t_options;
a8259075 6438
2b6080f2 6439 t_options = trace_options_init_dentry(tr);
a8259075
SR
6440 if (!t_options)
6441 return NULL;
6442
9a38a885
SRRH
6443 return trace_create_file(option, 0644, t_options,
6444 (void *)&tr->trace_flags_index[index],
6445 &trace_options_core_fops);
a8259075
SR
6446}
6447
16270145 6448static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6449{
6450 struct dentry *t_options;
16270145 6451 bool top_level = tr == &global_trace;
a8259075
SR
6452 int i;
6453
2b6080f2 6454 t_options = trace_options_init_dentry(tr);
a8259075
SR
6455 if (!t_options)
6456 return;
6457
16270145
SRRH
6458 for (i = 0; trace_options[i]; i++) {
6459 if (top_level ||
6460 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6461 create_trace_option_core_file(tr, trace_options[i], i);
6462 }
a8259075
SR
6463}
6464
499e5470
SR
6465static ssize_t
6466rb_simple_read(struct file *filp, char __user *ubuf,
6467 size_t cnt, loff_t *ppos)
6468{
348f0fc2 6469 struct trace_array *tr = filp->private_data;
499e5470
SR
6470 char buf[64];
6471 int r;
6472
10246fa3 6473 r = tracer_tracing_is_on(tr);
499e5470
SR
6474 r = sprintf(buf, "%d\n", r);
6475
6476 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6477}
6478
6479static ssize_t
6480rb_simple_write(struct file *filp, const char __user *ubuf,
6481 size_t cnt, loff_t *ppos)
6482{
348f0fc2 6483 struct trace_array *tr = filp->private_data;
12883efb 6484 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6485 unsigned long val;
6486 int ret;
6487
6488 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6489 if (ret)
6490 return ret;
6491
6492 if (buffer) {
2df8f8a6
SR
6493 mutex_lock(&trace_types_lock);
6494 if (val) {
10246fa3 6495 tracer_tracing_on(tr);
2b6080f2
SR
6496 if (tr->current_trace->start)
6497 tr->current_trace->start(tr);
2df8f8a6 6498 } else {
10246fa3 6499 tracer_tracing_off(tr);
2b6080f2
SR
6500 if (tr->current_trace->stop)
6501 tr->current_trace->stop(tr);
2df8f8a6
SR
6502 }
6503 mutex_unlock(&trace_types_lock);
499e5470
SR
6504 }
6505
6506 (*ppos)++;
6507
6508 return cnt;
6509}
6510
6511static const struct file_operations rb_simple_fops = {
7b85af63 6512 .open = tracing_open_generic_tr,
499e5470
SR
6513 .read = rb_simple_read,
6514 .write = rb_simple_write,
7b85af63 6515 .release = tracing_release_generic_tr,
499e5470
SR
6516 .llseek = default_llseek,
6517};
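/*
 * Illustrative sketch (not part of trace.c): toggling the ring buffer via
 * the tracing_on file that rb_simple_fops serves.  Writing also calls the
 * current tracer's start/stop hooks, as rb_simple_write() shows above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char state;
	int fd = open("/sys/kernel/tracing/tracing_on", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "0", 1);		/* tracer_tracing_off() + ->stop() */
	pread(fd, &state, 1, 0);	/* rb_simple_read(): '0' or '1' */
	printf("tracing_on = %c\n", state);
	write(fd, "1", 1);		/* back on: tracer_tracing_on() */
	close(fd);
	return 0;
}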
6518
277ba044
SR
6519struct dentry *trace_instance_dir;
6520
6521static void
8434dc93 6522init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6523
55034cd6
SRRH
6524static int
6525allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6526{
6527 enum ring_buffer_flags rb_flags;
737223fb 6528
983f938a 6529 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 6530
dced341b
SRRH
6531 buf->tr = tr;
6532
55034cd6
SRRH
6533 buf->buffer = ring_buffer_alloc(size, rb_flags);
6534 if (!buf->buffer)
6535 return -ENOMEM;
737223fb 6536
55034cd6
SRRH
6537 buf->data = alloc_percpu(struct trace_array_cpu);
6538 if (!buf->data) {
6539 ring_buffer_free(buf->buffer);
6540 return -ENOMEM;
6541 }
737223fb 6542
737223fb
SRRH
6543 /* Allocate the first page for all buffers */
6544 set_buffer_entries(&tr->trace_buffer,
6545 ring_buffer_size(tr->trace_buffer.buffer, 0));
6546
55034cd6
SRRH
6547 return 0;
6548}
737223fb 6549
55034cd6
SRRH
6550static int allocate_trace_buffers(struct trace_array *tr, int size)
6551{
6552 int ret;
737223fb 6553
55034cd6
SRRH
6554 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6555 if (ret)
6556 return ret;
737223fb 6557
55034cd6
SRRH
6558#ifdef CONFIG_TRACER_MAX_TRACE
6559 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6560 allocate_snapshot ? size : 1);
6561 if (WARN_ON(ret)) {
737223fb 6562 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6563 free_percpu(tr->trace_buffer.data);
6564 return -ENOMEM;
6565 }
6566 tr->allocated_snapshot = allocate_snapshot;
737223fb 6567
55034cd6
SRRH
6568 /*
6569 * Only the top level trace array gets its snapshot allocated
6570 * from the kernel command line.
6571 */
6572 allocate_snapshot = false;
737223fb 6573#endif
55034cd6 6574 return 0;
737223fb
SRRH
6575}
6576
f0b70cc4
SRRH
6577static void free_trace_buffer(struct trace_buffer *buf)
6578{
6579 if (buf->buffer) {
6580 ring_buffer_free(buf->buffer);
6581 buf->buffer = NULL;
6582 free_percpu(buf->data);
6583 buf->data = NULL;
6584 }
6585}
6586
23aaa3c1
SRRH
6587static void free_trace_buffers(struct trace_array *tr)
6588{
6589 if (!tr)
6590 return;
6591
f0b70cc4 6592 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6593
6594#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6595 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6596#endif
6597}
6598
9a38a885
SRRH
6599static void init_trace_flags_index(struct trace_array *tr)
6600{
6601 int i;
6602
6603 /* Used by the trace options files */
6604 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6605 tr->trace_flags_index[i] = i;
6606}
6607
37aea98b
SRRH
6608static void __update_tracer_options(struct trace_array *tr)
6609{
6610 struct tracer *t;
6611
6612 for (t = trace_types; t; t = t->next)
6613 add_tracer_options(tr, t);
6614}
6615
6616static void update_tracer_options(struct trace_array *tr)
6617{
6618 mutex_lock(&trace_types_lock);
6619 __update_tracer_options(tr);
6620 mutex_unlock(&trace_types_lock);
6621}
6622
eae47358 6623static int instance_mkdir(const char *name)
737223fb 6624{
277ba044
SR
6625 struct trace_array *tr;
6626 int ret;
277ba044
SR
6627
6628 mutex_lock(&trace_types_lock);
6629
6630 ret = -EEXIST;
6631 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6632 if (tr->name && strcmp(tr->name, name) == 0)
6633 goto out_unlock;
6634 }
6635
6636 ret = -ENOMEM;
6637 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6638 if (!tr)
6639 goto out_unlock;
6640
6641 tr->name = kstrdup(name, GFP_KERNEL);
6642 if (!tr->name)
6643 goto out_free_tr;
6644
ccfe9e42
AL
6645 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6646 goto out_free_tr;
6647
983f938a
SRRH
6648 tr->trace_flags = global_trace.trace_flags;
6649
ccfe9e42
AL
6650 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6651
277ba044
SR
6652 raw_spin_lock_init(&tr->start_lock);
6653
0b9b12c1
SRRH
6654 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6655
277ba044
SR
6656 tr->current_trace = &nop_trace;
6657
6658 INIT_LIST_HEAD(&tr->systems);
6659 INIT_LIST_HEAD(&tr->events);
6660
737223fb 6661 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6662 goto out_free_tr;
6663
8434dc93 6664 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6665 if (!tr->dir)
6666 goto out_free_tr;
6667
6668 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6669 if (ret) {
8434dc93 6670 tracefs_remove_recursive(tr->dir);
277ba044 6671 goto out_free_tr;
609e85a7 6672 }
277ba044 6673
8434dc93 6674 init_tracer_tracefs(tr, tr->dir);
9a38a885 6675 init_trace_flags_index(tr);
37aea98b 6676 __update_tracer_options(tr);
277ba044
SR
6677
6678 list_add(&tr->list, &ftrace_trace_arrays);
6679
6680 mutex_unlock(&trace_types_lock);
6681
6682 return 0;
6683
6684 out_free_tr:
23aaa3c1 6685 free_trace_buffers(tr);
ccfe9e42 6686 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6687 kfree(tr->name);
6688 kfree(tr);
6689
6690 out_unlock:
6691 mutex_unlock(&trace_types_lock);
6692
6693 return ret;
6694
6695}
6696
eae47358 6697static int instance_rmdir(const char *name)
0c8916c3
SR
6698{
6699 struct trace_array *tr;
6700 int found = 0;
6701 int ret;
37aea98b 6702 int i;
0c8916c3
SR
6703
6704 mutex_lock(&trace_types_lock);
6705
6706 ret = -ENODEV;
6707 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6708 if (tr->name && strcmp(tr->name, name) == 0) {
6709 found = 1;
6710 break;
6711 }
6712 }
6713 if (!found)
6714 goto out_unlock;
6715
a695cb58 6716 ret = -EBUSY;
cf6ab6d9 6717 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6718 goto out_unlock;
6719
0c8916c3
SR
6720 list_del(&tr->list);
6721
6b450d25 6722 tracing_set_nop(tr);
0c8916c3 6723 event_trace_del_tracer(tr);
591dffda 6724 ftrace_destroy_function_files(tr);
681a4a2f 6725 tracefs_remove_recursive(tr->dir);
a9fcaaac 6726 free_trace_buffers(tr);
0c8916c3 6727
37aea98b
SRRH
6728 for (i = 0; i < tr->nr_topts; i++) {
6729 kfree(tr->topts[i].topts);
6730 }
6731 kfree(tr->topts);
6732
0c8916c3
SR
6733 kfree(tr->name);
6734 kfree(tr);
6735
6736 ret = 0;
6737
6738 out_unlock:
6739 mutex_unlock(&trace_types_lock);
6740
6741 return ret;
6742}
6743
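/*
 * Illustrative sketch (not part of trace.c): instances are created and
 * destroyed with plain mkdir(2)/rmdir(2), which tracefs routes to
 * instance_mkdir()/instance_rmdir() above.  The name "demo" is arbitrary.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/instances/demo";

	if (mkdir(path, 0755) < 0) {	/* EEXIST if the name is taken */
		perror("mkdir");
		return 1;
	}
	printf("created %s\n", path);
	if (rmdir(path) < 0)		/* EBUSY while the instance is in use */
		perror("rmdir");
	return 0;
}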
277ba044
SR
6744static __init void create_trace_instances(struct dentry *d_tracer)
6745{
eae47358
SRRH
6746 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6747 instance_mkdir,
6748 instance_rmdir);
277ba044
SR
6749 if (WARN_ON(!trace_instance_dir))
6750 return;
277ba044
SR
6751}
6752
2b6080f2 6753static void
8434dc93 6754init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6755{
121aaee7 6756 int cpu;
2b6080f2 6757
607e2ea1
SRRH
6758 trace_create_file("available_tracers", 0444, d_tracer,
6759 tr, &show_traces_fops);
6760
6761 trace_create_file("current_tracer", 0644, d_tracer,
6762 tr, &set_tracer_fops);
6763
ccfe9e42
AL
6764 trace_create_file("tracing_cpumask", 0644, d_tracer,
6765 tr, &tracing_cpumask_fops);
6766
2b6080f2
SR
6767 trace_create_file("trace_options", 0644, d_tracer,
6768 tr, &tracing_iter_fops);
6769
6770 trace_create_file("trace", 0644, d_tracer,
6484c71c 6771 tr, &tracing_fops);
2b6080f2
SR
6772
6773 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6774 tr, &tracing_pipe_fops);
2b6080f2
SR
6775
6776 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6777 tr, &tracing_entries_fops);
2b6080f2
SR
6778
6779 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6780 tr, &tracing_total_entries_fops);
6781
238ae93d 6782 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6783 tr, &tracing_free_buffer_fops);
6784
6785 trace_create_file("trace_marker", 0220, d_tracer,
6786 tr, &tracing_mark_fops);
6787
6788 trace_create_file("trace_clock", 0644, d_tracer, tr,
6789 &trace_clock_fops);
6790
6791 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6792 tr, &rb_simple_fops);
ce9bae55 6793
16270145
SRRH
6794 create_trace_options_dir(tr);
6795
6d9b3fa5
SRRH
6796#ifdef CONFIG_TRACER_MAX_TRACE
6797 trace_create_file("tracing_max_latency", 0644, d_tracer,
6798 &tr->max_latency, &tracing_max_lat_fops);
6799#endif
6800
591dffda
SRRH
6801 if (ftrace_create_function_files(tr, d_tracer))
6802 WARN(1, "Could not allocate function filter files");
6803
ce9bae55
SRRH
6804#ifdef CONFIG_TRACER_SNAPSHOT
6805 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6806 tr, &snapshot_fops);
ce9bae55 6807#endif
121aaee7
SRRH
6808
6809 for_each_tracing_cpu(cpu)
8434dc93 6810 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6811
2b6080f2
SR
6812}
6813
f76180bc
SRRH
6814static struct vfsmount *trace_automount(void *ignore)
6815{
6816 struct vfsmount *mnt;
6817 struct file_system_type *type;
6818
6819 /*
6820 * To maintain backward compatibility for tools that mount
6821 * debugfs to get to the tracing facility, tracefs is automatically
6822 * mounted to the debugfs/tracing directory.
6823 */
6824 type = get_fs_type("tracefs");
6825 if (!type)
6826 return NULL;
6827 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6828 put_filesystem(type);
6829 if (IS_ERR(mnt))
6830 return NULL;
6831 mntget(mnt);
6832
6833 return mnt;
6834}
6835
7eeafbca
SRRH
6836/**
6837 * tracing_init_dentry - initialize top level trace array
6838 *
6839 * This is called when creating files or directories in the tracing
6840 * directory. It is called via fs_initcall() by any of the boot up code
6841 * and expects to return the dentry of the top level tracing directory.
6842 */
6843struct dentry *tracing_init_dentry(void)
6844{
6845 struct trace_array *tr = &global_trace;
6846
f76180bc 6847 /* The top level trace array uses NULL as parent */
7eeafbca 6848 if (tr->dir)
f76180bc 6849 return NULL;
7eeafbca
SRRH
6850
6851 if (WARN_ON(!debugfs_initialized()))
6852 return ERR_PTR(-ENODEV);
6853
f76180bc
SRRH
6854 /*
6855 * As there may still be users that expect the tracing
6856 * files to exist in debugfs/tracing, we must automount
6857 * the tracefs file system there, so older tools still
6858 * work with the newer kernel.
6859 */
6860 tr->dir = debugfs_create_automount("tracing", NULL,
6861 trace_automount, NULL);
7eeafbca
SRRH
6862 if (!tr->dir) {
6863 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6864 return ERR_PTR(-ENOMEM);
6865 }
6866
8434dc93 6867 return NULL;
7eeafbca
SRRH
6868}
6869
0c564a53
SRRH
6870extern struct trace_enum_map *__start_ftrace_enum_maps[];
6871extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6872
6873static void __init trace_enum_init(void)
6874{
3673b8e4
SRRH
6875 int len;
6876
6877 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6878 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6879}
6880
6881#ifdef CONFIG_MODULES
6882static void trace_module_add_enums(struct module *mod)
6883{
6884 if (!mod->num_trace_enums)
6885 return;
6886
6887 /*
6888 * Modules with bad taint do not have events created, do
6889 * not bother with enums either.
6890 */
6891 if (trace_module_has_bad_taint(mod))
6892 return;
6893
9828413d 6894 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6895}
6896
9828413d
SRRH
6897#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6898static void trace_module_remove_enums(struct module *mod)
6899{
6900 union trace_enum_map_item *map;
6901 union trace_enum_map_item **last = &trace_enum_maps;
6902
6903 if (!mod->num_trace_enums)
6904 return;
6905
6906 mutex_lock(&trace_enum_mutex);
6907
6908 map = trace_enum_maps;
6909
6910 while (map) {
6911 if (map->head.mod == mod)
6912 break;
6913 map = trace_enum_jmp_to_tail(map);
6914 last = &map->tail.next;
6915 map = map->tail.next;
6916 }
6917 if (!map)
6918 goto out;
6919
6920 *last = trace_enum_jmp_to_tail(map)->tail.next;
6921 kfree(map);
6922 out:
6923 mutex_unlock(&trace_enum_mutex);
6924}
6925#else
6926static inline void trace_module_remove_enums(struct module *mod) { }
6927#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6928
3673b8e4
SRRH
6929static int trace_module_notify(struct notifier_block *self,
6930 unsigned long val, void *data)
6931{
6932 struct module *mod = data;
6933
6934 switch (val) {
6935 case MODULE_STATE_COMING:
6936 trace_module_add_enums(mod);
6937 break;
9828413d
SRRH
6938 case MODULE_STATE_GOING:
6939 trace_module_remove_enums(mod);
6940 break;
3673b8e4
SRRH
6941 }
6942
6943 return 0;
0c564a53
SRRH
6944}
6945
3673b8e4
SRRH
6946static struct notifier_block trace_module_nb = {
6947 .notifier_call = trace_module_notify,
6948 .priority = 0,
6949};
9828413d 6950#endif /* CONFIG_MODULES */
3673b8e4 6951
8434dc93 6952static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6953{
6954 struct dentry *d_tracer;
bc0c38d1 6955
7e53bd42
LJ
6956 trace_access_lock_init();
6957
bc0c38d1 6958 d_tracer = tracing_init_dentry();
14a5ae40 6959 if (IS_ERR(d_tracer))
ed6f1c99 6960 return 0;
bc0c38d1 6961
8434dc93 6962 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6963
5452af66 6964 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6965 &global_trace, &tracing_thresh_fops);
a8259075 6966
339ae5d3 6967 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6968 NULL, &tracing_readme_fops);
6969
69abe6a5
AP
6970 trace_create_file("saved_cmdlines", 0444, d_tracer,
6971 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6972
939c7a4f
YY
6973 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6974 NULL, &tracing_saved_cmdlines_size_fops);
6975
0c564a53
SRRH
6976 trace_enum_init();
6977
9828413d
SRRH
6978 trace_create_enum_file(d_tracer);
6979
3673b8e4
SRRH
6980#ifdef CONFIG_MODULES
6981 register_module_notifier(&trace_module_nb);
6982#endif
6983
bc0c38d1 6984#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6985 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6986 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6987#endif
b04cc6b1 6988
277ba044 6989 create_trace_instances(d_tracer);
5452af66 6990
37aea98b 6991 update_tracer_options(&global_trace);
09d23a1d 6992
b5ad384e 6993 return 0;
bc0c38d1
SR
6994}
6995
3f5a54e3
SR
6996static int trace_panic_handler(struct notifier_block *this,
6997 unsigned long event, void *unused)
6998{
944ac425 6999 if (ftrace_dump_on_oops)
cecbca96 7000 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7001 return NOTIFY_OK;
7002}
7003
7004static struct notifier_block trace_panic_notifier = {
7005 .notifier_call = trace_panic_handler,
7006 .next = NULL,
7007 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7008};
7009
7010static int trace_die_handler(struct notifier_block *self,
7011 unsigned long val,
7012 void *data)
7013{
7014 switch (val) {
7015 case DIE_OOPS:
944ac425 7016 if (ftrace_dump_on_oops)
cecbca96 7017 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7018 break;
7019 default:
7020 break;
7021 }
7022 return NOTIFY_OK;
7023}
7024
7025static struct notifier_block trace_die_notifier = {
7026 .notifier_call = trace_die_handler,
7027 .priority = 200
7028};
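/*
 * Illustrative sketch (not part of trace.c): the notifiers above only dump
 * the ring buffer if ftrace_dump_on_oops is set.  Besides the boot
 * parameter, it is assumed here to be reachable as the sysctl
 * kernel.ftrace_dump_on_oops ("1" = all CPUs, "2" = the oopsing CPU).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/ftrace_dump_on_oops", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}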
7029
7030/*
7031 * printk is set to a max of 1024; we really don't need it that big.
7032 * Nothing should be printing 1000 characters anyway.
7033 */
7034#define TRACE_MAX_PRINT 1000
7035
7036/*
7037 * Define here KERN_TRACE so that we have one place to modify
7038 * it if we decide to change what log level the ftrace dump
7039 * should be at.
7040 */
428aee14 7041#define KERN_TRACE KERN_EMERG
3f5a54e3 7042
955b61e5 7043void
3f5a54e3
SR
7044trace_printk_seq(struct trace_seq *s)
7045{
7046 /* Probably should print a warning here. */
3a161d99
SRRH
7047 if (s->seq.len >= TRACE_MAX_PRINT)
7048 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 7049
820b75f6
SRRH
7050 /*
7051 * More paranoid code. Although the buffer size is set to
7052 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7053 * an extra layer of protection.
7054 */
7055 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7056 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
7057
7058 /* should be zero ended, but we are paranoid. */
3a161d99 7059 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
7060
7061 printk(KERN_TRACE "%s", s->buffer);
7062
f9520750 7063 trace_seq_init(s);
3f5a54e3
SR
7064}
7065
955b61e5
JW
7066void trace_init_global_iter(struct trace_iterator *iter)
7067{
7068 iter->tr = &global_trace;
2b6080f2 7069 iter->trace = iter->tr->current_trace;
ae3b5093 7070 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 7071 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
7072
7073 if (iter->trace && iter->trace->open)
7074 iter->trace->open(iter);
7075
7076 /* Annotate start of buffers if we had overruns */
7077 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7078 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7079
7080 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7081 if (trace_clocks[iter->tr->clock_id].in_ns)
7082 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
7083}
7084
7fe70b57 7085void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 7086{
3f5a54e3
SR
7087 /* use static because iter can be a bit big for the stack */
7088 static struct trace_iterator iter;
7fe70b57 7089 static atomic_t dump_running;
983f938a 7090 struct trace_array *tr = &global_trace;
cf586b61 7091 unsigned int old_userobj;
d769041f
SR
7092 unsigned long flags;
7093 int cnt = 0, cpu;
3f5a54e3 7094
7fe70b57
SRRH
7095 /* Only allow one dump user at a time. */
7096 if (atomic_inc_return(&dump_running) != 1) {
7097 atomic_dec(&dump_running);
7098 return;
7099 }
3f5a54e3 7100
7fe70b57
SRRH
7101 /*
7102 * Always turn off tracing when we dump.
7103 * We don't need to show trace output of what happens
7104 * between multiple crashes.
7105 *
7106 * If the user does a sysrq-z, then they can re-enable
7107 * tracing with echo 1 > tracing_on.
7108 */
0ee6b6cf 7109 tracing_off();
cf586b61 7110
7fe70b57 7111 local_irq_save(flags);
3f5a54e3 7112
38dbe0b1 7113 /* Simulate the iterator */
955b61e5
JW
7114 trace_init_global_iter(&iter);
7115
d769041f 7116 for_each_tracing_cpu(cpu) {
5e2d5ef8 7117 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
7118 }
7119
983f938a 7120 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 7121
b54d3de9 7122 /* don't look at user memory in panic mode */
983f938a 7123 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 7124
cecbca96
FW
7125 switch (oops_dump_mode) {
7126 case DUMP_ALL:
ae3b5093 7127 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7128 break;
7129 case DUMP_ORIG:
7130 iter.cpu_file = raw_smp_processor_id();
7131 break;
7132 case DUMP_NONE:
7133 goto out_enable;
7134 default:
7135 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7136 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7137 }
7138
7139 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7140
7fe70b57
SRRH
7141 /* Did function tracer already get disabled? */
7142 if (ftrace_is_dead()) {
7143 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7144 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7145 }
7146
3f5a54e3
SR
7147 /*
7148 * We need to stop all tracing on all CPUs to read
7149 * the next buffer. This is a bit expensive, but is
7150 * not done often. We fill in all that we can read,
7151 * and then release the locks again.
7152 */
7153
3f5a54e3
SR
7154 while (!trace_empty(&iter)) {
7155
7156 if (!cnt)
7157 printk(KERN_TRACE "---------------------------------\n");
7158
7159 cnt++;
7160
7161 /* reset all but tr, trace, and overruns */
7162 memset(&iter.seq, 0,
7163 sizeof(struct trace_iterator) -
7164 offsetof(struct trace_iterator, seq));
7165 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7166 iter.pos = -1;
7167
955b61e5 7168 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7169 int ret;
7170
7171 ret = print_trace_line(&iter);
7172 if (ret != TRACE_TYPE_NO_CONSUME)
7173 trace_consume(&iter);
3f5a54e3 7174 }
b892e5c8 7175 touch_nmi_watchdog();
3f5a54e3
SR
7176
7177 trace_printk_seq(&iter.seq);
7178 }
7179
7180 if (!cnt)
7181 printk(KERN_TRACE " (ftrace buffer empty)\n");
7182 else
7183 printk(KERN_TRACE "---------------------------------\n");
7184
cecbca96 7185 out_enable:
983f938a 7186 tr->trace_flags |= old_userobj;
cf586b61 7187
7fe70b57
SRRH
7188 for_each_tracing_cpu(cpu) {
7189 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7190 }
7fe70b57 7191 atomic_dec(&dump_running);
cd891ae0 7192 local_irq_restore(flags);
3f5a54e3 7193}
a8eecf22 7194EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7195
3928a8a2 7196__init static int tracer_alloc_buffers(void)
bc0c38d1 7197{
73c5162a 7198 int ring_buf_size;
9e01c1b7 7199 int ret = -ENOMEM;
4c11d7ae 7200
b5e87c05
SRRH
7201 /*
7202 * Make sure we don't accidentally add more trace options
7203 * than we have bits for.
7204 */
9a38a885 7205 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 7206
9e01c1b7
RR
7207 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7208 goto out;
7209
ccfe9e42 7210 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7211 goto out_free_buffer_mask;
4c11d7ae 7212
07d777fe
SR
7213 /* Only allocate trace_printk buffers if a trace_printk exists */
7214 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7215 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7216 trace_printk_init_buffers();
7217
73c5162a
SR
 7218 /* To save memory, keep the ring buffer size at its minimum */
7219 if (ring_buffer_expanded)
7220 ring_buf_size = trace_buf_size;
7221 else
7222 ring_buf_size = 1;
7223
9e01c1b7 7224 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7225 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7226
2b6080f2
SR
7227 raw_spin_lock_init(&global_trace.start_lock);
7228
2c4a33ab
SRRH
7229 /* Used for event triggers */
7230 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7231 if (!temp_buffer)
7232 goto out_free_cpumask;
7233
939c7a4f
YY
7234 if (trace_create_savedcmd() < 0)
7235 goto out_free_temp_buffer;
7236
9e01c1b7 7237 /* TODO: make the number of buffers hot pluggable with CPUs */
737223fb 7238 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7239 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7240 WARN_ON(1);
939c7a4f 7241 goto out_free_savedcmd;
4c11d7ae 7242 }
a7603ff4 7243
499e5470
SR
7244 if (global_trace.buffer_disabled)
7245 tracing_off();
4c11d7ae 7246
e1e232ca
SR
7247 if (trace_boot_clock) {
7248 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7249 if (ret < 0)
7250 pr_warning("Trace clock %s not defined, going back to default\n",
7251 trace_boot_clock);
7252 }
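	/*
	 * trace_boot_clock is normally set from the "trace_clock="
	 * kernel command-line option, e.g. booting with
	 * trace_clock=global makes tracing_set_clock() run here
	 * before any tracer starts.
	 */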
7253
ca164318
SRRH
7254 /*
7255 * register_tracer() might reference current_trace, so it
7256 * needs to be set before we register anything. This is
7257 * just a bootstrap of current_trace anyway.
7258 */
2b6080f2
SR
7259 global_trace.current_trace = &nop_trace;
7260
0b9b12c1
SRRH
7261 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7262
4104d326
SRRH
7263 ftrace_init_global_array_ops(&global_trace);
7264
9a38a885
SRRH
7265 init_trace_flags_index(&global_trace);
7266
ca164318
SRRH
7267 register_tracer(&nop_trace);
7268
60a11774
SR
7269 /* All seems OK, enable tracing */
7270 tracing_disabled = 0;
3928a8a2 7271
3f5a54e3
SR
7272 atomic_notifier_chain_register(&panic_notifier_list,
7273 &trace_panic_notifier);
7274
7275 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7276
ae63b31e
SR
7277 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7278
7279 INIT_LIST_HEAD(&global_trace.systems);
7280 INIT_LIST_HEAD(&global_trace.events);
7281 list_add(&global_trace.list, &ftrace_trace_arrays);
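	/*
	 * global_trace becomes the first entry on ftrace_trace_arrays
	 * and backs the top-level tracefs directory; trace arrays
	 * created later (e.g. through the tracefs "instances"
	 * directory) are linked onto the same list.
	 */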
7282
a4d1e688 7283 apply_trace_boot_options();
7bcfaf54 7284
77fd5c15
SRRH
7285 register_snapshot_cmd();
7286
2fc1dfbe 7287 return 0;
3f5a54e3 7288
939c7a4f
YY
7289out_free_savedcmd:
7290 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7291out_free_temp_buffer:
7292 ring_buffer_free(temp_buffer);
9e01c1b7 7293out_free_cpumask:
ccfe9e42 7294 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7295out_free_buffer_mask:
7296 free_cpumask_var(tracing_buffer_mask);
7297out:
7298 return ret;
bc0c38d1 7299}
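The unwind labels above follow the kernel's standard goto-on-error idiom:
each allocation that can fail jumps to a label that releases everything
acquired before it, in reverse order. A minimal sketch of the pattern
(function and label names are illustrative):

#include <linux/slab.h>

static int example_alloc(void)
{
	void *first, *second;
	int ret = -ENOMEM;

	first = kmalloc(32, GFP_KERNEL);
	if (!first)
		goto out;

	second = kmalloc(64, GFP_KERNEL);
	if (!second)
		goto out_free_first;

	return 0;		/* success: both buffers stay allocated */

out_free_first:
	kfree(first);
out:
	return ret;		/* failure: nothing acquired is leaked */
}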
b2821ae6 7300
5f893b26
SRRH
7301void __init trace_init(void)
7302{
0daa2302
SRRH
7303 if (tracepoint_printk) {
7304 tracepoint_print_iter =
7305 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7306 if (WARN_ON(!tracepoint_print_iter))
7307 tracepoint_printk = 0;
7308 }
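	/*
	 * tracepoint_printk is normally enabled with the "tp_printk"
	 * kernel command-line option; the iterator allocated above is
	 * what lets tracepoint output be routed to printk.
	 */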
5f893b26 7309 tracer_alloc_buffers();
0c564a53 7310 trace_event_init();
5f893b26
SRRH
7311}
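/*
 * trace_init() is called early from start_kernel(), before the initcall
 * levels run, so tracepoints requested on the kernel command line can be
 * active for most of the boot process.
 */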
7312
b2821ae6
SR
7313__init static int clear_boot_tracer(void)
7314{
7315 /*
 7316 * The default boot-up tracer's name lives in an init section
 7317 * that is freed once boot completes. This function runs as a
 7318 * late initcall: if the boot tracer was never registered,
 7319 * clear the pointer so a later registration cannot access
 7320 * memory that is about to be freed.
7321 */
7322 if (!default_bootup_tracer)
7323 return 0;
7324
7325 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7326 default_bootup_tracer);
7327 default_bootup_tracer = NULL;
7328
7329 return 0;
7330}
7331
8434dc93 7332fs_initcall(tracer_init_tracefs);
b2821ae6 7333late_initcall(clear_boot_tracer);
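/*
 * Initcall ordering matters here: fs_initcall() runs before
 * late_initcall(), so the tracefs files created by tracer_init_tracefs()
 * exist by the time clear_boot_tracer() drops the init-section boot
 * tracer name.
 */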