tracing: Make ftrace_trace_stack() depend on general trace_array flag
[deliverable/linux.git] / kernel / trace / trace.c
CommitLineData
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
2cadf913 28#include <linux/kprobes.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
8bd75c77 43#include <linux/sched/rt.h>
86387f7e 44
bc0c38d1 45#include "trace.h"
f0868d1e 46#include "trace_output.h"
bc0c38d1 47
73c5162a
SR
48/*
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
55034cd6 52bool ring_buffer_expanded;
73c5162a 53
8e1b82e0
FW
54/*
55 * We need to change this state when a selftest is running.
ff32504f
FW
56 * A selftest will lurk into the ring-buffer to count the
57 * entries inserted during the selftest although some concurrent
5e1607a0 58 * insertions into the ring-buffer such as trace_printk could occurred
ff32504f
FW
59 * at the same time, giving false positive or negative results.
60 */
8e1b82e0 61static bool __read_mostly tracing_selftest_running;
ff32504f 62
b2821ae6
SR
63/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
020e5f85 66bool __read_mostly tracing_selftest_disabled;
b2821ae6 67
0daa2302
SRRH
68/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
adf9f195
FW
72/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static struct tracer_flags dummy_tracer_flags = {
78 .val = 0,
79 .opts = dummy_tracer_opt
80};
81
8c1a49ae
SRRH
82static int
83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
84{
85 return 0;
86}
0f048701 87
7ffbd48d
SR
88/*
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
91 * occurred.
92 */
93static DEFINE_PER_CPU(bool, trace_cmdline_save);
94
0f048701
SR
95/*
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
99 * this back to zero.
100 */
4fd27358 101static int tracing_disabled = 1;
0f048701 102
9288f99a 103DEFINE_PER_CPU(int, ftrace_cpu_disabled);
d769041f 104
955b61e5 105cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 106
944ac425
SR
107/*
108 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
109 *
110 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
111 * is set, then ftrace_dump is called. This will output the contents
112 * of the ftrace buffers to the console. This is very useful for
113 * capturing traces that lead to crashes and outputing it to a
114 * serial console.
115 *
116 * It is default off, but you can enable it with either specifying
117 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
118 * /proc/sys/kernel/ftrace_dump_on_oops
119 * Set 1 if you want to dump buffers of all CPUs
120 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 121 */
cecbca96
FW
122
123enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 124
de7edd31
SRRH
125/* When set, tracing will stop when a WARN*() is hit */
126int __disable_trace_on_warning;
127
9828413d
SRRH
128#ifdef CONFIG_TRACE_ENUM_MAP_FILE
129/* Map of enums to their values, for "enum_map" file */
130struct trace_enum_map_head {
131 struct module *mod;
132 unsigned long length;
133};
134
135union trace_enum_map_item;
136
137struct trace_enum_map_tail {
138 /*
139 * "end" is first and points to NULL as it must be different
140 * than "mod" or "enum_string"
141 */
142 union trace_enum_map_item *next;
143 const char *end; /* points to NULL */
144};
145
146static DEFINE_MUTEX(trace_enum_mutex);
147
148/*
149 * The trace_enum_maps are saved in an array with two extra elements,
150 * one at the beginning, and one at the end. The beginning item contains
151 * the count of the saved maps (head.length), and the module they
152 * belong to if not built in (head.mod). The ending item contains a
153 * pointer to the next array of saved enum_map items.
154 */
155union trace_enum_map_item {
156 struct trace_enum_map map;
157 struct trace_enum_map_head head;
158 struct trace_enum_map_tail tail;
159};
160
161static union trace_enum_map_item *trace_enum_maps;
162#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
163
607e2ea1 164static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 165
ee6c2c1b
LZ
166#define MAX_TRACER_SIZE 100
167static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 168static char *default_bootup_tracer;
d9e54076 169
55034cd6
SRRH
170static bool allocate_snapshot;
171
1beee96b 172static int __init set_cmdline_ftrace(char *str)
d9e54076 173{
67012ab1 174 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 175 default_bootup_tracer = bootup_tracer_buf;
73c5162a 176 /* We are using ftrace early, expand it */
55034cd6 177 ring_buffer_expanded = true;
d9e54076
PZ
178 return 1;
179}
1beee96b 180__setup("ftrace=", set_cmdline_ftrace);
d9e54076 181
944ac425
SR
182static int __init set_ftrace_dump_on_oops(char *str)
183{
cecbca96
FW
184 if (*str++ != '=' || !*str) {
185 ftrace_dump_on_oops = DUMP_ALL;
186 return 1;
187 }
188
189 if (!strcmp("orig_cpu", str)) {
190 ftrace_dump_on_oops = DUMP_ORIG;
191 return 1;
192 }
193
194 return 0;
944ac425
SR
195}
196__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 197
de7edd31
SRRH
198static int __init stop_trace_on_warning(char *str)
199{
933ff9f2
LCG
200 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 __disable_trace_on_warning = 1;
de7edd31
SRRH
202 return 1;
203}
933ff9f2 204__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 205
3209cff4 206static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
207{
208 allocate_snapshot = true;
209 /* We also need the main ring buffer expanded */
210 ring_buffer_expanded = true;
211 return 1;
212}
3209cff4 213__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 214
7bcfaf54
SR
215
216static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217static char *trace_boot_options __initdata;
218
219static int __init set_trace_boot_options(char *str)
220{
67012ab1 221 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
222 trace_boot_options = trace_boot_options_buf;
223 return 0;
224}
225__setup("trace_options=", set_trace_boot_options);
226
e1e232ca
SR
227static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
228static char *trace_boot_clock __initdata;
229
230static int __init set_trace_boot_clock(char *str)
231{
232 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
233 trace_boot_clock = trace_boot_clock_buf;
234 return 0;
235}
236__setup("trace_clock=", set_trace_boot_clock);
237
0daa2302
SRRH
238static int __init set_tracepoint_printk(char *str)
239{
240 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 tracepoint_printk = 1;
242 return 1;
243}
244__setup("tp_printk", set_tracepoint_printk);
de7edd31 245
cf8e3474 246unsigned long long ns2usecs(cycle_t nsec)
bc0c38d1
SR
247{
248 nsec += 500;
249 do_div(nsec, 1000);
250 return nsec;
251}
252
983f938a
SRRH
253/* trace_flags holds trace_options default values */
254#define TRACE_DEFAULT_FLAGS \
255 (FUNCTION_DEFAULT_FLAGS | \
256 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
257 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
258 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
259 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
260
4fcdae83
SR
261/*
262 * The global_trace is the descriptor that holds the tracing
263 * buffers for the live tracing. For each CPU, it contains
264 * a link list of pages that will store trace entries. The
265 * page descriptor of the pages in the memory is used to hold
266 * the link list by linking the lru item in the page descriptor
267 * to each of the pages in the buffer per CPU.
268 *
269 * For each active CPU there is a data field that holds the
270 * pages for the buffer for that CPU. Each CPU has the same number
271 * of pages allocated for its buffer.
272 */
983f938a
SRRH
273static struct trace_array global_trace = {
274 .trace_flags = TRACE_DEFAULT_FLAGS,
275};
bc0c38d1 276
ae63b31e 277LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 278
ff451961
SRRH
279int trace_array_get(struct trace_array *this_tr)
280{
281 struct trace_array *tr;
282 int ret = -ENODEV;
283
284 mutex_lock(&trace_types_lock);
285 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
286 if (tr == this_tr) {
287 tr->ref++;
288 ret = 0;
289 break;
290 }
291 }
292 mutex_unlock(&trace_types_lock);
293
294 return ret;
295}
296
297static void __trace_array_put(struct trace_array *this_tr)
298{
299 WARN_ON(!this_tr->ref);
300 this_tr->ref--;
301}
302
303void trace_array_put(struct trace_array *this_tr)
304{
305 mutex_lock(&trace_types_lock);
306 __trace_array_put(this_tr);
307 mutex_unlock(&trace_types_lock);
308}
309
7f1d2f82 310int filter_check_discard(struct trace_event_file *file, void *rec,
f306cc82
TZ
311 struct ring_buffer *buffer,
312 struct ring_buffer_event *event)
eb02ce01 313{
5d6ad960 314 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
f306cc82
TZ
315 !filter_match_preds(file->filter, rec)) {
316 ring_buffer_discard_commit(buffer, event);
317 return 1;
318 }
319
320 return 0;
321}
322EXPORT_SYMBOL_GPL(filter_check_discard);
323
2425bcb9 324int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
325 struct ring_buffer *buffer,
326 struct ring_buffer_event *event)
327{
328 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
329 !filter_match_preds(call->filter, rec)) {
330 ring_buffer_discard_commit(buffer, event);
331 return 1;
332 }
333
334 return 0;
eb02ce01 335}
f306cc82 336EXPORT_SYMBOL_GPL(call_filter_check_discard);
eb02ce01 337
ad1438a0 338static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
339{
340 u64 ts;
341
342 /* Early boot up does not have a buffer yet */
9457158b 343 if (!buf->buffer)
37886f6a
SR
344 return trace_clock_local();
345
9457158b
AL
346 ts = ring_buffer_time_stamp(buf->buffer, cpu);
347 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
348
349 return ts;
350}
bc0c38d1 351
9457158b
AL
352cycle_t ftrace_now(int cpu)
353{
354 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
355}
356
10246fa3
SRRH
357/**
358 * tracing_is_enabled - Show if global_trace has been disabled
359 *
360 * Shows if the global trace has been enabled or not. It uses the
361 * mirror flag "buffer_disabled" to be used in fast paths such as for
362 * the irqsoff tracer. But it may be inaccurate due to races. If you
363 * need to know the accurate state, use tracing_is_on() which is a little
364 * slower, but accurate.
365 */
9036990d
SR
366int tracing_is_enabled(void)
367{
10246fa3
SRRH
368 /*
369 * For quick access (irqsoff uses this in fast path), just
370 * return the mirror variable of the state of the ring buffer.
371 * It's a little racy, but we don't really care.
372 */
373 smp_rmb();
374 return !global_trace.buffer_disabled;
9036990d
SR
375}
376
4fcdae83 377/*
3928a8a2
SR
378 * trace_buf_size is the size in bytes that is allocated
379 * for a buffer. Note, the number of bytes is always rounded
380 * to page size.
3f5a54e3
SR
381 *
382 * This number is purposely set to a low number of 16384.
383 * If the dump on oops happens, it will be much appreciated
384 * to not have to wait for all that output. Anyway this can be
385 * boot time and run time configurable.
4fcdae83 386 */
3928a8a2 387#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 388
3928a8a2 389static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 390
4fcdae83 391/* trace_types holds a link list of available tracers. */
bc0c38d1 392static struct tracer *trace_types __read_mostly;
4fcdae83 393
4fcdae83
SR
394/*
395 * trace_types_lock is used to protect the trace_types list.
4fcdae83 396 */
a8227415 397DEFINE_MUTEX(trace_types_lock);
4fcdae83 398
7e53bd42
LJ
399/*
400 * serialize the access of the ring buffer
401 *
402 * ring buffer serializes readers, but it is low level protection.
403 * The validity of the events (which returns by ring_buffer_peek() ..etc)
404 * are not protected by ring buffer.
405 *
406 * The content of events may become garbage if we allow other process consumes
407 * these events concurrently:
408 * A) the page of the consumed events may become a normal page
409 * (not reader page) in ring buffer, and this page will be rewrited
410 * by events producer.
411 * B) The page of the consumed events may become a page for splice_read,
412 * and this page will be returned to system.
413 *
414 * These primitives allow multi process access to different cpu ring buffer
415 * concurrently.
416 *
417 * These primitives don't distinguish read-only and read-consume access.
418 * Multi read-only access are also serialized.
419 */
420
421#ifdef CONFIG_SMP
422static DECLARE_RWSEM(all_cpu_access_lock);
423static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
424
425static inline void trace_access_lock(int cpu)
426{
ae3b5093 427 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
428 /* gain it for accessing the whole ring buffer. */
429 down_write(&all_cpu_access_lock);
430 } else {
431 /* gain it for accessing a cpu ring buffer. */
432
ae3b5093 433 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
434 down_read(&all_cpu_access_lock);
435
436 /* Secondly block other access to this @cpu ring buffer. */
437 mutex_lock(&per_cpu(cpu_access_lock, cpu));
438 }
439}
440
441static inline void trace_access_unlock(int cpu)
442{
ae3b5093 443 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
444 up_write(&all_cpu_access_lock);
445 } else {
446 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
447 up_read(&all_cpu_access_lock);
448 }
449}
450
451static inline void trace_access_lock_init(void)
452{
453 int cpu;
454
455 for_each_possible_cpu(cpu)
456 mutex_init(&per_cpu(cpu_access_lock, cpu));
457}
458
459#else
460
461static DEFINE_MUTEX(access_lock);
462
463static inline void trace_access_lock(int cpu)
464{
465 (void)cpu;
466 mutex_lock(&access_lock);
467}
468
469static inline void trace_access_unlock(int cpu)
470{
471 (void)cpu;
472 mutex_unlock(&access_lock);
473}
474
475static inline void trace_access_lock_init(void)
476{
477}
478
479#endif
480
d78a4614
SRRH
481#ifdef CONFIG_STACKTRACE
482static void __ftrace_trace_stack(struct ring_buffer *buffer,
483 unsigned long flags,
484 int skip, int pc, struct pt_regs *regs);
2d34f489
SRRH
485static inline void ftrace_trace_stack(struct trace_array *tr,
486 struct ring_buffer *buffer,
73dddbb5
SRRH
487 unsigned long flags,
488 int skip, int pc, struct pt_regs *regs);
ca475e83 489
d78a4614
SRRH
490#else
491static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
492 unsigned long flags,
493 int skip, int pc, struct pt_regs *regs)
494{
495}
2d34f489
SRRH
496static inline void ftrace_trace_stack(struct trace_array *tr,
497 struct ring_buffer *buffer,
73dddbb5
SRRH
498 unsigned long flags,
499 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
500{
501}
502
d78a4614
SRRH
503#endif
504
5280bcef 505static void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
506{
507 if (tr->trace_buffer.buffer)
508 ring_buffer_record_on(tr->trace_buffer.buffer);
509 /*
510 * This flag is looked at when buffers haven't been allocated
511 * yet, or by some tracers (like irqsoff), that just want to
512 * know if the ring buffer has been disabled, but it can handle
513 * races of where it gets disabled but we still do a record.
514 * As the check is in the fast path of the tracers, it is more
515 * important to be fast than accurate.
516 */
517 tr->buffer_disabled = 0;
518 /* Make the flag seen by readers */
519 smp_wmb();
520}
521
499e5470
SR
522/**
523 * tracing_on - enable tracing buffers
524 *
525 * This function enables tracing buffers that may have been
526 * disabled with tracing_off.
527 */
528void tracing_on(void)
529{
10246fa3 530 tracer_tracing_on(&global_trace);
499e5470
SR
531}
532EXPORT_SYMBOL_GPL(tracing_on);
533
09ae7234
SRRH
534/**
535 * __trace_puts - write a constant string into the trace buffer.
536 * @ip: The address of the caller
537 * @str: The constant string to write
538 * @size: The size of the string.
539 */
540int __trace_puts(unsigned long ip, const char *str, int size)
541{
542 struct ring_buffer_event *event;
543 struct ring_buffer *buffer;
544 struct print_entry *entry;
545 unsigned long irq_flags;
546 int alloc;
8abfb872
J
547 int pc;
548
983f938a 549 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
550 return 0;
551
8abfb872 552 pc = preempt_count();
09ae7234 553
3132e107
SRRH
554 if (unlikely(tracing_selftest_running || tracing_disabled))
555 return 0;
556
09ae7234
SRRH
557 alloc = sizeof(*entry) + size + 2; /* possible \n added */
558
559 local_save_flags(irq_flags);
560 buffer = global_trace.trace_buffer.buffer;
561 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
8abfb872 562 irq_flags, pc);
09ae7234
SRRH
563 if (!event)
564 return 0;
565
566 entry = ring_buffer_event_data(event);
567 entry->ip = ip;
568
569 memcpy(&entry->buf, str, size);
570
571 /* Add a newline if necessary */
572 if (entry->buf[size - 1] != '\n') {
573 entry->buf[size] = '\n';
574 entry->buf[size + 1] = '\0';
575 } else
576 entry->buf[size] = '\0';
577
578 __buffer_unlock_commit(buffer, event);
2d34f489 579 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
580
581 return size;
582}
583EXPORT_SYMBOL_GPL(__trace_puts);
584
585/**
586 * __trace_bputs - write the pointer to a constant string into trace buffer
587 * @ip: The address of the caller
588 * @str: The constant string to write to the buffer to
589 */
590int __trace_bputs(unsigned long ip, const char *str)
591{
592 struct ring_buffer_event *event;
593 struct ring_buffer *buffer;
594 struct bputs_entry *entry;
595 unsigned long irq_flags;
596 int size = sizeof(struct bputs_entry);
8abfb872
J
597 int pc;
598
983f938a 599 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
600 return 0;
601
8abfb872 602 pc = preempt_count();
09ae7234 603
3132e107
SRRH
604 if (unlikely(tracing_selftest_running || tracing_disabled))
605 return 0;
606
09ae7234
SRRH
607 local_save_flags(irq_flags);
608 buffer = global_trace.trace_buffer.buffer;
609 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
8abfb872 610 irq_flags, pc);
09ae7234
SRRH
611 if (!event)
612 return 0;
613
614 entry = ring_buffer_event_data(event);
615 entry->ip = ip;
616 entry->str = str;
617
618 __buffer_unlock_commit(buffer, event);
2d34f489 619 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
620
621 return 1;
622}
623EXPORT_SYMBOL_GPL(__trace_bputs);
624
ad909e21
SRRH
625#ifdef CONFIG_TRACER_SNAPSHOT
626/**
627 * trace_snapshot - take a snapshot of the current buffer.
628 *
629 * This causes a swap between the snapshot buffer and the current live
630 * tracing buffer. You can use this to take snapshots of the live
631 * trace when some condition is triggered, but continue to trace.
632 *
633 * Note, make sure to allocate the snapshot with either
634 * a tracing_snapshot_alloc(), or by doing it manually
635 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
636 *
637 * If the snapshot buffer is not allocated, it will stop tracing.
638 * Basically making a permanent snapshot.
639 */
640void tracing_snapshot(void)
641{
642 struct trace_array *tr = &global_trace;
643 struct tracer *tracer = tr->current_trace;
644 unsigned long flags;
645
1b22e382
SRRH
646 if (in_nmi()) {
647 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
648 internal_trace_puts("*** snapshot is being ignored ***\n");
649 return;
650 }
651
ad909e21 652 if (!tr->allocated_snapshot) {
ca268da6
SRRH
653 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
654 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
655 tracing_off();
656 return;
657 }
658
659 /* Note, snapshot can not be used when the tracer uses it */
660 if (tracer->use_max_tr) {
ca268da6
SRRH
661 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
662 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
663 return;
664 }
665
666 local_irq_save(flags);
667 update_max_tr(tr, current, smp_processor_id());
668 local_irq_restore(flags);
669}
1b22e382 670EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
671
672static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
673 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
674static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
675
676static int alloc_snapshot(struct trace_array *tr)
677{
678 int ret;
679
680 if (!tr->allocated_snapshot) {
681
682 /* allocate spare buffer */
683 ret = resize_buffer_duplicate_size(&tr->max_buffer,
684 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
685 if (ret < 0)
686 return ret;
687
688 tr->allocated_snapshot = true;
689 }
690
691 return 0;
692}
693
ad1438a0 694static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
695{
696 /*
697 * We don't free the ring buffer. instead, resize it because
698 * The max_tr ring buffer has some state (e.g. ring->clock) and
699 * we want preserve it.
700 */
701 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
702 set_buffer_entries(&tr->max_buffer, 1);
703 tracing_reset_online_cpus(&tr->max_buffer);
704 tr->allocated_snapshot = false;
705}
ad909e21 706
93e31ffb
TZ
707/**
708 * tracing_alloc_snapshot - allocate snapshot buffer.
709 *
710 * This only allocates the snapshot buffer if it isn't already
711 * allocated - it doesn't also take a snapshot.
712 *
713 * This is meant to be used in cases where the snapshot buffer needs
714 * to be set up for events that can't sleep but need to be able to
715 * trigger a snapshot.
716 */
717int tracing_alloc_snapshot(void)
718{
719 struct trace_array *tr = &global_trace;
720 int ret;
721
722 ret = alloc_snapshot(tr);
723 WARN_ON(ret < 0);
724
725 return ret;
726}
727EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
728
ad909e21
SRRH
729/**
730 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
731 *
732 * This is similar to trace_snapshot(), but it will allocate the
733 * snapshot buffer if it isn't already allocated. Use this only
734 * where it is safe to sleep, as the allocation may sleep.
735 *
736 * This causes a swap between the snapshot buffer and the current live
737 * tracing buffer. You can use this to take snapshots of the live
738 * trace when some condition is triggered, but continue to trace.
739 */
740void tracing_snapshot_alloc(void)
741{
ad909e21
SRRH
742 int ret;
743
93e31ffb
TZ
744 ret = tracing_alloc_snapshot();
745 if (ret < 0)
3209cff4 746 return;
ad909e21
SRRH
747
748 tracing_snapshot();
749}
1b22e382 750EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
751#else
752void tracing_snapshot(void)
753{
754 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
755}
1b22e382 756EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
757int tracing_alloc_snapshot(void)
758{
759 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
760 return -ENODEV;
761}
762EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
763void tracing_snapshot_alloc(void)
764{
765 /* Give warning */
766 tracing_snapshot();
767}
1b22e382 768EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
769#endif /* CONFIG_TRACER_SNAPSHOT */
770
5280bcef 771static void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
772{
773 if (tr->trace_buffer.buffer)
774 ring_buffer_record_off(tr->trace_buffer.buffer);
775 /*
776 * This flag is looked at when buffers haven't been allocated
777 * yet, or by some tracers (like irqsoff), that just want to
778 * know if the ring buffer has been disabled, but it can handle
779 * races of where it gets disabled but we still do a record.
780 * As the check is in the fast path of the tracers, it is more
781 * important to be fast than accurate.
782 */
783 tr->buffer_disabled = 1;
784 /* Make the flag seen by readers */
785 smp_wmb();
786}
787
499e5470
SR
788/**
789 * tracing_off - turn off tracing buffers
790 *
791 * This function stops the tracing buffers from recording data.
792 * It does not disable any overhead the tracers themselves may
793 * be causing. This function simply causes all recording to
794 * the ring buffers to fail.
795 */
796void tracing_off(void)
797{
10246fa3 798 tracer_tracing_off(&global_trace);
499e5470
SR
799}
800EXPORT_SYMBOL_GPL(tracing_off);
801
de7edd31
SRRH
802void disable_trace_on_warning(void)
803{
804 if (__disable_trace_on_warning)
805 tracing_off();
806}
807
10246fa3
SRRH
808/**
809 * tracer_tracing_is_on - show real state of ring buffer enabled
810 * @tr : the trace array to know if ring buffer is enabled
811 *
812 * Shows real state of the ring buffer if it is enabled or not.
813 */
5280bcef 814static int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
815{
816 if (tr->trace_buffer.buffer)
817 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
818 return !tr->buffer_disabled;
819}
820
499e5470
SR
821/**
822 * tracing_is_on - show state of ring buffers enabled
823 */
824int tracing_is_on(void)
825{
10246fa3 826 return tracer_tracing_is_on(&global_trace);
499e5470
SR
827}
828EXPORT_SYMBOL_GPL(tracing_is_on);
829
3928a8a2 830static int __init set_buf_size(char *str)
bc0c38d1 831{
3928a8a2 832 unsigned long buf_size;
c6caeeb1 833
bc0c38d1
SR
834 if (!str)
835 return 0;
9d612bef 836 buf_size = memparse(str, &str);
c6caeeb1 837 /* nr_entries can not be zero */
9d612bef 838 if (buf_size == 0)
c6caeeb1 839 return 0;
3928a8a2 840 trace_buf_size = buf_size;
bc0c38d1
SR
841 return 1;
842}
3928a8a2 843__setup("trace_buf_size=", set_buf_size);
bc0c38d1 844
0e950173
TB
845static int __init set_tracing_thresh(char *str)
846{
87abb3b1 847 unsigned long threshold;
0e950173
TB
848 int ret;
849
850 if (!str)
851 return 0;
bcd83ea6 852 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
853 if (ret < 0)
854 return 0;
87abb3b1 855 tracing_thresh = threshold * 1000;
0e950173
TB
856 return 1;
857}
858__setup("tracing_thresh=", set_tracing_thresh);
859
57f50be1
SR
860unsigned long nsecs_to_usecs(unsigned long nsecs)
861{
862 return nsecs / 1000;
863}
864
a3418a36
SRRH
865/*
866 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
867 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
868 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
869 * of strings in the order that the enums were defined.
870 */
871#undef C
872#define C(a, b) b
873
4fcdae83 874/* These must match the bit postions in trace_iterator_flags */
bc0c38d1 875static const char *trace_options[] = {
a3418a36 876 TRACE_FLAGS
bc0c38d1
SR
877 NULL
878};
879
5079f326
Z
880static struct {
881 u64 (*func)(void);
882 const char *name;
8be0709f 883 int in_ns; /* is this clock in nanoseconds? */
5079f326 884} trace_clocks[] = {
1b3e5c09
TG
885 { trace_clock_local, "local", 1 },
886 { trace_clock_global, "global", 1 },
887 { trace_clock_counter, "counter", 0 },
e7fda6c4 888 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
889 { trace_clock, "perf", 1 },
890 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 891 { ktime_get_raw_fast_ns, "mono_raw", 1 },
8cbd9cc6 892 ARCH_TRACE_CLOCKS
5079f326
Z
893};
894
b63f39ea 895/*
896 * trace_parser_get_init - gets the buffer for trace parser
897 */
898int trace_parser_get_init(struct trace_parser *parser, int size)
899{
900 memset(parser, 0, sizeof(*parser));
901
902 parser->buffer = kmalloc(size, GFP_KERNEL);
903 if (!parser->buffer)
904 return 1;
905
906 parser->size = size;
907 return 0;
908}
909
910/*
911 * trace_parser_put - frees the buffer for trace parser
912 */
913void trace_parser_put(struct trace_parser *parser)
914{
915 kfree(parser->buffer);
916}
917
918/*
919 * trace_get_user - reads the user input string separated by space
920 * (matched by isspace(ch))
921 *
922 * For each string found the 'struct trace_parser' is updated,
923 * and the function returns.
924 *
925 * Returns number of bytes read.
926 *
927 * See kernel/trace/trace.h for 'struct trace_parser' details.
928 */
929int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
930 size_t cnt, loff_t *ppos)
931{
932 char ch;
933 size_t read = 0;
934 ssize_t ret;
935
936 if (!*ppos)
937 trace_parser_clear(parser);
938
939 ret = get_user(ch, ubuf++);
940 if (ret)
941 goto out;
942
943 read++;
944 cnt--;
945
946 /*
947 * The parser is not finished with the last write,
948 * continue reading the user input without skipping spaces.
949 */
950 if (!parser->cont) {
951 /* skip white space */
952 while (cnt && isspace(ch)) {
953 ret = get_user(ch, ubuf++);
954 if (ret)
955 goto out;
956 read++;
957 cnt--;
958 }
959
960 /* only spaces were written */
961 if (isspace(ch)) {
962 *ppos += read;
963 ret = read;
964 goto out;
965 }
966
967 parser->idx = 0;
968 }
969
970 /* read the non-space input */
971 while (cnt && !isspace(ch)) {
3c235a33 972 if (parser->idx < parser->size - 1)
b63f39ea 973 parser->buffer[parser->idx++] = ch;
974 else {
975 ret = -EINVAL;
976 goto out;
977 }
978 ret = get_user(ch, ubuf++);
979 if (ret)
980 goto out;
981 read++;
982 cnt--;
983 }
984
985 /* We either got finished input or we have to wait for another call. */
986 if (isspace(ch)) {
987 parser->buffer[parser->idx] = 0;
988 parser->cont = false;
057db848 989 } else if (parser->idx < parser->size - 1) {
b63f39ea 990 parser->cont = true;
991 parser->buffer[parser->idx++] = ch;
057db848
SR
992 } else {
993 ret = -EINVAL;
994 goto out;
b63f39ea 995 }
996
997 *ppos += read;
998 ret = read;
999
1000out:
1001 return ret;
1002}
1003
3a161d99 1004/* TODO add a seq_buf_to_buffer() */
b8b94265 1005static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1006{
1007 int len;
3c56819b 1008
5ac48378 1009 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1010 return -EBUSY;
1011
5ac48378 1012 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1013 if (cnt > len)
1014 cnt = len;
3a161d99 1015 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1016
3a161d99 1017 s->seq.readpos += cnt;
3c56819b
EGM
1018 return cnt;
1019}
1020
0e950173
TB
1021unsigned long __read_mostly tracing_thresh;
1022
5d4a9dba 1023#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1024/*
1025 * Copy the new maximum trace into the separate maximum-trace
1026 * structure. (this way the maximum trace is permanently saved,
1027 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1028 */
1029static void
1030__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1031{
12883efb
SRRH
1032 struct trace_buffer *trace_buf = &tr->trace_buffer;
1033 struct trace_buffer *max_buf = &tr->max_buffer;
1034 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1035 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1036
12883efb
SRRH
1037 max_buf->cpu = cpu;
1038 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1039
6d9b3fa5 1040 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1041 max_data->critical_start = data->critical_start;
1042 max_data->critical_end = data->critical_end;
5d4a9dba 1043
1acaa1b2 1044 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1045 max_data->pid = tsk->pid;
f17a5194
SRRH
1046 /*
1047 * If tsk == current, then use current_uid(), as that does not use
1048 * RCU. The irq tracer can be called out of RCU scope.
1049 */
1050 if (tsk == current)
1051 max_data->uid = current_uid();
1052 else
1053 max_data->uid = task_uid(tsk);
1054
8248ac05
SR
1055 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1056 max_data->policy = tsk->policy;
1057 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1058
1059 /* record this tasks comm */
1060 tracing_record_cmdline(tsk);
1061}
1062
4fcdae83
SR
1063/**
1064 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1065 * @tr: tracer
1066 * @tsk: the task with the latency
1067 * @cpu: The cpu that initiated the trace.
1068 *
1069 * Flip the buffers between the @tr and the max_tr and record information
1070 * about which task was the cause of this latency.
1071 */
e309b41d 1072void
bc0c38d1
SR
1073update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1074{
2721e72d 1075 struct ring_buffer *buf;
bc0c38d1 1076
2b6080f2 1077 if (tr->stop_count)
b8de7bd1
SR
1078 return;
1079
4c11d7ae 1080 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1081
45ad21ca 1082 if (!tr->allocated_snapshot) {
debdd57f 1083 /* Only the nop tracer should hit this when disabling */
2b6080f2 1084 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1085 return;
debdd57f 1086 }
34600f0e 1087
0b9b12c1 1088 arch_spin_lock(&tr->max_lock);
3928a8a2 1089
12883efb
SRRH
1090 buf = tr->trace_buffer.buffer;
1091 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1092 tr->max_buffer.buffer = buf;
3928a8a2 1093
bc0c38d1 1094 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1095 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1096}
1097
1098/**
1099 * update_max_tr_single - only copy one trace over, and reset the rest
1100 * @tr - tracer
1101 * @tsk - task with the latency
1102 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1103 *
1104 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1105 */
e309b41d 1106void
bc0c38d1
SR
1107update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1108{
3928a8a2 1109 int ret;
bc0c38d1 1110
2b6080f2 1111 if (tr->stop_count)
b8de7bd1
SR
1112 return;
1113
4c11d7ae 1114 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1115 if (!tr->allocated_snapshot) {
2930e04d 1116 /* Only the nop tracer should hit this when disabling */
9e8529af 1117 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1118 return;
2930e04d 1119 }
ef710e10 1120
0b9b12c1 1121 arch_spin_lock(&tr->max_lock);
bc0c38d1 1122
12883efb 1123 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1124
e8165dbb
SR
1125 if (ret == -EBUSY) {
1126 /*
1127 * We failed to swap the buffer due to a commit taking
1128 * place on this CPU. We fail to record, but we reset
1129 * the max trace buffer (no one writes directly to it)
1130 * and flag that it failed.
1131 */
12883efb 1132 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1133 "Failed to swap buffers due to commit in progress\n");
1134 }
1135
e8165dbb 1136 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1137
1138 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1139 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1140}
5d4a9dba 1141#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1142
e30f53aa 1143static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1144{
15693458
SRRH
1145 /* Iterators are static, they should be filled or empty */
1146 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1147 return 0;
0d5c6e1c 1148
e30f53aa
RV
1149 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1150 full);
0d5c6e1c
SR
1151}
1152
f4e781c0
SRRH
1153#ifdef CONFIG_FTRACE_STARTUP_TEST
1154static int run_tracer_selftest(struct tracer *type)
1155{
1156 struct trace_array *tr = &global_trace;
1157 struct tracer *saved_tracer = tr->current_trace;
1158 int ret;
0d5c6e1c 1159
f4e781c0
SRRH
1160 if (!type->selftest || tracing_selftest_disabled)
1161 return 0;
0d5c6e1c
SR
1162
1163 /*
f4e781c0
SRRH
1164 * Run a selftest on this tracer.
1165 * Here we reset the trace buffer, and set the current
1166 * tracer to be this tracer. The tracer can then run some
1167 * internal tracing to verify that everything is in order.
1168 * If we fail, we do not register this tracer.
0d5c6e1c 1169 */
f4e781c0 1170 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1171
f4e781c0
SRRH
1172 tr->current_trace = type;
1173
1174#ifdef CONFIG_TRACER_MAX_TRACE
1175 if (type->use_max_tr) {
1176 /* If we expanded the buffers, make sure the max is expanded too */
1177 if (ring_buffer_expanded)
1178 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1179 RING_BUFFER_ALL_CPUS);
1180 tr->allocated_snapshot = true;
1181 }
1182#endif
1183
1184 /* the test is responsible for initializing and enabling */
1185 pr_info("Testing tracer %s: ", type->name);
1186 ret = type->selftest(type, tr);
1187 /* the test is responsible for resetting too */
1188 tr->current_trace = saved_tracer;
1189 if (ret) {
1190 printk(KERN_CONT "FAILED!\n");
1191 /* Add the warning after printing 'FAILED' */
1192 WARN_ON(1);
1193 return -1;
1194 }
1195 /* Only reset on passing, to avoid touching corrupted buffers */
1196 tracing_reset_online_cpus(&tr->trace_buffer);
1197
1198#ifdef CONFIG_TRACER_MAX_TRACE
1199 if (type->use_max_tr) {
1200 tr->allocated_snapshot = false;
0d5c6e1c 1201
f4e781c0
SRRH
1202 /* Shrink the max buffer again */
1203 if (ring_buffer_expanded)
1204 ring_buffer_resize(tr->max_buffer.buffer, 1,
1205 RING_BUFFER_ALL_CPUS);
1206 }
1207#endif
1208
1209 printk(KERN_CONT "PASSED\n");
1210 return 0;
1211}
1212#else
1213static inline int run_tracer_selftest(struct tracer *type)
1214{
1215 return 0;
0d5c6e1c 1216}
f4e781c0 1217#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1218
41d9c0be
SRRH
1219static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1220
4fcdae83
SR
1221/**
1222 * register_tracer - register a tracer with the ftrace system.
1223 * @type - the plugin for the tracer
1224 *
1225 * Register a new plugin tracer.
1226 */
bc0c38d1
SR
1227int register_tracer(struct tracer *type)
1228{
1229 struct tracer *t;
bc0c38d1
SR
1230 int ret = 0;
1231
1232 if (!type->name) {
1233 pr_info("Tracer must have a name\n");
1234 return -1;
1235 }
1236
24a461d5 1237 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1238 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1239 return -1;
1240 }
1241
bc0c38d1 1242 mutex_lock(&trace_types_lock);
86fa2f60 1243
8e1b82e0
FW
1244 tracing_selftest_running = true;
1245
bc0c38d1
SR
1246 for (t = trace_types; t; t = t->next) {
1247 if (strcmp(type->name, t->name) == 0) {
1248 /* already found */
ee6c2c1b 1249 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1250 type->name);
1251 ret = -1;
1252 goto out;
1253 }
1254 }
1255
adf9f195
FW
1256 if (!type->set_flag)
1257 type->set_flag = &dummy_set_flag;
1258 if (!type->flags)
1259 type->flags = &dummy_tracer_flags;
1260 else
1261 if (!type->flags->opts)
1262 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1263
f4e781c0
SRRH
1264 ret = run_tracer_selftest(type);
1265 if (ret < 0)
1266 goto out;
60a11774 1267
bc0c38d1
SR
1268 type->next = trace_types;
1269 trace_types = type;
41d9c0be 1270 add_tracer_options(&global_trace, type);
60a11774 1271
bc0c38d1 1272 out:
8e1b82e0 1273 tracing_selftest_running = false;
bc0c38d1
SR
1274 mutex_unlock(&trace_types_lock);
1275
dac74940
SR
1276 if (ret || !default_bootup_tracer)
1277 goto out_unlock;
1278
ee6c2c1b 1279 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1280 goto out_unlock;
1281
1282 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1283 /* Do we want this tracer to start on bootup? */
607e2ea1 1284 tracing_set_tracer(&global_trace, type->name);
dac74940
SR
1285 default_bootup_tracer = NULL;
1286 /* disable other selftests, since this will break it. */
55034cd6 1287 tracing_selftest_disabled = true;
b2821ae6 1288#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1289 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1290 type->name);
b2821ae6 1291#endif
b2821ae6 1292
dac74940 1293 out_unlock:
bc0c38d1
SR
1294 return ret;
1295}
1296
12883efb 1297void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1298{
12883efb 1299 struct ring_buffer *buffer = buf->buffer;
f633903a 1300
a5416411
HT
1301 if (!buffer)
1302 return;
1303
f633903a
SR
1304 ring_buffer_record_disable(buffer);
1305
1306 /* Make sure all commits have finished */
1307 synchronize_sched();
68179686 1308 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1309
1310 ring_buffer_record_enable(buffer);
1311}
1312
12883efb 1313void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1314{
12883efb 1315 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1316 int cpu;
1317
a5416411
HT
1318 if (!buffer)
1319 return;
1320
621968cd
SR
1321 ring_buffer_record_disable(buffer);
1322
1323 /* Make sure all commits have finished */
1324 synchronize_sched();
1325
9457158b 1326 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1327
1328 for_each_online_cpu(cpu)
68179686 1329 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1330
1331 ring_buffer_record_enable(buffer);
213cc060
PE
1332}
1333
09d8091c 1334/* Must have trace_types_lock held */
873c642f 1335void tracing_reset_all_online_cpus(void)
9456f0fa 1336{
873c642f
SRRH
1337 struct trace_array *tr;
1338
873c642f 1339 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1340 tracing_reset_online_cpus(&tr->trace_buffer);
1341#ifdef CONFIG_TRACER_MAX_TRACE
1342 tracing_reset_online_cpus(&tr->max_buffer);
1343#endif
873c642f 1344 }
9456f0fa
SR
1345}
1346
939c7a4f 1347#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1348#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1349static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1350struct saved_cmdlines_buffer {
1351 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1352 unsigned *map_cmdline_to_pid;
1353 unsigned cmdline_num;
1354 int cmdline_idx;
1355 char *saved_cmdlines;
1356};
1357static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1358
25b0b44a 1359/* temporary disable recording */
4fd27358 1360static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1 1361
939c7a4f
YY
1362static inline char *get_saved_cmdlines(int idx)
1363{
1364 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1365}
1366
1367static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1368{
939c7a4f
YY
1369 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1370}
1371
1372static int allocate_cmdlines_buffer(unsigned int val,
1373 struct saved_cmdlines_buffer *s)
1374{
1375 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1376 GFP_KERNEL);
1377 if (!s->map_cmdline_to_pid)
1378 return -ENOMEM;
1379
1380 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1381 if (!s->saved_cmdlines) {
1382 kfree(s->map_cmdline_to_pid);
1383 return -ENOMEM;
1384 }
1385
1386 s->cmdline_idx = 0;
1387 s->cmdline_num = val;
1388 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1389 sizeof(s->map_pid_to_cmdline));
1390 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1391 val * sizeof(*s->map_cmdline_to_pid));
1392
1393 return 0;
1394}
1395
1396static int trace_create_savedcmd(void)
1397{
1398 int ret;
1399
a6af8fbf 1400 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1401 if (!savedcmd)
1402 return -ENOMEM;
1403
1404 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1405 if (ret < 0) {
1406 kfree(savedcmd);
1407 savedcmd = NULL;
1408 return -ENOMEM;
1409 }
1410
1411 return 0;
bc0c38d1
SR
1412}
1413
b5130b1e
CE
1414int is_tracing_stopped(void)
1415{
2b6080f2 1416 return global_trace.stop_count;
b5130b1e
CE
1417}
1418
0f048701
SR
1419/**
1420 * tracing_start - quick start of the tracer
1421 *
1422 * If tracing is enabled but was stopped by tracing_stop,
1423 * this will start the tracer back up.
1424 */
1425void tracing_start(void)
1426{
1427 struct ring_buffer *buffer;
1428 unsigned long flags;
1429
1430 if (tracing_disabled)
1431 return;
1432
2b6080f2
SR
1433 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1434 if (--global_trace.stop_count) {
1435 if (global_trace.stop_count < 0) {
b06a8301
SR
1436 /* Someone screwed up their debugging */
1437 WARN_ON_ONCE(1);
2b6080f2 1438 global_trace.stop_count = 0;
b06a8301 1439 }
0f048701
SR
1440 goto out;
1441 }
1442
a2f80714 1443 /* Prevent the buffers from switching */
0b9b12c1 1444 arch_spin_lock(&global_trace.max_lock);
0f048701 1445
12883efb 1446 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1447 if (buffer)
1448 ring_buffer_record_enable(buffer);
1449
12883efb
SRRH
1450#ifdef CONFIG_TRACER_MAX_TRACE
1451 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1452 if (buffer)
1453 ring_buffer_record_enable(buffer);
12883efb 1454#endif
0f048701 1455
0b9b12c1 1456 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1457
0f048701 1458 out:
2b6080f2
SR
1459 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1460}
1461
1462static void tracing_start_tr(struct trace_array *tr)
1463{
1464 struct ring_buffer *buffer;
1465 unsigned long flags;
1466
1467 if (tracing_disabled)
1468 return;
1469
1470 /* If global, we need to also start the max tracer */
1471 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1472 return tracing_start();
1473
1474 raw_spin_lock_irqsave(&tr->start_lock, flags);
1475
1476 if (--tr->stop_count) {
1477 if (tr->stop_count < 0) {
1478 /* Someone screwed up their debugging */
1479 WARN_ON_ONCE(1);
1480 tr->stop_count = 0;
1481 }
1482 goto out;
1483 }
1484
12883efb 1485 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1486 if (buffer)
1487 ring_buffer_record_enable(buffer);
1488
1489 out:
1490 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1491}
1492
1493/**
1494 * tracing_stop - quick stop of the tracer
1495 *
1496 * Light weight way to stop tracing. Use in conjunction with
1497 * tracing_start.
1498 */
1499void tracing_stop(void)
1500{
1501 struct ring_buffer *buffer;
1502 unsigned long flags;
1503
2b6080f2
SR
1504 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1505 if (global_trace.stop_count++)
0f048701
SR
1506 goto out;
1507
a2f80714 1508 /* Prevent the buffers from switching */
0b9b12c1 1509 arch_spin_lock(&global_trace.max_lock);
a2f80714 1510
12883efb 1511 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1512 if (buffer)
1513 ring_buffer_record_disable(buffer);
1514
12883efb
SRRH
1515#ifdef CONFIG_TRACER_MAX_TRACE
1516 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1517 if (buffer)
1518 ring_buffer_record_disable(buffer);
12883efb 1519#endif
0f048701 1520
0b9b12c1 1521 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1522
0f048701 1523 out:
2b6080f2
SR
1524 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1525}
1526
1527static void tracing_stop_tr(struct trace_array *tr)
1528{
1529 struct ring_buffer *buffer;
1530 unsigned long flags;
1531
1532 /* If global, we need to also stop the max tracer */
1533 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1534 return tracing_stop();
1535
1536 raw_spin_lock_irqsave(&tr->start_lock, flags);
1537 if (tr->stop_count++)
1538 goto out;
1539
12883efb 1540 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1541 if (buffer)
1542 ring_buffer_record_disable(buffer);
1543
1544 out:
1545 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1546}
1547
e309b41d 1548void trace_stop_cmdline_recording(void);
bc0c38d1 1549
379cfdac 1550static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1551{
a635cf04 1552 unsigned pid, idx;
bc0c38d1
SR
1553
1554 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1555 return 0;
bc0c38d1
SR
1556
1557 /*
1558 * It's not the end of the world if we don't get
1559 * the lock, but we also don't want to spin
1560 * nor do we want to disable interrupts,
1561 * so if we miss here, then better luck next time.
1562 */
0199c4e6 1563 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1564 return 0;
bc0c38d1 1565
939c7a4f 1566 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1567 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1568 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1569
a635cf04
CE
1570 /*
1571 * Check whether the cmdline buffer at idx has a pid
1572 * mapped. We are going to overwrite that entry so we
1573 * need to clear the map_pid_to_cmdline. Otherwise we
1574 * would read the new comm for the old pid.
1575 */
939c7a4f 1576 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1577 if (pid != NO_CMDLINE_MAP)
939c7a4f 1578 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1579
939c7a4f
YY
1580 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1581 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1582
939c7a4f 1583 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1584 }
1585
939c7a4f 1586 set_cmdline(idx, tsk->comm);
bc0c38d1 1587
0199c4e6 1588 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1589
1590 return 1;
bc0c38d1
SR
1591}
1592
4c27e756 1593static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1594{
bc0c38d1
SR
1595 unsigned map;
1596
4ca53085
SR
1597 if (!pid) {
1598 strcpy(comm, "<idle>");
1599 return;
1600 }
bc0c38d1 1601
74bf4076
SR
1602 if (WARN_ON_ONCE(pid < 0)) {
1603 strcpy(comm, "<XXX>");
1604 return;
1605 }
1606
4ca53085
SR
1607 if (pid > PID_MAX_DEFAULT) {
1608 strcpy(comm, "<...>");
1609 return;
1610 }
bc0c38d1 1611
939c7a4f 1612 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1613 if (map != NO_CMDLINE_MAP)
939c7a4f 1614 strcpy(comm, get_saved_cmdlines(map));
50d88758
TG
1615 else
1616 strcpy(comm, "<...>");
4c27e756
SRRH
1617}
1618
1619void trace_find_cmdline(int pid, char comm[])
1620{
1621 preempt_disable();
1622 arch_spin_lock(&trace_cmdline_lock);
1623
1624 __trace_find_cmdline(pid, comm);
bc0c38d1 1625
0199c4e6 1626 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1627 preempt_enable();
bc0c38d1
SR
1628}
1629
e309b41d 1630void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1631{
0fb9656d 1632 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1633 return;
1634
7ffbd48d
SR
1635 if (!__this_cpu_read(trace_cmdline_save))
1636 return;
1637
379cfdac
SRRH
1638 if (trace_save_cmdline(tsk))
1639 __this_cpu_write(trace_cmdline_save, false);
bc0c38d1
SR
1640}
1641
45dcd8b8 1642void
38697053
SR
1643tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1644 int pc)
bc0c38d1
SR
1645{
1646 struct task_struct *tsk = current;
bc0c38d1 1647
777e208d
SR
1648 entry->preempt_count = pc & 0xff;
1649 entry->pid = (tsk) ? tsk->pid : 0;
1650 entry->flags =
9244489a 1651#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 1652 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
1653#else
1654 TRACE_FLAG_IRQS_NOSUPPORT |
1655#endif
bc0c38d1
SR
1656 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1657 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
1658 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1659 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 1660}
f413cdb8 1661EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 1662
e77405ad
SR
1663struct ring_buffer_event *
1664trace_buffer_lock_reserve(struct ring_buffer *buffer,
1665 int type,
1666 unsigned long len,
1667 unsigned long flags, int pc)
51a763dd
ACM
1668{
1669 struct ring_buffer_event *event;
1670
e77405ad 1671 event = ring_buffer_lock_reserve(buffer, len);
51a763dd
ACM
1672 if (event != NULL) {
1673 struct trace_entry *ent = ring_buffer_event_data(event);
1674
1675 tracing_generic_entry_update(ent, flags, pc);
1676 ent->type = type;
1677 }
1678
1679 return event;
1680}
51a763dd 1681
7ffbd48d
SR
1682void
1683__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1684{
1685 __this_cpu_write(trace_cmdline_save, true);
1686 ring_buffer_unlock_commit(buffer, event);
1687}
1688
b7f0c959
SRRH
1689void trace_buffer_unlock_commit(struct trace_array *tr,
1690 struct ring_buffer *buffer,
1691 struct ring_buffer_event *event,
1692 unsigned long flags, int pc)
51a763dd 1693{
7ffbd48d 1694 __buffer_unlock_commit(buffer, event);
51a763dd 1695
2d34f489 1696 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
e77405ad 1697 ftrace_trace_userstack(buffer, flags, pc);
07edf712 1698}
0d5c6e1c 1699EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
51a763dd 1700
2c4a33ab
SRRH
1701static struct ring_buffer *temp_buffer;
1702
ccb469a1
SR
1703struct ring_buffer_event *
1704trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 1705 struct trace_event_file *trace_file,
ccb469a1
SR
1706 int type, unsigned long len,
1707 unsigned long flags, int pc)
1708{
2c4a33ab
SRRH
1709 struct ring_buffer_event *entry;
1710
7f1d2f82 1711 *current_rb = trace_file->tr->trace_buffer.buffer;
2c4a33ab 1712 entry = trace_buffer_lock_reserve(*current_rb,
ccb469a1 1713 type, len, flags, pc);
2c4a33ab
SRRH
1714 /*
1715 * If tracing is off, but we have triggers enabled
1716 * we still need to look at the event data. Use the temp_buffer
1717 * to store the trace event for the tigger to use. It's recusive
1718 * safe and will not be recorded anywhere.
1719 */
5d6ad960 1720 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab
SRRH
1721 *current_rb = temp_buffer;
1722 entry = trace_buffer_lock_reserve(*current_rb,
1723 type, len, flags, pc);
1724 }
1725 return entry;
ccb469a1
SR
1726}
1727EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1728
ef5580d0 1729struct ring_buffer_event *
e77405ad
SR
1730trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1731 int type, unsigned long len,
ef5580d0
SR
1732 unsigned long flags, int pc)
1733{
12883efb 1734 *current_rb = global_trace.trace_buffer.buffer;
e77405ad 1735 return trace_buffer_lock_reserve(*current_rb,
ef5580d0
SR
1736 type, len, flags, pc);
1737}
94487d6d 1738EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
ef5580d0 1739
b7f0c959
SRRH
1740void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1741 struct ring_buffer *buffer,
0d5c6e1c
SR
1742 struct ring_buffer_event *event,
1743 unsigned long flags, int pc,
1744 struct pt_regs *regs)
1fd8df2c 1745{
7ffbd48d 1746 __buffer_unlock_commit(buffer, event);
1fd8df2c 1747
2d34f489 1748 ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
1fd8df2c
MH
1749 ftrace_trace_userstack(buffer, flags, pc);
1750}
0d5c6e1c 1751EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1fd8df2c 1752
e77405ad
SR
1753void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1754 struct ring_buffer_event *event)
77d9f465 1755{
e77405ad 1756 ring_buffer_discard_commit(buffer, event);
ef5580d0 1757}
12acd473 1758EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
ef5580d0 1759
e309b41d 1760void
7be42151 1761trace_function(struct trace_array *tr,
38697053
SR
1762 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1763 int pc)
bc0c38d1 1764{
2425bcb9 1765 struct trace_event_call *call = &event_function;
12883efb 1766 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 1767 struct ring_buffer_event *event;
777e208d 1768 struct ftrace_entry *entry;
bc0c38d1 1769
d769041f 1770 /* If we are reading the ring buffer, don't trace */
dd17c8f7 1771 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
d769041f
SR
1772 return;
1773
e77405ad 1774 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
51a763dd 1775 flags, pc);
3928a8a2
SR
1776 if (!event)
1777 return;
1778 entry = ring_buffer_event_data(event);
777e208d
SR
1779 entry->ip = ip;
1780 entry->parent_ip = parent_ip;
e1112b4d 1781
f306cc82 1782 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1783 __buffer_unlock_commit(buffer, event);
bc0c38d1
SR
1784}
1785
c0a0d0d3 1786#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1787
1788#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1789struct ftrace_stack {
1790 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1791};
1792
1793static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1794static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1795
e77405ad 1796static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1797 unsigned long flags,
1fd8df2c 1798 int skip, int pc, struct pt_regs *regs)
86387f7e 1799{
2425bcb9 1800 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 1801 struct ring_buffer_event *event;
777e208d 1802 struct stack_entry *entry;
86387f7e 1803 struct stack_trace trace;
4a9bd3f1
SR
1804 int use_stack;
1805 int size = FTRACE_STACK_ENTRIES;
1806
1807 trace.nr_entries = 0;
1808 trace.skip = skip;
1809
1810 /*
1811 * Since events can happen in NMIs there's no safe way to
1812 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1813 * or NMI comes in, it will just have to use the default
1814 * FTRACE_STACK_SIZE.
1815 */
1816 preempt_disable_notrace();
1817
82146529 1818 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1819 /*
1820 * We don't need any atomic variables, just a barrier.
1821 * If an interrupt comes in, we don't care, because it would
1822 * have exited and put the counter back to what we want.
1823 * We just need a barrier to keep gcc from moving things
1824 * around.
1825 */
1826 barrier();
1827 if (use_stack == 1) {
bdffd893 1828 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1829 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1830
1831 if (regs)
1832 save_stack_trace_regs(regs, &trace);
1833 else
1834 save_stack_trace(&trace);
1835
1836 if (trace.nr_entries > size)
1837 size = trace.nr_entries;
1838 } else
1839 /* From now on, use_stack is a boolean */
1840 use_stack = 0;
1841
1842 size *= sizeof(unsigned long);
86387f7e 1843
e77405ad 1844 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1845 sizeof(*entry) + size, flags, pc);
3928a8a2 1846 if (!event)
4a9bd3f1
SR
1847 goto out;
1848 entry = ring_buffer_event_data(event);
86387f7e 1849
4a9bd3f1
SR
1850 memset(&entry->caller, 0, size);
1851
1852 if (use_stack)
1853 memcpy(&entry->caller, trace.entries,
1854 trace.nr_entries * sizeof(unsigned long));
1855 else {
1856 trace.max_entries = FTRACE_STACK_ENTRIES;
1857 trace.entries = entry->caller;
1858 if (regs)
1859 save_stack_trace_regs(regs, &trace);
1860 else
1861 save_stack_trace(&trace);
1862 }
1863
1864 entry->size = trace.nr_entries;
86387f7e 1865
f306cc82 1866 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1867 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1868
1869 out:
1870 /* Again, don't let gcc optimize things here */
1871 barrier();
82146529 1872 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1873 preempt_enable_notrace();
1874
f0a920d5
IM
1875}
1876
2d34f489
SRRH
1877static inline void ftrace_trace_stack(struct trace_array *tr,
1878 struct ring_buffer *buffer,
73dddbb5
SRRH
1879 unsigned long flags,
1880 int skip, int pc, struct pt_regs *regs)
53614991 1881{
2d34f489 1882 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
1883 return;
1884
73dddbb5 1885 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
1886}
1887
c0a0d0d3
FW
1888void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1889 int pc)
38697053 1890{
12883efb 1891 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1892}
1893
03889384
SR
1894/**
1895 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1896 * @skip: Number of functions to skip (helper handlers)
03889384 1897 */
c142be8e 1898void trace_dump_stack(int skip)
03889384
SR
1899{
1900 unsigned long flags;
1901
1902 if (tracing_disabled || tracing_selftest_running)
e36c5458 1903 return;
03889384
SR
1904
1905 local_save_flags(flags);
1906
c142be8e
SRRH
1907 /*
1908 * Skip 3 more, seems to get us at the caller of
1909 * this function.
1910 */
1911 skip += 3;
1912 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1913 flags, skip, preempt_count(), NULL);
03889384
SR
1914}
1915
91e86e56
SR
1916static DEFINE_PER_CPU(int, user_stack_count);
1917
e77405ad
SR
1918void
1919ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1920{
2425bcb9 1921 struct trace_event_call *call = &event_user_stack;
8d7c6a96 1922 struct ring_buffer_event *event;
02b67518
TE
1923 struct userstack_entry *entry;
1924 struct stack_trace trace;
02b67518 1925
983f938a 1926 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
1927 return;
1928
b6345879
SR
1929 /*
1930 * NMIs can not handle page faults, even with fix ups.
1931 * The save user stack can (and often does) fault.
1932 */
1933 if (unlikely(in_nmi()))
1934 return;
02b67518 1935
91e86e56
SR
1936 /*
1937 * prevent recursion, since the user stack tracing may
1938 * trigger other kernel events.
1939 */
1940 preempt_disable();
1941 if (__this_cpu_read(user_stack_count))
1942 goto out;
1943
1944 __this_cpu_inc(user_stack_count);
1945
e77405ad 1946 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1947 sizeof(*entry), flags, pc);
02b67518 1948 if (!event)
1dbd1951 1949 goto out_drop_count;
02b67518 1950 entry = ring_buffer_event_data(event);
02b67518 1951
48659d31 1952 entry->tgid = current->tgid;
02b67518
TE
1953 memset(&entry->caller, 0, sizeof(entry->caller));
1954
1955 trace.nr_entries = 0;
1956 trace.max_entries = FTRACE_STACK_ENTRIES;
1957 trace.skip = 0;
1958 trace.entries = entry->caller;
1959
1960 save_stack_trace_user(&trace);
f306cc82 1961 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1962 __buffer_unlock_commit(buffer, event);
91e86e56 1963
1dbd1951 1964 out_drop_count:
91e86e56 1965 __this_cpu_dec(user_stack_count);
91e86e56
SR
1966 out:
1967 preempt_enable();
02b67518
TE
1968}
1969
4fd27358
HE
1970#ifdef UNUSED
1971static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1972{
7be42151 1973 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1974}
4fd27358 1975#endif /* UNUSED */
02b67518 1976
c0a0d0d3
FW
1977#endif /* CONFIG_STACKTRACE */
1978
07d777fe
SR
1979/* created for use with alloc_percpu */
1980struct trace_buffer_struct {
1981 char buffer[TRACE_BUF_SIZE];
1982};
1983
1984static struct trace_buffer_struct *trace_percpu_buffer;
1985static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1986static struct trace_buffer_struct *trace_percpu_irq_buffer;
1987static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1988
1989/*
1990 * The buffer used is dependent on the context. There is a per cpu
1991 * buffer for normal context, softirq contex, hard irq context and
1992 * for NMI context. Thise allows for lockless recording.
1993 *
1994 * Note, if the buffers failed to be allocated, then this returns NULL
1995 */
1996static char *get_trace_buf(void)
1997{
1998 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
1999
2000 /*
2001 * If we have allocated per cpu buffers, then we do not
2002 * need to do any locking.
2003 */
2004 if (in_nmi())
2005 percpu_buffer = trace_percpu_nmi_buffer;
2006 else if (in_irq())
2007 percpu_buffer = trace_percpu_irq_buffer;
2008 else if (in_softirq())
2009 percpu_buffer = trace_percpu_sirq_buffer;
2010 else
2011 percpu_buffer = trace_percpu_buffer;
2012
2013 if (!percpu_buffer)
2014 return NULL;
2015
d8a0349c 2016 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2017}
2018
2019static int alloc_percpu_trace_buffer(void)
2020{
2021 struct trace_buffer_struct *buffers;
2022 struct trace_buffer_struct *sirq_buffers;
2023 struct trace_buffer_struct *irq_buffers;
2024 struct trace_buffer_struct *nmi_buffers;
2025
2026 buffers = alloc_percpu(struct trace_buffer_struct);
2027 if (!buffers)
2028 goto err_warn;
2029
2030 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2031 if (!sirq_buffers)
2032 goto err_sirq;
2033
2034 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2035 if (!irq_buffers)
2036 goto err_irq;
2037
2038 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2039 if (!nmi_buffers)
2040 goto err_nmi;
2041
2042 trace_percpu_buffer = buffers;
2043 trace_percpu_sirq_buffer = sirq_buffers;
2044 trace_percpu_irq_buffer = irq_buffers;
2045 trace_percpu_nmi_buffer = nmi_buffers;
2046
2047 return 0;
2048
2049 err_nmi:
2050 free_percpu(irq_buffers);
2051 err_irq:
2052 free_percpu(sirq_buffers);
2053 err_sirq:
2054 free_percpu(buffers);
2055 err_warn:
2056 WARN(1, "Could not allocate percpu trace_printk buffer");
2057 return -ENOMEM;
2058}
2059
81698831
SR
2060static int buffers_allocated;
2061
07d777fe
SR
2062void trace_printk_init_buffers(void)
2063{
07d777fe
SR
2064 if (buffers_allocated)
2065 return;
2066
2067 if (alloc_percpu_trace_buffer())
2068 return;
2069
2184db46
SR
2070 /* trace_printk() is for debug use only. Don't use it in production. */
2071
69a1c994
BP
2072 pr_warning("\n");
2073 pr_warning("**********************************************************\n");
2184db46
SR
2074 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2075 pr_warning("** **\n");
2076 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2077 pr_warning("** **\n");
2078 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2079 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2080 pr_warning("** **\n");
2081 pr_warning("** If you see this message and you are not debugging **\n");
2082 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2083 pr_warning("** **\n");
2084 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2085 pr_warning("**********************************************************\n");
07d777fe 2086
b382ede6
SR
2087 /* Expand the buffers to set size */
2088 tracing_update_buffers();
2089
07d777fe 2090 buffers_allocated = 1;
81698831
SR
2091
2092 /*
2093 * trace_printk_init_buffers() can be called by modules.
2094 * If that happens, then we need to start cmdline recording
2095 * directly here. If the global_trace.buffer is already
2096 * allocated here, then this was called by module code.
2097 */
12883efb 2098 if (global_trace.trace_buffer.buffer)
81698831
SR
2099 tracing_start_cmdline_record();
2100}
2101
2102void trace_printk_start_comm(void)
2103{
2104 /* Start tracing comms if trace printk is set */
2105 if (!buffers_allocated)
2106 return;
2107 tracing_start_cmdline_record();
2108}
2109
2110static void trace_printk_start_stop_comm(int enabled)
2111{
2112 if (!buffers_allocated)
2113 return;
2114
2115 if (enabled)
2116 tracing_start_cmdline_record();
2117 else
2118 tracing_stop_cmdline_record();
07d777fe
SR
2119}
2120
769b0441 2121/**
48ead020 2122 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2123 *
2124 */
40ce74f1 2125int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2126{
2425bcb9 2127 struct trace_event_call *call = &event_bprint;
769b0441 2128 struct ring_buffer_event *event;
e77405ad 2129 struct ring_buffer *buffer;
769b0441 2130 struct trace_array *tr = &global_trace;
48ead020 2131 struct bprint_entry *entry;
769b0441 2132 unsigned long flags;
07d777fe
SR
2133 char *tbuffer;
2134 int len = 0, size, pc;
769b0441
FW
2135
2136 if (unlikely(tracing_selftest_running || tracing_disabled))
2137 return 0;
2138
2139 /* Don't pollute graph traces with trace_vprintk internals */
2140 pause_graph_tracing();
2141
2142 pc = preempt_count();
5168ae50 2143 preempt_disable_notrace();
769b0441 2144
07d777fe
SR
2145 tbuffer = get_trace_buf();
2146 if (!tbuffer) {
2147 len = 0;
769b0441 2148 goto out;
07d777fe 2149 }
769b0441 2150
07d777fe 2151 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2152
07d777fe
SR
2153 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2154 goto out;
769b0441 2155
07d777fe 2156 local_save_flags(flags);
769b0441 2157 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2158 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2159 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2160 flags, pc);
769b0441 2161 if (!event)
07d777fe 2162 goto out;
769b0441
FW
2163 entry = ring_buffer_event_data(event);
2164 entry->ip = ip;
769b0441
FW
2165 entry->fmt = fmt;
2166
07d777fe 2167 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2168 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2169 __buffer_unlock_commit(buffer, event);
2d34f489 2170 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2171 }
769b0441 2172
769b0441 2173out:
5168ae50 2174 preempt_enable_notrace();
769b0441
FW
2175 unpause_graph_tracing();
2176
2177 return len;
2178}
48ead020
FW
2179EXPORT_SYMBOL_GPL(trace_vbprintk);
2180
12883efb
SRRH
2181static int
2182__trace_array_vprintk(struct ring_buffer *buffer,
2183 unsigned long ip, const char *fmt, va_list args)
48ead020 2184{
2425bcb9 2185 struct trace_event_call *call = &event_print;
48ead020 2186 struct ring_buffer_event *event;
07d777fe 2187 int len = 0, size, pc;
48ead020 2188 struct print_entry *entry;
07d777fe
SR
2189 unsigned long flags;
2190 char *tbuffer;
48ead020
FW
2191
2192 if (tracing_disabled || tracing_selftest_running)
2193 return 0;
2194
07d777fe
SR
2195 /* Don't pollute graph traces with trace_vprintk internals */
2196 pause_graph_tracing();
2197
48ead020
FW
2198 pc = preempt_count();
2199 preempt_disable_notrace();
48ead020 2200
07d777fe
SR
2201
2202 tbuffer = get_trace_buf();
2203 if (!tbuffer) {
2204 len = 0;
48ead020 2205 goto out;
07d777fe 2206 }
48ead020 2207
3558a5ac 2208 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2209
07d777fe 2210 local_save_flags(flags);
48ead020 2211 size = sizeof(*entry) + len + 1;
e77405ad 2212 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2213 flags, pc);
48ead020 2214 if (!event)
07d777fe 2215 goto out;
48ead020 2216 entry = ring_buffer_event_data(event);
c13d2f7c 2217 entry->ip = ip;
48ead020 2218
3558a5ac 2219 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2220 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2221 __buffer_unlock_commit(buffer, event);
2d34f489 2222 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2223 }
48ead020
FW
2224 out:
2225 preempt_enable_notrace();
07d777fe 2226 unpause_graph_tracing();
48ead020
FW
2227
2228 return len;
2229}
659372d3 2230
12883efb
SRRH
2231int trace_array_vprintk(struct trace_array *tr,
2232 unsigned long ip, const char *fmt, va_list args)
2233{
2234 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2235}
2236
2237int trace_array_printk(struct trace_array *tr,
2238 unsigned long ip, const char *fmt, ...)
2239{
2240 int ret;
2241 va_list ap;
2242
983f938a 2243 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2244 return 0;
2245
2246 va_start(ap, fmt);
2247 ret = trace_array_vprintk(tr, ip, fmt, ap);
2248 va_end(ap);
2249 return ret;
2250}
2251
2252int trace_array_printk_buf(struct ring_buffer *buffer,
2253 unsigned long ip, const char *fmt, ...)
2254{
2255 int ret;
2256 va_list ap;
2257
983f938a 2258 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2259 return 0;
2260
2261 va_start(ap, fmt);
2262 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2263 va_end(ap);
2264 return ret;
2265}
2266
659372d3
SR
2267int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2268{
a813a159 2269 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2270}
769b0441
FW
2271EXPORT_SYMBOL_GPL(trace_vprintk);
2272
e2ac8ef5 2273static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2274{
6d158a81
SR
2275 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2276
5a90f577 2277 iter->idx++;
6d158a81
SR
2278 if (buf_iter)
2279 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2280}
2281
e309b41d 2282static struct trace_entry *
bc21b478
SR
2283peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2284 unsigned long *lost_events)
dd0e545f 2285{
3928a8a2 2286 struct ring_buffer_event *event;
6d158a81 2287 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2288
d769041f
SR
2289 if (buf_iter)
2290 event = ring_buffer_iter_peek(buf_iter, ts);
2291 else
12883efb 2292 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2293 lost_events);
d769041f 2294
4a9bd3f1
SR
2295 if (event) {
2296 iter->ent_size = ring_buffer_event_length(event);
2297 return ring_buffer_event_data(event);
2298 }
2299 iter->ent_size = 0;
2300 return NULL;
dd0e545f 2301}
d769041f 2302
dd0e545f 2303static struct trace_entry *
bc21b478
SR
2304__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2305 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2306{
12883efb 2307 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2308 struct trace_entry *ent, *next = NULL;
aa27497c 2309 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2310 int cpu_file = iter->cpu_file;
3928a8a2 2311 u64 next_ts = 0, ts;
bc0c38d1 2312 int next_cpu = -1;
12b5da34 2313 int next_size = 0;
bc0c38d1
SR
2314 int cpu;
2315
b04cc6b1
FW
2316 /*
2317 * If we are in a per_cpu trace file, don't bother by iterating over
2318 * all cpu and peek directly.
2319 */
ae3b5093 2320 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2321 if (ring_buffer_empty_cpu(buffer, cpu_file))
2322 return NULL;
bc21b478 2323 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2324 if (ent_cpu)
2325 *ent_cpu = cpu_file;
2326
2327 return ent;
2328 }
2329
ab46428c 2330 for_each_tracing_cpu(cpu) {
dd0e545f 2331
3928a8a2
SR
2332 if (ring_buffer_empty_cpu(buffer, cpu))
2333 continue;
dd0e545f 2334
bc21b478 2335 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2336
cdd31cd2
IM
2337 /*
2338 * Pick the entry with the smallest timestamp:
2339 */
3928a8a2 2340 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2341 next = ent;
2342 next_cpu = cpu;
3928a8a2 2343 next_ts = ts;
bc21b478 2344 next_lost = lost_events;
12b5da34 2345 next_size = iter->ent_size;
bc0c38d1
SR
2346 }
2347 }
2348
12b5da34
SR
2349 iter->ent_size = next_size;
2350
bc0c38d1
SR
2351 if (ent_cpu)
2352 *ent_cpu = next_cpu;
2353
3928a8a2
SR
2354 if (ent_ts)
2355 *ent_ts = next_ts;
2356
bc21b478
SR
2357 if (missing_events)
2358 *missing_events = next_lost;
2359
bc0c38d1
SR
2360 return next;
2361}
2362
dd0e545f 2363/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2364struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2365 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2366{
bc21b478 2367 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2368}
2369
2370/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2371void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2372{
bc21b478
SR
2373 iter->ent = __find_next_entry(iter, &iter->cpu,
2374 &iter->lost_events, &iter->ts);
dd0e545f 2375
3928a8a2 2376 if (iter->ent)
e2ac8ef5 2377 trace_iterator_increment(iter);
dd0e545f 2378
3928a8a2 2379 return iter->ent ? iter : NULL;
b3806b43 2380}
bc0c38d1 2381
e309b41d 2382static void trace_consume(struct trace_iterator *iter)
b3806b43 2383{
12883efb 2384 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2385 &iter->lost_events);
bc0c38d1
SR
2386}
2387
e309b41d 2388static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2389{
2390 struct trace_iterator *iter = m->private;
bc0c38d1 2391 int i = (int)*pos;
4e3c3333 2392 void *ent;
bc0c38d1 2393
a63ce5b3
SR
2394 WARN_ON_ONCE(iter->leftover);
2395
bc0c38d1
SR
2396 (*pos)++;
2397
2398 /* can't go backwards */
2399 if (iter->idx > i)
2400 return NULL;
2401
2402 if (iter->idx < 0)
955b61e5 2403 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2404 else
2405 ent = iter;
2406
2407 while (ent && iter->idx < i)
955b61e5 2408 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2409
2410 iter->pos = *pos;
2411
bc0c38d1
SR
2412 return ent;
2413}
2414
955b61e5 2415void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2416{
2f26ebd5
SR
2417 struct ring_buffer_event *event;
2418 struct ring_buffer_iter *buf_iter;
2419 unsigned long entries = 0;
2420 u64 ts;
2421
12883efb 2422 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2423
6d158a81
SR
2424 buf_iter = trace_buffer_iter(iter, cpu);
2425 if (!buf_iter)
2f26ebd5
SR
2426 return;
2427
2f26ebd5
SR
2428 ring_buffer_iter_reset(buf_iter);
2429
2430 /*
2431 * We could have the case with the max latency tracers
2432 * that a reset never took place on a cpu. This is evident
2433 * by the timestamp being before the start of the buffer.
2434 */
2435 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2436 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2437 break;
2438 entries++;
2439 ring_buffer_read(buf_iter, NULL);
2440 }
2441
12883efb 2442 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2443}
2444
d7350c3f 2445/*
d7350c3f
FW
2446 * The current tracer is copied to avoid a global locking
2447 * all around.
2448 */
bc0c38d1
SR
2449static void *s_start(struct seq_file *m, loff_t *pos)
2450{
2451 struct trace_iterator *iter = m->private;
2b6080f2 2452 struct trace_array *tr = iter->tr;
b04cc6b1 2453 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2454 void *p = NULL;
2455 loff_t l = 0;
3928a8a2 2456 int cpu;
bc0c38d1 2457
2fd196ec
HT
2458 /*
2459 * copy the tracer to avoid using a global lock all around.
2460 * iter->trace is a copy of current_trace, the pointer to the
2461 * name may be used instead of a strcmp(), as iter->trace->name
2462 * will point to the same string as current_trace->name.
2463 */
bc0c38d1 2464 mutex_lock(&trace_types_lock);
2b6080f2
SR
2465 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2466 *iter->trace = *tr->current_trace;
d7350c3f 2467 mutex_unlock(&trace_types_lock);
bc0c38d1 2468
12883efb 2469#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2470 if (iter->snapshot && iter->trace->use_max_tr)
2471 return ERR_PTR(-EBUSY);
12883efb 2472#endif
debdd57f
HT
2473
2474 if (!iter->snapshot)
2475 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2476
bc0c38d1
SR
2477 if (*pos != iter->pos) {
2478 iter->ent = NULL;
2479 iter->cpu = 0;
2480 iter->idx = -1;
2481
ae3b5093 2482 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2483 for_each_tracing_cpu(cpu)
2f26ebd5 2484 tracing_iter_reset(iter, cpu);
b04cc6b1 2485 } else
2f26ebd5 2486 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2487
ac91d854 2488 iter->leftover = 0;
bc0c38d1
SR
2489 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2490 ;
2491
2492 } else {
a63ce5b3
SR
2493 /*
2494 * If we overflowed the seq_file before, then we want
2495 * to just reuse the trace_seq buffer again.
2496 */
2497 if (iter->leftover)
2498 p = iter;
2499 else {
2500 l = *pos - 1;
2501 p = s_next(m, p, &l);
2502 }
bc0c38d1
SR
2503 }
2504
4f535968 2505 trace_event_read_lock();
7e53bd42 2506 trace_access_lock(cpu_file);
bc0c38d1
SR
2507 return p;
2508}
2509
2510static void s_stop(struct seq_file *m, void *p)
2511{
7e53bd42
LJ
2512 struct trace_iterator *iter = m->private;
2513
12883efb 2514#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2515 if (iter->snapshot && iter->trace->use_max_tr)
2516 return;
12883efb 2517#endif
debdd57f
HT
2518
2519 if (!iter->snapshot)
2520 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2521
7e53bd42 2522 trace_access_unlock(iter->cpu_file);
4f535968 2523 trace_event_read_unlock();
bc0c38d1
SR
2524}
2525
39eaf7ef 2526static void
12883efb
SRRH
2527get_total_entries(struct trace_buffer *buf,
2528 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2529{
2530 unsigned long count;
2531 int cpu;
2532
2533 *total = 0;
2534 *entries = 0;
2535
2536 for_each_tracing_cpu(cpu) {
12883efb 2537 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2538 /*
2539 * If this buffer has skipped entries, then we hold all
2540 * entries for the trace and we need to ignore the
2541 * ones before the time stamp.
2542 */
12883efb
SRRH
2543 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2544 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2545 /* total is the same as the entries */
2546 *total += count;
2547 } else
2548 *total += count +
12883efb 2549 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2550 *entries += count;
2551 }
2552}
2553
e309b41d 2554static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2555{
d79ac28f
RV
2556 seq_puts(m, "# _------=> CPU# \n"
2557 "# / _-----=> irqs-off \n"
2558 "# | / _----=> need-resched \n"
2559 "# || / _---=> hardirq/softirq \n"
2560 "# ||| / _--=> preempt-depth \n"
2561 "# |||| / delay \n"
2562 "# cmd pid ||||| time | caller \n"
2563 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2564}
2565
12883efb 2566static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2567{
39eaf7ef
SR
2568 unsigned long total;
2569 unsigned long entries;
2570
12883efb 2571 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2572 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2573 entries, total, num_online_cpus());
2574 seq_puts(m, "#\n");
2575}
2576
12883efb 2577static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2578{
12883efb 2579 print_event_info(buf, m);
d79ac28f
RV
2580 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2581 "# | | | | |\n");
bc0c38d1
SR
2582}
2583
12883efb 2584static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2585{
12883efb 2586 print_event_info(buf, m);
d79ac28f
RV
2587 seq_puts(m, "# _-----=> irqs-off\n"
2588 "# / _----=> need-resched\n"
2589 "# | / _---=> hardirq/softirq\n"
2590 "# || / _--=> preempt-depth\n"
2591 "# ||| / delay\n"
2592 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2593 "# | | | |||| | |\n");
77271ce4 2594}
bc0c38d1 2595
62b915f1 2596void
bc0c38d1
SR
2597print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2598{
983f938a 2599 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2600 struct trace_buffer *buf = iter->trace_buffer;
2601 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2602 struct tracer *type = iter->trace;
39eaf7ef
SR
2603 unsigned long entries;
2604 unsigned long total;
bc0c38d1
SR
2605 const char *name = "preemption";
2606
d840f718 2607 name = type->name;
bc0c38d1 2608
12883efb 2609 get_total_entries(buf, &total, &entries);
bc0c38d1 2610
888b55dc 2611 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2612 name, UTS_RELEASE);
888b55dc 2613 seq_puts(m, "# -----------------------------------"
bc0c38d1 2614 "---------------------------------\n");
888b55dc 2615 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2616 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2617 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2618 entries,
4c11d7ae 2619 total,
12883efb 2620 buf->cpu,
bc0c38d1
SR
2621#if defined(CONFIG_PREEMPT_NONE)
2622 "server",
2623#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2624 "desktop",
b5c21b45 2625#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2626 "preempt",
2627#else
2628 "unknown",
2629#endif
2630 /* These are reserved for later use */
2631 0, 0, 0, 0);
2632#ifdef CONFIG_SMP
2633 seq_printf(m, " #P:%d)\n", num_online_cpus());
2634#else
2635 seq_puts(m, ")\n");
2636#endif
888b55dc
KM
2637 seq_puts(m, "# -----------------\n");
2638 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2639 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2640 data->comm, data->pid,
2641 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2642 data->policy, data->rt_priority);
888b55dc 2643 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2644
2645 if (data->critical_start) {
888b55dc 2646 seq_puts(m, "# => started at: ");
214023c3
SR
2647 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2648 trace_print_seq(m, &iter->seq);
888b55dc 2649 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2650 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2651 trace_print_seq(m, &iter->seq);
8248ac05 2652 seq_puts(m, "\n#\n");
bc0c38d1
SR
2653 }
2654
888b55dc 2655 seq_puts(m, "#\n");
bc0c38d1
SR
2656}
2657
a309720c
SR
2658static void test_cpu_buff_start(struct trace_iterator *iter)
2659{
2660 struct trace_seq *s = &iter->seq;
983f938a 2661 struct trace_array *tr = iter->tr;
a309720c 2662
983f938a 2663 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
2664 return;
2665
2666 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2667 return;
2668
4462344e 2669 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2670 return;
2671
12883efb 2672 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2673 return;
2674
4462344e 2675 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2676
2677 /* Don't print started cpu buffer for the first entry of the trace */
2678 if (iter->idx > 1)
2679 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2680 iter->cpu);
a309720c
SR
2681}
2682
2c4f035f 2683static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2684{
983f938a 2685 struct trace_array *tr = iter->tr;
214023c3 2686 struct trace_seq *s = &iter->seq;
983f938a 2687 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2688 struct trace_entry *entry;
f633cef0 2689 struct trace_event *event;
bc0c38d1 2690
4e3c3333 2691 entry = iter->ent;
dd0e545f 2692
a309720c
SR
2693 test_cpu_buff_start(iter);
2694
c4a8e8be 2695 event = ftrace_find_event(entry->type);
bc0c38d1 2696
983f938a 2697 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2698 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2699 trace_print_lat_context(iter);
2700 else
2701 trace_print_context(iter);
c4a8e8be 2702 }
bc0c38d1 2703
19a7fe20
SRRH
2704 if (trace_seq_has_overflowed(s))
2705 return TRACE_TYPE_PARTIAL_LINE;
2706
268ccda0 2707 if (event)
a9a57763 2708 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2709
19a7fe20 2710 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2711
19a7fe20 2712 return trace_handle_return(s);
bc0c38d1
SR
2713}
2714
2c4f035f 2715static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 2716{
983f938a 2717 struct trace_array *tr = iter->tr;
f9896bf3
IM
2718 struct trace_seq *s = &iter->seq;
2719 struct trace_entry *entry;
f633cef0 2720 struct trace_event *event;
f9896bf3
IM
2721
2722 entry = iter->ent;
dd0e545f 2723
983f938a 2724 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
2725 trace_seq_printf(s, "%d %d %llu ",
2726 entry->pid, iter->cpu, iter->ts);
2727
2728 if (trace_seq_has_overflowed(s))
2729 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2730
f633cef0 2731 event = ftrace_find_event(entry->type);
268ccda0 2732 if (event)
a9a57763 2733 return event->funcs->raw(iter, 0, event);
d9793bd8 2734
19a7fe20 2735 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2736
19a7fe20 2737 return trace_handle_return(s);
f9896bf3
IM
2738}
2739
2c4f035f 2740static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 2741{
983f938a 2742 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
2743 struct trace_seq *s = &iter->seq;
2744 unsigned char newline = '\n';
2745 struct trace_entry *entry;
f633cef0 2746 struct trace_event *event;
5e3ca0ec
IM
2747
2748 entry = iter->ent;
dd0e545f 2749
983f938a 2750 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2751 SEQ_PUT_HEX_FIELD(s, entry->pid);
2752 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2753 SEQ_PUT_HEX_FIELD(s, iter->ts);
2754 if (trace_seq_has_overflowed(s))
2755 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2756 }
5e3ca0ec 2757
f633cef0 2758 event = ftrace_find_event(entry->type);
268ccda0 2759 if (event) {
a9a57763 2760 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2761 if (ret != TRACE_TYPE_HANDLED)
2762 return ret;
2763 }
7104f300 2764
19a7fe20 2765 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2766
19a7fe20 2767 return trace_handle_return(s);
5e3ca0ec
IM
2768}
2769
2c4f035f 2770static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 2771{
983f938a 2772 struct trace_array *tr = iter->tr;
cb0f12aa
IM
2773 struct trace_seq *s = &iter->seq;
2774 struct trace_entry *entry;
f633cef0 2775 struct trace_event *event;
cb0f12aa
IM
2776
2777 entry = iter->ent;
dd0e545f 2778
983f938a 2779 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2780 SEQ_PUT_FIELD(s, entry->pid);
2781 SEQ_PUT_FIELD(s, iter->cpu);
2782 SEQ_PUT_FIELD(s, iter->ts);
2783 if (trace_seq_has_overflowed(s))
2784 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2785 }
cb0f12aa 2786
f633cef0 2787 event = ftrace_find_event(entry->type);
a9a57763
SR
2788 return event ? event->funcs->binary(iter, 0, event) :
2789 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2790}
2791
62b915f1 2792int trace_empty(struct trace_iterator *iter)
bc0c38d1 2793{
6d158a81 2794 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2795 int cpu;
2796
9aba60fe 2797 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2798 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2799 cpu = iter->cpu_file;
6d158a81
SR
2800 buf_iter = trace_buffer_iter(iter, cpu);
2801 if (buf_iter) {
2802 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2803 return 0;
2804 } else {
12883efb 2805 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2806 return 0;
2807 }
2808 return 1;
2809 }
2810
ab46428c 2811 for_each_tracing_cpu(cpu) {
6d158a81
SR
2812 buf_iter = trace_buffer_iter(iter, cpu);
2813 if (buf_iter) {
2814 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2815 return 0;
2816 } else {
12883efb 2817 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2818 return 0;
2819 }
bc0c38d1 2820 }
d769041f 2821
797d3712 2822 return 1;
bc0c38d1
SR
2823}
2824
4f535968 2825/* Called with trace_event_read_lock() held. */
955b61e5 2826enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2827{
983f938a
SRRH
2828 struct trace_array *tr = iter->tr;
2829 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
2830 enum print_line_t ret;
2831
19a7fe20
SRRH
2832 if (iter->lost_events) {
2833 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2834 iter->cpu, iter->lost_events);
2835 if (trace_seq_has_overflowed(&iter->seq))
2836 return TRACE_TYPE_PARTIAL_LINE;
2837 }
bc21b478 2838
2c4f035f
FW
2839 if (iter->trace && iter->trace->print_line) {
2840 ret = iter->trace->print_line(iter);
2841 if (ret != TRACE_TYPE_UNHANDLED)
2842 return ret;
2843 }
72829bc3 2844
09ae7234
SRRH
2845 if (iter->ent->type == TRACE_BPUTS &&
2846 trace_flags & TRACE_ITER_PRINTK &&
2847 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2848 return trace_print_bputs_msg_only(iter);
2849
48ead020
FW
2850 if (iter->ent->type == TRACE_BPRINT &&
2851 trace_flags & TRACE_ITER_PRINTK &&
2852 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2853 return trace_print_bprintk_msg_only(iter);
48ead020 2854
66896a85
FW
2855 if (iter->ent->type == TRACE_PRINT &&
2856 trace_flags & TRACE_ITER_PRINTK &&
2857 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2858 return trace_print_printk_msg_only(iter);
66896a85 2859
cb0f12aa
IM
2860 if (trace_flags & TRACE_ITER_BIN)
2861 return print_bin_fmt(iter);
2862
5e3ca0ec
IM
2863 if (trace_flags & TRACE_ITER_HEX)
2864 return print_hex_fmt(iter);
2865
f9896bf3
IM
2866 if (trace_flags & TRACE_ITER_RAW)
2867 return print_raw_fmt(iter);
2868
f9896bf3
IM
2869 return print_trace_fmt(iter);
2870}
2871
7e9a49ef
JO
2872void trace_latency_header(struct seq_file *m)
2873{
2874 struct trace_iterator *iter = m->private;
983f938a 2875 struct trace_array *tr = iter->tr;
7e9a49ef
JO
2876
2877 /* print nothing if the buffers are empty */
2878 if (trace_empty(iter))
2879 return;
2880
2881 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2882 print_trace_header(m, iter);
2883
983f938a 2884 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
2885 print_lat_help_header(m);
2886}
2887
62b915f1
JO
2888void trace_default_header(struct seq_file *m)
2889{
2890 struct trace_iterator *iter = m->private;
983f938a
SRRH
2891 struct trace_array *tr = iter->tr;
2892 unsigned long trace_flags = tr->trace_flags;
62b915f1 2893
f56e7f8e
JO
2894 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2895 return;
2896
62b915f1
JO
2897 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2898 /* print nothing if the buffers are empty */
2899 if (trace_empty(iter))
2900 return;
2901 print_trace_header(m, iter);
2902 if (!(trace_flags & TRACE_ITER_VERBOSE))
2903 print_lat_help_header(m);
2904 } else {
77271ce4
SR
2905 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2906 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2907 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2908 else
12883efb 2909 print_func_help_header(iter->trace_buffer, m);
77271ce4 2910 }
62b915f1
JO
2911 }
2912}
2913
e0a413f6
SR
2914static void test_ftrace_alive(struct seq_file *m)
2915{
2916 if (!ftrace_is_dead())
2917 return;
d79ac28f
RV
2918 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2919 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2920}
2921
d8741e2e 2922#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2923static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2924{
d79ac28f
RV
2925 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2926 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2927 "# Takes a snapshot of the main buffer.\n"
2928 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2929 "# (Doesn't have to be '2' works with any number that\n"
2930 "# is not a '0' or '1')\n");
d8741e2e 2931}
f1affcaa
SRRH
2932
2933static void show_snapshot_percpu_help(struct seq_file *m)
2934{
fa6f0cc7 2935 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2936#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2937 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2938 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2939#else
d79ac28f
RV
2940 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2941 "# Must use main snapshot file to allocate.\n");
f1affcaa 2942#endif
d79ac28f
RV
2943 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2944 "# (Doesn't have to be '2' works with any number that\n"
2945 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2946}
2947
d8741e2e
SRRH
2948static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2949{
45ad21ca 2950 if (iter->tr->allocated_snapshot)
fa6f0cc7 2951 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2952 else
fa6f0cc7 2953 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2954
fa6f0cc7 2955 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2956 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2957 show_snapshot_main_help(m);
2958 else
2959 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2960}
2961#else
2962/* Should never be called */
2963static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2964#endif
2965
bc0c38d1
SR
2966static int s_show(struct seq_file *m, void *v)
2967{
2968 struct trace_iterator *iter = v;
a63ce5b3 2969 int ret;
bc0c38d1
SR
2970
2971 if (iter->ent == NULL) {
2972 if (iter->tr) {
2973 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2974 seq_puts(m, "#\n");
e0a413f6 2975 test_ftrace_alive(m);
bc0c38d1 2976 }
d8741e2e
SRRH
2977 if (iter->snapshot && trace_empty(iter))
2978 print_snapshot_help(m, iter);
2979 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2980 iter->trace->print_header(m);
62b915f1
JO
2981 else
2982 trace_default_header(m);
2983
a63ce5b3
SR
2984 } else if (iter->leftover) {
2985 /*
2986 * If we filled the seq_file buffer earlier, we
2987 * want to just show it now.
2988 */
2989 ret = trace_print_seq(m, &iter->seq);
2990
2991 /* ret should this time be zero, but you never know */
2992 iter->leftover = ret;
2993
bc0c38d1 2994 } else {
f9896bf3 2995 print_trace_line(iter);
a63ce5b3
SR
2996 ret = trace_print_seq(m, &iter->seq);
2997 /*
2998 * If we overflow the seq_file buffer, then it will
2999 * ask us for this data again at start up.
3000 * Use that instead.
3001 * ret is 0 if seq_file write succeeded.
3002 * -1 otherwise.
3003 */
3004 iter->leftover = ret;
bc0c38d1
SR
3005 }
3006
3007 return 0;
3008}
3009
649e9c70
ON
3010/*
3011 * Should be used after trace_array_get(), trace_types_lock
3012 * ensures that i_cdev was already initialized.
3013 */
3014static inline int tracing_get_cpu(struct inode *inode)
3015{
3016 if (inode->i_cdev) /* See trace_create_cpu_file() */
3017 return (long)inode->i_cdev - 1;
3018 return RING_BUFFER_ALL_CPUS;
3019}
3020
88e9d34c 3021static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3022 .start = s_start,
3023 .next = s_next,
3024 .stop = s_stop,
3025 .show = s_show,
bc0c38d1
SR
3026};
3027
e309b41d 3028static struct trace_iterator *
6484c71c 3029__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3030{
6484c71c 3031 struct trace_array *tr = inode->i_private;
bc0c38d1 3032 struct trace_iterator *iter;
50e18b94 3033 int cpu;
bc0c38d1 3034
85a2f9b4
SR
3035 if (tracing_disabled)
3036 return ERR_PTR(-ENODEV);
60a11774 3037
50e18b94 3038 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3039 if (!iter)
3040 return ERR_PTR(-ENOMEM);
bc0c38d1 3041
72917235 3042 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3043 GFP_KERNEL);
93574fcc
DC
3044 if (!iter->buffer_iter)
3045 goto release;
3046
d7350c3f
FW
3047 /*
3048 * We make a copy of the current tracer to avoid concurrent
3049 * changes on it while we are reading.
3050 */
bc0c38d1 3051 mutex_lock(&trace_types_lock);
d7350c3f 3052 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3053 if (!iter->trace)
d7350c3f 3054 goto fail;
85a2f9b4 3055
2b6080f2 3056 *iter->trace = *tr->current_trace;
d7350c3f 3057
79f55997 3058 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3059 goto fail;
3060
12883efb
SRRH
3061 iter->tr = tr;
3062
3063#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3064 /* Currently only the top directory has a snapshot */
3065 if (tr->current_trace->print_max || snapshot)
12883efb 3066 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3067 else
12883efb
SRRH
3068#endif
3069 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3070 iter->snapshot = snapshot;
bc0c38d1 3071 iter->pos = -1;
6484c71c 3072 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3073 mutex_init(&iter->mutex);
bc0c38d1 3074
8bba1bf5
MM
3075 /* Notify the tracer early; before we stop tracing. */
3076 if (iter->trace && iter->trace->open)
a93751ca 3077 iter->trace->open(iter);
8bba1bf5 3078
12ef7d44 3079 /* Annotate start of buffers if we had overruns */
12883efb 3080 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3081 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3082
8be0709f 3083 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3084 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3085 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3086
debdd57f
HT
3087 /* stop the trace while dumping if we are not opening "snapshot" */
3088 if (!iter->snapshot)
2b6080f2 3089 tracing_stop_tr(tr);
2f26ebd5 3090
ae3b5093 3091 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3092 for_each_tracing_cpu(cpu) {
b04cc6b1 3093 iter->buffer_iter[cpu] =
12883efb 3094 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3095 }
3096 ring_buffer_read_prepare_sync();
3097 for_each_tracing_cpu(cpu) {
3098 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3099 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3100 }
3101 } else {
3102 cpu = iter->cpu_file;
3928a8a2 3103 iter->buffer_iter[cpu] =
12883efb 3104 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3105 ring_buffer_read_prepare_sync();
3106 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3107 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3108 }
3109
bc0c38d1
SR
3110 mutex_unlock(&trace_types_lock);
3111
bc0c38d1 3112 return iter;
3928a8a2 3113
d7350c3f 3114 fail:
3928a8a2 3115 mutex_unlock(&trace_types_lock);
d7350c3f 3116 kfree(iter->trace);
6d158a81 3117 kfree(iter->buffer_iter);
93574fcc 3118release:
50e18b94
JO
3119 seq_release_private(inode, file);
3120 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3121}
3122
3123int tracing_open_generic(struct inode *inode, struct file *filp)
3124{
60a11774
SR
3125 if (tracing_disabled)
3126 return -ENODEV;
3127
bc0c38d1
SR
3128 filp->private_data = inode->i_private;
3129 return 0;
3130}
3131
2e86421d
GB
3132bool tracing_is_disabled(void)
3133{
3134 return (tracing_disabled) ? true: false;
3135}
3136
7b85af63
SRRH
3137/*
3138 * Open and update trace_array ref count.
3139 * Must have the current trace_array passed to it.
3140 */
dcc30223 3141static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3142{
3143 struct trace_array *tr = inode->i_private;
3144
3145 if (tracing_disabled)
3146 return -ENODEV;
3147
3148 if (trace_array_get(tr) < 0)
3149 return -ENODEV;
3150
3151 filp->private_data = inode->i_private;
3152
3153 return 0;
7b85af63
SRRH
3154}
3155
4fd27358 3156static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3157{
6484c71c 3158 struct trace_array *tr = inode->i_private;
907f2784 3159 struct seq_file *m = file->private_data;
4acd4d00 3160 struct trace_iterator *iter;
3928a8a2 3161 int cpu;
bc0c38d1 3162
ff451961 3163 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3164 trace_array_put(tr);
4acd4d00 3165 return 0;
ff451961 3166 }
4acd4d00 3167
6484c71c 3168 /* Writes do not use seq_file */
4acd4d00 3169 iter = m->private;
bc0c38d1 3170 mutex_lock(&trace_types_lock);
a695cb58 3171
3928a8a2
SR
3172 for_each_tracing_cpu(cpu) {
3173 if (iter->buffer_iter[cpu])
3174 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3175 }
3176
bc0c38d1
SR
3177 if (iter->trace && iter->trace->close)
3178 iter->trace->close(iter);
3179
debdd57f
HT
3180 if (!iter->snapshot)
3181 /* reenable tracing if it was previously enabled */
2b6080f2 3182 tracing_start_tr(tr);
f77d09a3
AL
3183
3184 __trace_array_put(tr);
3185
bc0c38d1
SR
3186 mutex_unlock(&trace_types_lock);
3187
d7350c3f 3188 mutex_destroy(&iter->mutex);
b0dfa978 3189 free_cpumask_var(iter->started);
d7350c3f 3190 kfree(iter->trace);
6d158a81 3191 kfree(iter->buffer_iter);
50e18b94 3192 seq_release_private(inode, file);
ff451961 3193
bc0c38d1
SR
3194 return 0;
3195}
3196
7b85af63
SRRH
3197static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3198{
3199 struct trace_array *tr = inode->i_private;
3200
3201 trace_array_put(tr);
bc0c38d1
SR
3202 return 0;
3203}
3204
7b85af63
SRRH
3205static int tracing_single_release_tr(struct inode *inode, struct file *file)
3206{
3207 struct trace_array *tr = inode->i_private;
3208
3209 trace_array_put(tr);
3210
3211 return single_release(inode, file);
3212}
3213
bc0c38d1
SR
3214static int tracing_open(struct inode *inode, struct file *file)
3215{
6484c71c 3216 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3217 struct trace_iterator *iter;
3218 int ret = 0;
bc0c38d1 3219
ff451961
SRRH
3220 if (trace_array_get(tr) < 0)
3221 return -ENODEV;
3222
4acd4d00 3223 /* If this file was open for write, then erase contents */
6484c71c
ON
3224 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3225 int cpu = tracing_get_cpu(inode);
3226
3227 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3228 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3229 else
6484c71c 3230 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3231 }
bc0c38d1 3232
4acd4d00 3233 if (file->f_mode & FMODE_READ) {
6484c71c 3234 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3235 if (IS_ERR(iter))
3236 ret = PTR_ERR(iter);
983f938a 3237 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
3238 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3239 }
ff451961
SRRH
3240
3241 if (ret < 0)
3242 trace_array_put(tr);
3243
bc0c38d1
SR
3244 return ret;
3245}
3246
607e2ea1
SRRH
3247/*
3248 * Some tracers are not suitable for instance buffers.
3249 * A tracer is always available for the global array (toplevel)
3250 * or if it explicitly states that it is.
3251 */
3252static bool
3253trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3254{
3255 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3256}
3257
3258/* Find the next tracer that this trace array may use */
3259static struct tracer *
3260get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3261{
3262 while (t && !trace_ok_for_array(t, tr))
3263 t = t->next;
3264
3265 return t;
3266}
3267
e309b41d 3268static void *
bc0c38d1
SR
3269t_next(struct seq_file *m, void *v, loff_t *pos)
3270{
607e2ea1 3271 struct trace_array *tr = m->private;
f129e965 3272 struct tracer *t = v;
bc0c38d1
SR
3273
3274 (*pos)++;
3275
3276 if (t)
607e2ea1 3277 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3278
bc0c38d1
SR
3279 return t;
3280}
3281
3282static void *t_start(struct seq_file *m, loff_t *pos)
3283{
607e2ea1 3284 struct trace_array *tr = m->private;
f129e965 3285 struct tracer *t;
bc0c38d1
SR
3286 loff_t l = 0;
3287
3288 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3289
3290 t = get_tracer_for_array(tr, trace_types);
3291 for (; t && l < *pos; t = t_next(m, t, &l))
3292 ;
bc0c38d1
SR
3293
3294 return t;
3295}
3296
3297static void t_stop(struct seq_file *m, void *p)
3298{
3299 mutex_unlock(&trace_types_lock);
3300}
3301
3302static int t_show(struct seq_file *m, void *v)
3303{
3304 struct tracer *t = v;
3305
3306 if (!t)
3307 return 0;
3308
fa6f0cc7 3309 seq_puts(m, t->name);
bc0c38d1
SR
3310 if (t->next)
3311 seq_putc(m, ' ');
3312 else
3313 seq_putc(m, '\n');
3314
3315 return 0;
3316}
3317
88e9d34c 3318static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3319 .start = t_start,
3320 .next = t_next,
3321 .stop = t_stop,
3322 .show = t_show,
bc0c38d1
SR
3323};
3324
3325static int show_traces_open(struct inode *inode, struct file *file)
3326{
607e2ea1
SRRH
3327 struct trace_array *tr = inode->i_private;
3328 struct seq_file *m;
3329 int ret;
3330
60a11774
SR
3331 if (tracing_disabled)
3332 return -ENODEV;
3333
607e2ea1
SRRH
3334 ret = seq_open(file, &show_traces_seq_ops);
3335 if (ret)
3336 return ret;
3337
3338 m = file->private_data;
3339 m->private = tr;
3340
3341 return 0;
bc0c38d1
SR
3342}
3343
4acd4d00
SR
3344static ssize_t
3345tracing_write_stub(struct file *filp, const char __user *ubuf,
3346 size_t count, loff_t *ppos)
3347{
3348 return count;
3349}
3350
098c879e 3351loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3352{
098c879e
SRRH
3353 int ret;
3354
364829b1 3355 if (file->f_mode & FMODE_READ)
098c879e 3356 ret = seq_lseek(file, offset, whence);
364829b1 3357 else
098c879e
SRRH
3358 file->f_pos = ret = 0;
3359
3360 return ret;
364829b1
SP
3361}
3362
5e2336a0 3363static const struct file_operations tracing_fops = {
4bf39a94
IM
3364 .open = tracing_open,
3365 .read = seq_read,
4acd4d00 3366 .write = tracing_write_stub,
098c879e 3367 .llseek = tracing_lseek,
4bf39a94 3368 .release = tracing_release,
bc0c38d1
SR
3369};
3370
5e2336a0 3371static const struct file_operations show_traces_fops = {
c7078de1
IM
3372 .open = show_traces_open,
3373 .read = seq_read,
3374 .release = seq_release,
b444786f 3375 .llseek = seq_lseek,
c7078de1
IM
3376};
3377
36dfe925
IM
3378/*
3379 * The tracer itself will not take this lock, but still we want
3380 * to provide a consistent cpumask to user-space:
3381 */
3382static DEFINE_MUTEX(tracing_cpumask_update_lock);
3383
3384/*
3385 * Temporary storage for the character representation of the
3386 * CPU bitmask (and one more byte for the newline):
3387 */
3388static char mask_str[NR_CPUS + 1];
3389
c7078de1
IM
3390static ssize_t
3391tracing_cpumask_read(struct file *filp, char __user *ubuf,
3392 size_t count, loff_t *ppos)
3393{
ccfe9e42 3394 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3395 int len;
c7078de1
IM
3396
3397 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3398
1a40243b
TH
3399 len = snprintf(mask_str, count, "%*pb\n",
3400 cpumask_pr_args(tr->tracing_cpumask));
3401 if (len >= count) {
36dfe925
IM
3402 count = -EINVAL;
3403 goto out_err;
3404 }
36dfe925
IM
3405 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3406
3407out_err:
c7078de1
IM
3408 mutex_unlock(&tracing_cpumask_update_lock);
3409
3410 return count;
3411}
3412
3413static ssize_t
3414tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3415 size_t count, loff_t *ppos)
3416{
ccfe9e42 3417 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3418 cpumask_var_t tracing_cpumask_new;
2b6080f2 3419 int err, cpu;
9e01c1b7
RR
3420
3421 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3422 return -ENOMEM;
c7078de1 3423
9e01c1b7 3424 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3425 if (err)
36dfe925
IM
3426 goto err_unlock;
3427
215368e8
LZ
3428 mutex_lock(&tracing_cpumask_update_lock);
3429
a5e25883 3430 local_irq_disable();
0b9b12c1 3431 arch_spin_lock(&tr->max_lock);
ab46428c 3432 for_each_tracing_cpu(cpu) {
36dfe925
IM
3433 /*
3434 * Increase/decrease the disabled counter if we are
3435 * about to flip a bit in the cpumask:
3436 */
ccfe9e42 3437 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3438 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3439 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3440 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3441 }
ccfe9e42 3442 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3443 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3444 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3445 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3446 }
3447 }
0b9b12c1 3448 arch_spin_unlock(&tr->max_lock);
a5e25883 3449 local_irq_enable();
36dfe925 3450
ccfe9e42 3451 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3452
3453 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3454 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3455
3456 return count;
36dfe925
IM
3457
3458err_unlock:
215368e8 3459 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3460
3461 return err;
c7078de1
IM
3462}
3463
5e2336a0 3464static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3465 .open = tracing_open_generic_tr,
c7078de1
IM
3466 .read = tracing_cpumask_read,
3467 .write = tracing_cpumask_write,
ccfe9e42 3468 .release = tracing_release_generic_tr,
b444786f 3469 .llseek = generic_file_llseek,
bc0c38d1
SR
3470};
3471
fdb372ed 3472static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3473{
d8e83d26 3474 struct tracer_opt *trace_opts;
2b6080f2 3475 struct trace_array *tr = m->private;
d8e83d26 3476 u32 tracer_flags;
d8e83d26 3477 int i;
adf9f195 3478
d8e83d26 3479 mutex_lock(&trace_types_lock);
2b6080f2
SR
3480 tracer_flags = tr->current_trace->flags->val;
3481 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3482
bc0c38d1 3483 for (i = 0; trace_options[i]; i++) {
983f938a 3484 if (tr->trace_flags & (1 << i))
fdb372ed 3485 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3486 else
fdb372ed 3487 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3488 }
3489
adf9f195
FW
3490 for (i = 0; trace_opts[i].name; i++) {
3491 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3492 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3493 else
fdb372ed 3494 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3495 }
d8e83d26 3496 mutex_unlock(&trace_types_lock);
adf9f195 3497
fdb372ed 3498 return 0;
bc0c38d1 3499}
bc0c38d1 3500
8c1a49ae 3501static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3502 struct tracer_flags *tracer_flags,
3503 struct tracer_opt *opts, int neg)
3504{
8c1a49ae 3505 struct tracer *trace = tr->current_trace;
8d18eaaf 3506 int ret;
bc0c38d1 3507
8c1a49ae 3508 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3509 if (ret)
3510 return ret;
3511
3512 if (neg)
3513 tracer_flags->val &= ~opts->bit;
3514 else
3515 tracer_flags->val |= opts->bit;
3516 return 0;
bc0c38d1
SR
3517}
3518
adf9f195 3519/* Try to assign a tracer specific option */
8c1a49ae 3520static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3521{
8c1a49ae 3522 struct tracer *trace = tr->current_trace;
7770841e 3523 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3524 struct tracer_opt *opts = NULL;
8d18eaaf 3525 int i;
adf9f195 3526
7770841e
Z
3527 for (i = 0; tracer_flags->opts[i].name; i++) {
3528 opts = &tracer_flags->opts[i];
adf9f195 3529
8d18eaaf 3530 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3531 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3532 }
adf9f195 3533
8d18eaaf 3534 return -EINVAL;
adf9f195
FW
3535}
3536
613f04a0
SRRH
3537/* Some tracers require overwrite to stay enabled */
3538int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3539{
3540 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3541 return -1;
3542
3543 return 0;
3544}
3545
2b6080f2 3546int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3547{
3548 /* do nothing if flag is already set */
983f938a 3549 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
3550 return 0;
3551
3552 /* Give the tracer a chance to approve the change */
2b6080f2 3553 if (tr->current_trace->flag_changed)
bf6065b5 3554 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3555 return -EINVAL;
af4617bd
SR
3556
3557 if (enabled)
983f938a 3558 tr->trace_flags |= mask;
af4617bd 3559 else
983f938a 3560 tr->trace_flags &= ~mask;
e870e9a1
LZ
3561
3562 if (mask == TRACE_ITER_RECORD_CMD)
3563 trace_event_enable_cmd_record(enabled);
750912fa 3564
80902822 3565 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3566 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3567#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3568 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3569#endif
3570 }
81698831 3571
b9f9108c 3572 if (mask == TRACE_ITER_PRINTK) {
81698831 3573 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
3574 trace_printk_control(enabled);
3575 }
613f04a0
SRRH
3576
3577 return 0;
af4617bd
SR
3578}
3579
2b6080f2 3580static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3581{
8d18eaaf 3582 char *cmp;
bc0c38d1 3583 int neg = 0;
613f04a0 3584 int ret = -ENODEV;
bc0c38d1
SR
3585 int i;
3586
7bcfaf54 3587 cmp = strstrip(option);
bc0c38d1 3588
8d18eaaf 3589 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3590 neg = 1;
3591 cmp += 2;
3592 }
3593
69d34da2
SRRH
3594 mutex_lock(&trace_types_lock);
3595
bc0c38d1 3596 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3597 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3598 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3599 break;
3600 }
3601 }
adf9f195
FW
3602
3603 /* If no option could be set, test the specific tracer options */
69d34da2 3604 if (!trace_options[i])
8c1a49ae 3605 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3606
3607 mutex_unlock(&trace_types_lock);
bc0c38d1 3608
7bcfaf54
SR
3609 return ret;
3610}
3611
3612static ssize_t
3613tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3614 size_t cnt, loff_t *ppos)
3615{
2b6080f2
SR
3616 struct seq_file *m = filp->private_data;
3617 struct trace_array *tr = m->private;
7bcfaf54 3618 char buf[64];
613f04a0 3619 int ret;
7bcfaf54
SR
3620
3621 if (cnt >= sizeof(buf))
3622 return -EINVAL;
3623
3624 if (copy_from_user(&buf, ubuf, cnt))
3625 return -EFAULT;
3626
a8dd2176
SR
3627 buf[cnt] = 0;
3628
2b6080f2 3629 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3630 if (ret < 0)
3631 return ret;
7bcfaf54 3632
cf8517cf 3633 *ppos += cnt;
bc0c38d1
SR
3634
3635 return cnt;
3636}
3637
fdb372ed
LZ
3638static int tracing_trace_options_open(struct inode *inode, struct file *file)
3639{
7b85af63 3640 struct trace_array *tr = inode->i_private;
f77d09a3 3641 int ret;
7b85af63 3642
fdb372ed
LZ
3643 if (tracing_disabled)
3644 return -ENODEV;
2b6080f2 3645
7b85af63
SRRH
3646 if (trace_array_get(tr) < 0)
3647 return -ENODEV;
3648
f77d09a3
AL
3649 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3650 if (ret < 0)
3651 trace_array_put(tr);
3652
3653 return ret;
fdb372ed
LZ
3654}
3655
5e2336a0 3656static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3657 .open = tracing_trace_options_open,
3658 .read = seq_read,
3659 .llseek = seq_lseek,
7b85af63 3660 .release = tracing_single_release_tr,
ee6bce52 3661 .write = tracing_trace_options_write,
bc0c38d1
SR
3662};
3663
7bd2f24c
IM
3664static const char readme_msg[] =
3665 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3666 "# echo 0 > tracing_on : quick way to disable tracing\n"
3667 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3668 " Important files:\n"
3669 " trace\t\t\t- The static contents of the buffer\n"
3670 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3671 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3672 " current_tracer\t- function and latency tracers\n"
3673 " available_tracers\t- list of configured tracers for current_tracer\n"
3674 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3675 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3676 " trace_clock\t\t-change the clock used to order events\n"
3677 " local: Per cpu clock but may not be synced across CPUs\n"
3678 " global: Synced across CPUs but slows tracing down.\n"
3679 " counter: Not a clock, but just an increment\n"
3680 " uptime: Jiffy counter from time of boot\n"
3681 " perf: Same clock that perf events use\n"
3682#ifdef CONFIG_X86_64
3683 " x86-tsc: TSC cycle counter\n"
3684#endif
3685 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3686 " tracing_cpumask\t- Limit which CPUs to trace\n"
3687 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3688 "\t\t\t Remove sub-buffer with rmdir\n"
3689 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3690 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3691 "\t\t\t option name\n"
939c7a4f 3692 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3693#ifdef CONFIG_DYNAMIC_FTRACE
3694 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3695 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3696 "\t\t\t functions\n"
3697 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3698 "\t modules: Can select a group via module\n"
3699 "\t Format: :mod:<module-name>\n"
3700 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3701 "\t triggers: a command to perform when function is hit\n"
3702 "\t Format: <function>:<trigger>[:count]\n"
3703 "\t trigger: traceon, traceoff\n"
3704 "\t\t enable_event:<system>:<event>\n"
3705 "\t\t disable_event:<system>:<event>\n"
22f45649 3706#ifdef CONFIG_STACKTRACE
71485c45 3707 "\t\t stacktrace\n"
22f45649
SRRH
3708#endif
3709#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3710 "\t\t snapshot\n"
22f45649 3711#endif
17a280ea
SRRH
3712 "\t\t dump\n"
3713 "\t\t cpudump\n"
71485c45
SRRH
3714 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3715 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3716 "\t The first one will disable tracing every time do_fault is hit\n"
3717 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3718 "\t The first time do trap is hit and it disables tracing, the\n"
3719 "\t counter will decrement to 2. If tracing is already disabled,\n"
3720 "\t the counter will not decrement. It only decrements when the\n"
3721 "\t trigger did work\n"
3722 "\t To remove trigger without count:\n"
3723 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3724 "\t To remove trigger with a count:\n"
3725 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3726 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3727 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3728 "\t modules: Can select a group via module command :mod:\n"
3729 "\t Does not accept triggers\n"
22f45649
SRRH
3730#endif /* CONFIG_DYNAMIC_FTRACE */
3731#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3732 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3733 "\t\t (function)\n"
22f45649
SRRH
3734#endif
3735#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3736 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3737 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3738 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3739#endif
3740#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3741 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3742 "\t\t\t snapshot buffer. Read the contents for more\n"
3743 "\t\t\t information\n"
22f45649 3744#endif
991821c8 3745#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3746 " stack_trace\t\t- Shows the max stack trace when active\n"
3747 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3748 "\t\t\t Write into this file to reset the max size (trigger a\n"
3749 "\t\t\t new trace)\n"
22f45649 3750#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3751 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3752 "\t\t\t traces\n"
22f45649 3753#endif
991821c8 3754#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3755 " events/\t\t- Directory containing all trace event subsystems:\n"
3756 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3757 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3758 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3759 "\t\t\t events\n"
26f25564 3760 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3761 " events/<system>/<event>/\t- Directory containing control files for\n"
3762 "\t\t\t <event>:\n"
26f25564
TZ
3763 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3764 " filter\t\t- If set, only events passing filter are traced\n"
3765 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3766 "\t Format: <trigger>[:count][if <filter>]\n"
3767 "\t trigger: traceon, traceoff\n"
3768 "\t enable_event:<system>:<event>\n"
3769 "\t disable_event:<system>:<event>\n"
26f25564 3770#ifdef CONFIG_STACKTRACE
71485c45 3771 "\t\t stacktrace\n"
26f25564
TZ
3772#endif
3773#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3774 "\t\t snapshot\n"
26f25564 3775#endif
71485c45
SRRH
3776 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3777 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3778 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3779 "\t events/block/block_unplug/trigger\n"
3780 "\t The first disables tracing every time block_unplug is hit.\n"
3781 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3782 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3783 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3784 "\t Like function triggers, the counter is only decremented if it\n"
3785 "\t enabled or disabled tracing.\n"
3786 "\t To remove a trigger without a count:\n"
3787 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3788 "\t To remove a trigger with a count:\n"
3789 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3790 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3791;
3792
3793static ssize_t
3794tracing_readme_read(struct file *filp, char __user *ubuf,
3795 size_t cnt, loff_t *ppos)
3796{
3797 return simple_read_from_buffer(ubuf, cnt, ppos,
3798 readme_msg, strlen(readme_msg));
3799}
3800
5e2336a0 3801static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3802 .open = tracing_open_generic,
3803 .read = tracing_readme_read,
b444786f 3804 .llseek = generic_file_llseek,
7bd2f24c
IM
3805};
3806
42584c81
YY
3807static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3808{
3809 unsigned int *ptr = v;
69abe6a5 3810
42584c81
YY
3811 if (*pos || m->count)
3812 ptr++;
69abe6a5 3813
42584c81 3814 (*pos)++;
69abe6a5 3815
939c7a4f
YY
3816 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3817 ptr++) {
42584c81
YY
3818 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3819 continue;
69abe6a5 3820
42584c81
YY
3821 return ptr;
3822 }
69abe6a5 3823
42584c81
YY
3824 return NULL;
3825}
3826
3827static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3828{
3829 void *v;
3830 loff_t l = 0;
69abe6a5 3831
4c27e756
SRRH
3832 preempt_disable();
3833 arch_spin_lock(&trace_cmdline_lock);
3834
939c7a4f 3835 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3836 while (l <= *pos) {
3837 v = saved_cmdlines_next(m, v, &l);
3838 if (!v)
3839 return NULL;
69abe6a5
AP
3840 }
3841
42584c81
YY
3842 return v;
3843}
3844
3845static void saved_cmdlines_stop(struct seq_file *m, void *v)
3846{
4c27e756
SRRH
3847 arch_spin_unlock(&trace_cmdline_lock);
3848 preempt_enable();
42584c81 3849}
69abe6a5 3850
42584c81
YY
3851static int saved_cmdlines_show(struct seq_file *m, void *v)
3852{
3853 char buf[TASK_COMM_LEN];
3854 unsigned int *pid = v;
69abe6a5 3855
4c27e756 3856 __trace_find_cmdline(*pid, buf);
42584c81
YY
3857 seq_printf(m, "%d %s\n", *pid, buf);
3858 return 0;
3859}
3860
3861static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3862 .start = saved_cmdlines_start,
3863 .next = saved_cmdlines_next,
3864 .stop = saved_cmdlines_stop,
3865 .show = saved_cmdlines_show,
3866};
3867
3868static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3869{
3870 if (tracing_disabled)
3871 return -ENODEV;
3872
3873 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3874}
3875
3876static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3877 .open = tracing_saved_cmdlines_open,
3878 .read = seq_read,
3879 .llseek = seq_lseek,
3880 .release = seq_release,
69abe6a5
AP
3881};
3882
939c7a4f
YY
3883static ssize_t
3884tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3885 size_t cnt, loff_t *ppos)
3886{
3887 char buf[64];
3888 int r;
3889
3890 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3891 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3892 arch_spin_unlock(&trace_cmdline_lock);
3893
3894 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3895}
3896
3897static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3898{
3899 kfree(s->saved_cmdlines);
3900 kfree(s->map_cmdline_to_pid);
3901 kfree(s);
3902}
3903
3904static int tracing_resize_saved_cmdlines(unsigned int val)
3905{
3906 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3907
a6af8fbf 3908 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3909 if (!s)
3910 return -ENOMEM;
3911
3912 if (allocate_cmdlines_buffer(val, s) < 0) {
3913 kfree(s);
3914 return -ENOMEM;
3915 }
3916
3917 arch_spin_lock(&trace_cmdline_lock);
3918 savedcmd_temp = savedcmd;
3919 savedcmd = s;
3920 arch_spin_unlock(&trace_cmdline_lock);
3921 free_saved_cmdlines_buffer(savedcmd_temp);
3922
3923 return 0;
3924}
3925
3926static ssize_t
3927tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3928 size_t cnt, loff_t *ppos)
3929{
3930 unsigned long val;
3931 int ret;
3932
3933 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3934 if (ret)
3935 return ret;
3936
3937 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3938 if (!val || val > PID_MAX_DEFAULT)
3939 return -EINVAL;
3940
3941 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3942 if (ret < 0)
3943 return ret;
3944
3945 *ppos += cnt;
3946
3947 return cnt;
3948}
3949
3950static const struct file_operations tracing_saved_cmdlines_size_fops = {
3951 .open = tracing_open_generic,
3952 .read = tracing_saved_cmdlines_size_read,
3953 .write = tracing_saved_cmdlines_size_write,
3954};
3955
9828413d
SRRH
3956#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3957static union trace_enum_map_item *
3958update_enum_map(union trace_enum_map_item *ptr)
3959{
3960 if (!ptr->map.enum_string) {
3961 if (ptr->tail.next) {
3962 ptr = ptr->tail.next;
3963 /* Set ptr to the next real item (skip head) */
3964 ptr++;
3965 } else
3966 return NULL;
3967 }
3968 return ptr;
3969}
3970
3971static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3972{
3973 union trace_enum_map_item *ptr = v;
3974
3975 /*
3976 * Paranoid! If ptr points to end, we don't want to increment past it.
3977 * This really should never happen.
3978 */
3979 ptr = update_enum_map(ptr);
3980 if (WARN_ON_ONCE(!ptr))
3981 return NULL;
3982
3983 ptr++;
3984
3985 (*pos)++;
3986
3987 ptr = update_enum_map(ptr);
3988
3989 return ptr;
3990}
3991
3992static void *enum_map_start(struct seq_file *m, loff_t *pos)
3993{
3994 union trace_enum_map_item *v;
3995 loff_t l = 0;
3996
3997 mutex_lock(&trace_enum_mutex);
3998
3999 v = trace_enum_maps;
4000 if (v)
4001 v++;
4002
4003 while (v && l < *pos) {
4004 v = enum_map_next(m, v, &l);
4005 }
4006
4007 return v;
4008}
4009
4010static void enum_map_stop(struct seq_file *m, void *v)
4011{
4012 mutex_unlock(&trace_enum_mutex);
4013}
4014
4015static int enum_map_show(struct seq_file *m, void *v)
4016{
4017 union trace_enum_map_item *ptr = v;
4018
4019 seq_printf(m, "%s %ld (%s)\n",
4020 ptr->map.enum_string, ptr->map.enum_value,
4021 ptr->map.system);
4022
4023 return 0;
4024}
4025
4026static const struct seq_operations tracing_enum_map_seq_ops = {
4027 .start = enum_map_start,
4028 .next = enum_map_next,
4029 .stop = enum_map_stop,
4030 .show = enum_map_show,
4031};
4032
4033static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4034{
4035 if (tracing_disabled)
4036 return -ENODEV;
4037
4038 return seq_open(filp, &tracing_enum_map_seq_ops);
4039}
4040
4041static const struct file_operations tracing_enum_map_fops = {
4042 .open = tracing_enum_map_open,
4043 .read = seq_read,
4044 .llseek = seq_lseek,
4045 .release = seq_release,
4046};
4047
4048static inline union trace_enum_map_item *
4049trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4050{
4051 /* Return tail of array given the head */
4052 return ptr + ptr->head.length + 1;
4053}
4054
4055static void
4056trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4057 int len)
4058{
4059 struct trace_enum_map **stop;
4060 struct trace_enum_map **map;
4061 union trace_enum_map_item *map_array;
4062 union trace_enum_map_item *ptr;
4063
4064 stop = start + len;
4065
4066 /*
4067 * The trace_enum_maps contains the map plus a head and tail item,
4068 * where the head holds the module and length of array, and the
4069 * tail holds a pointer to the next list.
4070 */
4071 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4072 if (!map_array) {
4073 pr_warning("Unable to allocate trace enum mapping\n");
4074 return;
4075 }
4076
4077 mutex_lock(&trace_enum_mutex);
4078
4079 if (!trace_enum_maps)
4080 trace_enum_maps = map_array;
4081 else {
4082 ptr = trace_enum_maps;
4083 for (;;) {
4084 ptr = trace_enum_jmp_to_tail(ptr);
4085 if (!ptr->tail.next)
4086 break;
4087 ptr = ptr->tail.next;
4088
4089 }
4090 ptr->tail.next = map_array;
4091 }
4092 map_array->head.mod = mod;
4093 map_array->head.length = len;
4094 map_array++;
4095
4096 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4097 map_array->map = **map;
4098 map_array++;
4099 }
4100 memset(map_array, 0, sizeof(*map_array));
4101
4102 mutex_unlock(&trace_enum_mutex);
4103}
4104
4105static void trace_create_enum_file(struct dentry *d_tracer)
4106{
4107 trace_create_file("enum_map", 0444, d_tracer,
4108 NULL, &tracing_enum_map_fops);
4109}
4110
4111#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4112static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4113static inline void trace_insert_enum_map_file(struct module *mod,
4114 struct trace_enum_map **start, int len) { }
4115#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4116
4117static void trace_insert_enum_map(struct module *mod,
4118 struct trace_enum_map **start, int len)
0c564a53
SRRH
4119{
4120 struct trace_enum_map **map;
0c564a53
SRRH
4121
4122 if (len <= 0)
4123 return;
4124
4125 map = start;
4126
4127 trace_event_enum_update(map, len);
9828413d
SRRH
4128
4129 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4130}
4131
bc0c38d1
SR
4132static ssize_t
4133tracing_set_trace_read(struct file *filp, char __user *ubuf,
4134 size_t cnt, loff_t *ppos)
4135{
2b6080f2 4136 struct trace_array *tr = filp->private_data;
ee6c2c1b 4137 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4138 int r;
4139
4140 mutex_lock(&trace_types_lock);
2b6080f2 4141 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4142 mutex_unlock(&trace_types_lock);
4143
4bf39a94 4144 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4145}
4146
b6f11df2
ACM
4147int tracer_init(struct tracer *t, struct trace_array *tr)
4148{
12883efb 4149 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4150 return t->init(tr);
4151}
4152
12883efb 4153static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4154{
4155 int cpu;
737223fb 4156
438ced17 4157 for_each_tracing_cpu(cpu)
12883efb 4158 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4159}
4160
12883efb 4161#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4162/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4163static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4164 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4165{
4166 int cpu, ret = 0;
4167
4168 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4169 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4170 ret = ring_buffer_resize(trace_buf->buffer,
4171 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4172 if (ret < 0)
4173 break;
12883efb
SRRH
4174 per_cpu_ptr(trace_buf->data, cpu)->entries =
4175 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4176 }
4177 } else {
12883efb
SRRH
4178 ret = ring_buffer_resize(trace_buf->buffer,
4179 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4180 if (ret == 0)
12883efb
SRRH
4181 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4182 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4183 }
4184
4185 return ret;
4186}
12883efb 4187#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4188
2b6080f2
SR
4189static int __tracing_resize_ring_buffer(struct trace_array *tr,
4190 unsigned long size, int cpu)
73c5162a
SR
4191{
4192 int ret;
4193
4194 /*
4195 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4196 * we use the size that was given, and we can forget about
4197 * expanding it later.
73c5162a 4198 */
55034cd6 4199 ring_buffer_expanded = true;
73c5162a 4200
b382ede6 4201 /* May be called before buffers are initialized */
12883efb 4202 if (!tr->trace_buffer.buffer)
b382ede6
SR
4203 return 0;
4204
12883efb 4205 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4206 if (ret < 0)
4207 return ret;
4208
12883efb 4209#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4210 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4211 !tr->current_trace->use_max_tr)
ef710e10
KM
4212 goto out;
4213
12883efb 4214 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4215 if (ret < 0) {
12883efb
SRRH
4216 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4217 &tr->trace_buffer, cpu);
73c5162a 4218 if (r < 0) {
a123c52b
SR
4219 /*
4220 * AARGH! We are left with different
4221 * size max buffer!!!!
4222 * The max buffer is our "snapshot" buffer.
4223 * When a tracer needs a snapshot (one of the
4224 * latency tracers), it swaps the max buffer
4225 * with the saved snap shot. We succeeded to
4226 * update the size of the main buffer, but failed to
4227 * update the size of the max buffer. But when we tried
4228 * to reset the main buffer to the original size, we
4229 * failed there too. This is very unlikely to
4230 * happen, but if it does, warn and kill all
4231 * tracing.
4232 */
73c5162a
SR
4233 WARN_ON(1);
4234 tracing_disabled = 1;
4235 }
4236 return ret;
4237 }
4238
438ced17 4239 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4240 set_buffer_entries(&tr->max_buffer, size);
438ced17 4241 else
12883efb 4242 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4243
ef710e10 4244 out:
12883efb
SRRH
4245#endif /* CONFIG_TRACER_MAX_TRACE */
4246
438ced17 4247 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4248 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4249 else
12883efb 4250 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4251
4252 return ret;
4253}
4254
2b6080f2
SR
4255static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4256 unsigned long size, int cpu_id)
4f271a2a 4257{
83f40318 4258 int ret = size;
4f271a2a
VN
4259
4260 mutex_lock(&trace_types_lock);
4261
438ced17
VN
4262 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4263 /* make sure, this cpu is enabled in the mask */
4264 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4265 ret = -EINVAL;
4266 goto out;
4267 }
4268 }
4f271a2a 4269
2b6080f2 4270 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4271 if (ret < 0)
4272 ret = -ENOMEM;
4273
438ced17 4274out:
4f271a2a
VN
4275 mutex_unlock(&trace_types_lock);
4276
4277 return ret;
4278}
4279
ef710e10 4280
1852fcce
SR
4281/**
4282 * tracing_update_buffers - used by tracing facility to expand ring buffers
4283 *
4284 * To save on memory when the tracing is never used on a system with it
4285 * configured in. The ring buffers are set to a minimum size. But once
4286 * a user starts to use the tracing facility, then they need to grow
4287 * to their default size.
4288 *
4289 * This function is to be called when a tracer is about to be used.
4290 */
4291int tracing_update_buffers(void)
4292{
4293 int ret = 0;
4294
1027fcb2 4295 mutex_lock(&trace_types_lock);
1852fcce 4296 if (!ring_buffer_expanded)
2b6080f2 4297 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4298 RING_BUFFER_ALL_CPUS);
1027fcb2 4299 mutex_unlock(&trace_types_lock);
1852fcce
SR
4300
4301 return ret;
4302}
4303
577b785f
SR
4304struct trace_option_dentry;
4305
4306static struct trace_option_dentry *
2b6080f2 4307create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 4308
6b450d25
SRRH
4309/*
4310 * Used to clear out the tracer before deletion of an instance.
4311 * Must have trace_types_lock held.
4312 */
4313static void tracing_set_nop(struct trace_array *tr)
4314{
4315 if (tr->current_trace == &nop_trace)
4316 return;
4317
50512ab5 4318 tr->current_trace->enabled--;
6b450d25
SRRH
4319
4320 if (tr->current_trace->reset)
4321 tr->current_trace->reset(tr);
4322
4323 tr->current_trace = &nop_trace;
4324}
4325
41d9c0be 4326static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4327{
09d23a1d
SRRH
4328 /* Only enable if the directory has been created already. */
4329 if (!tr->dir)
4330 return;
4331
4332 /* Currently, only the top instance has options */
4333 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4334 return;
4335
41d9c0be
SRRH
4336 /* Ignore if they were already created */
4337 if (t->topts)
4338 return;
4339
4340 t->topts = create_trace_option_files(tr, t);
09d23a1d
SRRH
4341}
4342
4343static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4344{
bc0c38d1 4345 struct tracer *t;
12883efb 4346#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4347 bool had_max_tr;
12883efb 4348#endif
d9e54076 4349 int ret = 0;
bc0c38d1 4350
1027fcb2
SR
4351 mutex_lock(&trace_types_lock);
4352
73c5162a 4353 if (!ring_buffer_expanded) {
2b6080f2 4354 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4355 RING_BUFFER_ALL_CPUS);
73c5162a 4356 if (ret < 0)
59f586db 4357 goto out;
73c5162a
SR
4358 ret = 0;
4359 }
4360
bc0c38d1
SR
4361 for (t = trace_types; t; t = t->next) {
4362 if (strcmp(t->name, buf) == 0)
4363 break;
4364 }
c2931e05
FW
4365 if (!t) {
4366 ret = -EINVAL;
4367 goto out;
4368 }
2b6080f2 4369 if (t == tr->current_trace)
bc0c38d1
SR
4370 goto out;
4371
607e2ea1
SRRH
4372 /* Some tracers are only allowed for the top level buffer */
4373 if (!trace_ok_for_array(t, tr)) {
4374 ret = -EINVAL;
4375 goto out;
4376 }
4377
cf6ab6d9
SRRH
4378 /* If trace pipe files are being read, we can't change the tracer */
4379 if (tr->current_trace->ref) {
4380 ret = -EBUSY;
4381 goto out;
4382 }
4383
9f029e83 4384 trace_branch_disable();
613f04a0 4385
50512ab5 4386 tr->current_trace->enabled--;
613f04a0 4387
2b6080f2
SR
4388 if (tr->current_trace->reset)
4389 tr->current_trace->reset(tr);
34600f0e 4390
12883efb 4391 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4392 tr->current_trace = &nop_trace;
34600f0e 4393
45ad21ca
SRRH
4394#ifdef CONFIG_TRACER_MAX_TRACE
4395 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4396
4397 if (had_max_tr && !t->use_max_tr) {
4398 /*
4399 * We need to make sure that the update_max_tr sees that
4400 * current_trace changed to nop_trace to keep it from
4401 * swapping the buffers after we resize it.
4402 * The update_max_tr is called from interrupts disabled
4403 * so a synchronized_sched() is sufficient.
4404 */
4405 synchronize_sched();
3209cff4 4406 free_snapshot(tr);
ef710e10 4407 }
12883efb 4408#endif
12883efb
SRRH
4409
4410#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4411 if (t->use_max_tr && !had_max_tr) {
3209cff4 4412 ret = alloc_snapshot(tr);
d60da506
HT
4413 if (ret < 0)
4414 goto out;
ef710e10 4415 }
12883efb 4416#endif
577b785f 4417
1c80025a 4418 if (t->init) {
b6f11df2 4419 ret = tracer_init(t, tr);
1c80025a
FW
4420 if (ret)
4421 goto out;
4422 }
bc0c38d1 4423
2b6080f2 4424 tr->current_trace = t;
50512ab5 4425 tr->current_trace->enabled++;
9f029e83 4426 trace_branch_enable(tr);
bc0c38d1
SR
4427 out:
4428 mutex_unlock(&trace_types_lock);
4429
d9e54076
PZ
4430 return ret;
4431}
4432
4433static ssize_t
4434tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4435 size_t cnt, loff_t *ppos)
4436{
607e2ea1 4437 struct trace_array *tr = filp->private_data;
ee6c2c1b 4438 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4439 int i;
4440 size_t ret;
e6e7a65a
FW
4441 int err;
4442
4443 ret = cnt;
d9e54076 4444
ee6c2c1b
LZ
4445 if (cnt > MAX_TRACER_SIZE)
4446 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4447
4448 if (copy_from_user(&buf, ubuf, cnt))
4449 return -EFAULT;
4450
4451 buf[cnt] = 0;
4452
4453 /* strip ending whitespace. */
4454 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4455 buf[i] = 0;
4456
607e2ea1 4457 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4458 if (err)
4459 return err;
d9e54076 4460
cf8517cf 4461 *ppos += ret;
bc0c38d1 4462
c2931e05 4463 return ret;
bc0c38d1
SR
4464}
4465
4466static ssize_t
6508fa76
SF
4467tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4468 size_t cnt, loff_t *ppos)
bc0c38d1 4469{
bc0c38d1
SR
4470 char buf[64];
4471 int r;
4472
cffae437 4473 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4474 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4475 if (r > sizeof(buf))
4476 r = sizeof(buf);
4bf39a94 4477 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4478}
4479
4480static ssize_t
6508fa76
SF
4481tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4482 size_t cnt, loff_t *ppos)
bc0c38d1 4483{
5e39841c 4484 unsigned long val;
c6caeeb1 4485 int ret;
bc0c38d1 4486
22fe9b54
PH
4487 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4488 if (ret)
c6caeeb1 4489 return ret;
bc0c38d1
SR
4490
4491 *ptr = val * 1000;
4492
4493 return cnt;
4494}
4495
6508fa76
SF
4496static ssize_t
4497tracing_thresh_read(struct file *filp, char __user *ubuf,
4498 size_t cnt, loff_t *ppos)
4499{
4500 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4501}
4502
4503static ssize_t
4504tracing_thresh_write(struct file *filp, const char __user *ubuf,
4505 size_t cnt, loff_t *ppos)
4506{
4507 struct trace_array *tr = filp->private_data;
4508 int ret;
4509
4510 mutex_lock(&trace_types_lock);
4511 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4512 if (ret < 0)
4513 goto out;
4514
4515 if (tr->current_trace->update_thresh) {
4516 ret = tr->current_trace->update_thresh(tr);
4517 if (ret < 0)
4518 goto out;
4519 }
4520
4521 ret = cnt;
4522out:
4523 mutex_unlock(&trace_types_lock);
4524
4525 return ret;
4526}
4527
4528static ssize_t
4529tracing_max_lat_read(struct file *filp, char __user *ubuf,
4530 size_t cnt, loff_t *ppos)
4531{
4532 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4533}
4534
4535static ssize_t
4536tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4537 size_t cnt, loff_t *ppos)
4538{
4539 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4540}
4541
b3806b43
SR
4542static int tracing_open_pipe(struct inode *inode, struct file *filp)
4543{
15544209 4544 struct trace_array *tr = inode->i_private;
b3806b43 4545 struct trace_iterator *iter;
b04cc6b1 4546 int ret = 0;
b3806b43
SR
4547
4548 if (tracing_disabled)
4549 return -ENODEV;
4550
7b85af63
SRRH
4551 if (trace_array_get(tr) < 0)
4552 return -ENODEV;
4553
b04cc6b1
FW
4554 mutex_lock(&trace_types_lock);
4555
b3806b43
SR
4556 /* create a buffer to store the information to pass to userspace */
4557 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4558 if (!iter) {
4559 ret = -ENOMEM;
f77d09a3 4560 __trace_array_put(tr);
b04cc6b1
FW
4561 goto out;
4562 }
b3806b43 4563
3a161d99 4564 trace_seq_init(&iter->seq);
d716ff71 4565 iter->trace = tr->current_trace;
d7350c3f 4566
4462344e 4567 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4568 ret = -ENOMEM;
d7350c3f 4569 goto fail;
4462344e
RR
4570 }
4571
a309720c 4572 /* trace pipe does not show start of buffer */
4462344e 4573 cpumask_setall(iter->started);
a309720c 4574
983f938a 4575 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
4576 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4577
8be0709f 4578 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4579 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4580 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4581
15544209
ON
4582 iter->tr = tr;
4583 iter->trace_buffer = &tr->trace_buffer;
4584 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4585 mutex_init(&iter->mutex);
b3806b43
SR
4586 filp->private_data = iter;
4587
107bad8b
SR
4588 if (iter->trace->pipe_open)
4589 iter->trace->pipe_open(iter);
107bad8b 4590
b444786f 4591 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4592
4593 tr->current_trace->ref++;
b04cc6b1
FW
4594out:
4595 mutex_unlock(&trace_types_lock);
4596 return ret;
d7350c3f
FW
4597
4598fail:
4599 kfree(iter->trace);
4600 kfree(iter);
7b85af63 4601 __trace_array_put(tr);
d7350c3f
FW
4602 mutex_unlock(&trace_types_lock);
4603 return ret;
b3806b43
SR
4604}
4605
4606static int tracing_release_pipe(struct inode *inode, struct file *file)
4607{
4608 struct trace_iterator *iter = file->private_data;
15544209 4609 struct trace_array *tr = inode->i_private;
b3806b43 4610
b04cc6b1
FW
4611 mutex_lock(&trace_types_lock);
4612
cf6ab6d9
SRRH
4613 tr->current_trace->ref--;
4614
29bf4a5e 4615 if (iter->trace->pipe_close)
c521efd1
SR
4616 iter->trace->pipe_close(iter);
4617
b04cc6b1
FW
4618 mutex_unlock(&trace_types_lock);
4619
4462344e 4620 free_cpumask_var(iter->started);
d7350c3f 4621 mutex_destroy(&iter->mutex);
b3806b43 4622 kfree(iter);
b3806b43 4623
7b85af63
SRRH
4624 trace_array_put(tr);
4625
b3806b43
SR
4626 return 0;
4627}
4628
2a2cc8f7 4629static unsigned int
cc60cdc9 4630trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4631{
983f938a
SRRH
4632 struct trace_array *tr = iter->tr;
4633
15693458
SRRH
4634 /* Iterators are static, they should be filled or empty */
4635 if (trace_buffer_iter(iter, iter->cpu_file))
4636 return POLLIN | POLLRDNORM;
2a2cc8f7 4637
983f938a 4638 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4639 /*
4640 * Always select as readable when in blocking mode
4641 */
4642 return POLLIN | POLLRDNORM;
15693458 4643 else
12883efb 4644 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4645 filp, poll_table);
2a2cc8f7 4646}
2a2cc8f7 4647
cc60cdc9
SR
4648static unsigned int
4649tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4650{
4651 struct trace_iterator *iter = filp->private_data;
4652
4653 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4654}
4655
d716ff71 4656/* Must be called with iter->mutex held. */
ff98781b 4657static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4658{
4659 struct trace_iterator *iter = filp->private_data;
8b8b3683 4660 int ret;
b3806b43 4661
b3806b43 4662 while (trace_empty(iter)) {
2dc8f095 4663
107bad8b 4664 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4665 return -EAGAIN;
107bad8b 4666 }
2dc8f095 4667
b3806b43 4668 /*
250bfd3d 4669 * We block until we read something and tracing is disabled.
b3806b43
SR
4670 * We still block if tracing is disabled, but we have never
4671 * read anything. This allows a user to cat this file, and
4672 * then enable tracing. But after we have read something,
4673 * we give an EOF when tracing is again disabled.
4674 *
4675 * iter->pos will be 0 if we haven't read anything.
4676 */
10246fa3 4677 if (!tracing_is_on() && iter->pos)
b3806b43 4678 break;
f4874261
SRRH
4679
4680 mutex_unlock(&iter->mutex);
4681
e30f53aa 4682 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4683
4684 mutex_lock(&iter->mutex);
4685
8b8b3683
SRRH
4686 if (ret)
4687 return ret;
b3806b43
SR
4688 }
4689
ff98781b
EGM
4690 return 1;
4691}
4692
4693/*
4694 * Consumer reader.
4695 */
4696static ssize_t
4697tracing_read_pipe(struct file *filp, char __user *ubuf,
4698 size_t cnt, loff_t *ppos)
4699{
4700 struct trace_iterator *iter = filp->private_data;
4701 ssize_t sret;
4702
4703 /* return any leftover data */
4704 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4705 if (sret != -EBUSY)
4706 return sret;
4707
f9520750 4708 trace_seq_init(&iter->seq);
ff98781b 4709
d7350c3f
FW
4710 /*
4711 * Avoid more than one consumer on a single file descriptor
4712 * This is just a matter of traces coherency, the ring buffer itself
4713 * is protected.
4714 */
4715 mutex_lock(&iter->mutex);
ff98781b
EGM
4716 if (iter->trace->read) {
4717 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4718 if (sret)
4719 goto out;
4720 }
4721
4722waitagain:
4723 sret = tracing_wait_pipe(filp);
4724 if (sret <= 0)
4725 goto out;
4726
b3806b43 4727 /* stop when tracing is finished */
ff98781b
EGM
4728 if (trace_empty(iter)) {
4729 sret = 0;
107bad8b 4730 goto out;
ff98781b 4731 }
b3806b43
SR
4732
4733 if (cnt >= PAGE_SIZE)
4734 cnt = PAGE_SIZE - 1;
4735
53d0aa77 4736 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4737 memset(&iter->seq, 0,
4738 sizeof(struct trace_iterator) -
4739 offsetof(struct trace_iterator, seq));
ed5467da 4740 cpumask_clear(iter->started);
4823ed7e 4741 iter->pos = -1;
b3806b43 4742
4f535968 4743 trace_event_read_lock();
7e53bd42 4744 trace_access_lock(iter->cpu_file);
955b61e5 4745 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4746 enum print_line_t ret;
5ac48378 4747 int save_len = iter->seq.seq.len;
088b1e42 4748
f9896bf3 4749 ret = print_trace_line(iter);
2c4f035f 4750 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4751 /* don't print partial lines */
5ac48378 4752 iter->seq.seq.len = save_len;
b3806b43 4753 break;
088b1e42 4754 }
b91facc3
FW
4755 if (ret != TRACE_TYPE_NO_CONSUME)
4756 trace_consume(iter);
b3806b43 4757
5ac48378 4758 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4759 break;
ee5e51f5
JO
4760
4761 /*
4762 * Setting the full flag means we reached the trace_seq buffer
4763 * size and we should leave by partial output condition above.
4764 * One of the trace_seq_* functions is not used properly.
4765 */
4766 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4767 iter->ent->type);
b3806b43 4768 }
7e53bd42 4769 trace_access_unlock(iter->cpu_file);
4f535968 4770 trace_event_read_unlock();
b3806b43 4771
b3806b43 4772 /* Now copy what we have to the user */
6c6c2796 4773 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4774 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4775 trace_seq_init(&iter->seq);
9ff4b974
PP
4776
4777 /*
25985edc 4778 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4779 * entries, go back to wait for more entries.
4780 */
6c6c2796 4781 if (sret == -EBUSY)
9ff4b974 4782 goto waitagain;
b3806b43 4783
107bad8b 4784out:
d7350c3f 4785 mutex_unlock(&iter->mutex);
107bad8b 4786
6c6c2796 4787 return sret;
b3806b43
SR
4788}
4789
3c56819b
EGM
4790static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4791 unsigned int idx)
4792{
4793 __free_page(spd->pages[idx]);
4794}
4795
28dfef8f 4796static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4797 .can_merge = 0,
34cd4998 4798 .confirm = generic_pipe_buf_confirm,
92fdd98c 4799 .release = generic_pipe_buf_release,
34cd4998
SR
4800 .steal = generic_pipe_buf_steal,
4801 .get = generic_pipe_buf_get,
3c56819b
EGM
4802};
4803
34cd4998 4804static size_t
fa7c7f6e 4805tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4806{
4807 size_t count;
74f06bb7 4808 int save_len;
34cd4998
SR
4809 int ret;
4810
4811 /* Seq buffer is page-sized, exactly what we need. */
4812 for (;;) {
74f06bb7 4813 save_len = iter->seq.seq.len;
34cd4998 4814 ret = print_trace_line(iter);
74f06bb7
SRRH
4815
4816 if (trace_seq_has_overflowed(&iter->seq)) {
4817 iter->seq.seq.len = save_len;
34cd4998
SR
4818 break;
4819 }
74f06bb7
SRRH
4820
4821 /*
4822 * This should not be hit, because it should only
4823 * be set if the iter->seq overflowed. But check it
4824 * anyway to be safe.
4825 */
34cd4998 4826 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4827 iter->seq.seq.len = save_len;
4828 break;
4829 }
4830
5ac48378 4831 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4832 if (rem < count) {
4833 rem = 0;
4834 iter->seq.seq.len = save_len;
34cd4998
SR
4835 break;
4836 }
4837
74e7ff8c
LJ
4838 if (ret != TRACE_TYPE_NO_CONSUME)
4839 trace_consume(iter);
34cd4998 4840 rem -= count;
955b61e5 4841 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4842 rem = 0;
4843 iter->ent = NULL;
4844 break;
4845 }
4846 }
4847
4848 return rem;
4849}
4850
3c56819b
EGM
4851static ssize_t tracing_splice_read_pipe(struct file *filp,
4852 loff_t *ppos,
4853 struct pipe_inode_info *pipe,
4854 size_t len,
4855 unsigned int flags)
4856{
35f3d14d
JA
4857 struct page *pages_def[PIPE_DEF_BUFFERS];
4858 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4859 struct trace_iterator *iter = filp->private_data;
4860 struct splice_pipe_desc spd = {
35f3d14d
JA
4861 .pages = pages_def,
4862 .partial = partial_def,
34cd4998 4863 .nr_pages = 0, /* This gets updated below. */
047fe360 4864 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4865 .flags = flags,
4866 .ops = &tracing_pipe_buf_ops,
4867 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4868 };
4869 ssize_t ret;
34cd4998 4870 size_t rem;
3c56819b
EGM
4871 unsigned int i;
4872
35f3d14d
JA
4873 if (splice_grow_spd(pipe, &spd))
4874 return -ENOMEM;
4875
d7350c3f 4876 mutex_lock(&iter->mutex);
3c56819b
EGM
4877
4878 if (iter->trace->splice_read) {
4879 ret = iter->trace->splice_read(iter, filp,
4880 ppos, pipe, len, flags);
4881 if (ret)
34cd4998 4882 goto out_err;
3c56819b
EGM
4883 }
4884
4885 ret = tracing_wait_pipe(filp);
4886 if (ret <= 0)
34cd4998 4887 goto out_err;
3c56819b 4888
955b61e5 4889 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4890 ret = -EFAULT;
34cd4998 4891 goto out_err;
3c56819b
EGM
4892 }
4893
4f535968 4894 trace_event_read_lock();
7e53bd42 4895 trace_access_lock(iter->cpu_file);
4f535968 4896
3c56819b 4897 /* Fill as many pages as possible. */
a786c06d 4898 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4899 spd.pages[i] = alloc_page(GFP_KERNEL);
4900 if (!spd.pages[i])
34cd4998 4901 break;
3c56819b 4902
fa7c7f6e 4903 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4904
4905 /* Copy the data into the page, so we can start over. */
4906 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4907 page_address(spd.pages[i]),
5ac48378 4908 trace_seq_used(&iter->seq));
3c56819b 4909 if (ret < 0) {
35f3d14d 4910 __free_page(spd.pages[i]);
3c56819b
EGM
4911 break;
4912 }
35f3d14d 4913 spd.partial[i].offset = 0;
5ac48378 4914 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4915
f9520750 4916 trace_seq_init(&iter->seq);
3c56819b
EGM
4917 }
4918
7e53bd42 4919 trace_access_unlock(iter->cpu_file);
4f535968 4920 trace_event_read_unlock();
d7350c3f 4921 mutex_unlock(&iter->mutex);
3c56819b
EGM
4922
4923 spd.nr_pages = i;
4924
35f3d14d
JA
4925 ret = splice_to_pipe(pipe, &spd);
4926out:
047fe360 4927 splice_shrink_spd(&spd);
35f3d14d 4928 return ret;
3c56819b 4929
34cd4998 4930out_err:
d7350c3f 4931 mutex_unlock(&iter->mutex);
35f3d14d 4932 goto out;
3c56819b
EGM
4933}
4934
a98a3c3f
SR
4935static ssize_t
4936tracing_entries_read(struct file *filp, char __user *ubuf,
4937 size_t cnt, loff_t *ppos)
4938{
0bc392ee
ON
4939 struct inode *inode = file_inode(filp);
4940 struct trace_array *tr = inode->i_private;
4941 int cpu = tracing_get_cpu(inode);
438ced17
VN
4942 char buf[64];
4943 int r = 0;
4944 ssize_t ret;
a98a3c3f 4945
db526ca3 4946 mutex_lock(&trace_types_lock);
438ced17 4947
0bc392ee 4948 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4949 int cpu, buf_size_same;
4950 unsigned long size;
4951
4952 size = 0;
4953 buf_size_same = 1;
4954 /* check if all cpu sizes are same */
4955 for_each_tracing_cpu(cpu) {
4956 /* fill in the size from first enabled cpu */
4957 if (size == 0)
12883efb
SRRH
4958 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4959 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4960 buf_size_same = 0;
4961 break;
4962 }
4963 }
4964
4965 if (buf_size_same) {
4966 if (!ring_buffer_expanded)
4967 r = sprintf(buf, "%lu (expanded: %lu)\n",
4968 size >> 10,
4969 trace_buf_size >> 10);
4970 else
4971 r = sprintf(buf, "%lu\n", size >> 10);
4972 } else
4973 r = sprintf(buf, "X\n");
4974 } else
0bc392ee 4975 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4976
db526ca3
SR
4977 mutex_unlock(&trace_types_lock);
4978
438ced17
VN
4979 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4980 return ret;
a98a3c3f
SR
4981}
4982
4983static ssize_t
4984tracing_entries_write(struct file *filp, const char __user *ubuf,
4985 size_t cnt, loff_t *ppos)
4986{
0bc392ee
ON
4987 struct inode *inode = file_inode(filp);
4988 struct trace_array *tr = inode->i_private;
a98a3c3f 4989 unsigned long val;
4f271a2a 4990 int ret;
a98a3c3f 4991
22fe9b54
PH
4992 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4993 if (ret)
c6caeeb1 4994 return ret;
a98a3c3f
SR
4995
4996 /* must have at least 1 entry */
4997 if (!val)
4998 return -EINVAL;
4999
1696b2b0
SR
5000 /* value is in KB */
5001 val <<= 10;
0bc392ee 5002 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5003 if (ret < 0)
5004 return ret;
a98a3c3f 5005
cf8517cf 5006 *ppos += cnt;
a98a3c3f 5007
4f271a2a
VN
5008 return cnt;
5009}
bf5e6519 5010
f81ab074
VN
5011static ssize_t
5012tracing_total_entries_read(struct file *filp, char __user *ubuf,
5013 size_t cnt, loff_t *ppos)
5014{
5015 struct trace_array *tr = filp->private_data;
5016 char buf[64];
5017 int r, cpu;
5018 unsigned long size = 0, expanded_size = 0;
5019
5020 mutex_lock(&trace_types_lock);
5021 for_each_tracing_cpu(cpu) {
12883efb 5022 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5023 if (!ring_buffer_expanded)
5024 expanded_size += trace_buf_size >> 10;
5025 }
5026 if (ring_buffer_expanded)
5027 r = sprintf(buf, "%lu\n", size);
5028 else
5029 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5030 mutex_unlock(&trace_types_lock);
5031
5032 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5033}
5034
4f271a2a
VN
5035static ssize_t
5036tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5037 size_t cnt, loff_t *ppos)
5038{
5039 /*
5040 * There is no need to read what the user has written, this function
5041 * is just to make sure that there is no error when "echo" is used
5042 */
5043
5044 *ppos += cnt;
a98a3c3f
SR
5045
5046 return cnt;
5047}
5048
4f271a2a
VN
5049static int
5050tracing_free_buffer_release(struct inode *inode, struct file *filp)
5051{
2b6080f2
SR
5052 struct trace_array *tr = inode->i_private;
5053
cf30cf67 5054 /* disable tracing ? */
983f938a 5055 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5056 tracer_tracing_off(tr);
4f271a2a 5057 /* resize the ring buffer to 0 */
2b6080f2 5058 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5059
7b85af63
SRRH
5060 trace_array_put(tr);
5061
4f271a2a
VN
5062 return 0;
5063}
5064
5bf9a1ee
PP
5065static ssize_t
5066tracing_mark_write(struct file *filp, const char __user *ubuf,
5067 size_t cnt, loff_t *fpos)
5068{
d696b58c 5069 unsigned long addr = (unsigned long)ubuf;
2d71619c 5070 struct trace_array *tr = filp->private_data;
d696b58c
SR
5071 struct ring_buffer_event *event;
5072 struct ring_buffer *buffer;
5073 struct print_entry *entry;
5074 unsigned long irq_flags;
5075 struct page *pages[2];
6edb2a8a 5076 void *map_page[2];
d696b58c
SR
5077 int nr_pages = 1;
5078 ssize_t written;
d696b58c
SR
5079 int offset;
5080 int size;
5081 int len;
5082 int ret;
6edb2a8a 5083 int i;
5bf9a1ee 5084
c76f0694 5085 if (tracing_disabled)
5bf9a1ee
PP
5086 return -EINVAL;
5087
983f938a 5088 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5089 return -EINVAL;
5090
5bf9a1ee
PP
5091 if (cnt > TRACE_BUF_SIZE)
5092 cnt = TRACE_BUF_SIZE;
5093
d696b58c
SR
5094 /*
5095 * Userspace is injecting traces into the kernel trace buffer.
5096 * We want to be as non intrusive as possible.
5097 * To do so, we do not want to allocate any special buffers
5098 * or take any locks, but instead write the userspace data
5099 * straight into the ring buffer.
5100 *
5101 * First we need to pin the userspace buffer into memory,
5102 * which, most likely it is, because it just referenced it.
5103 * But there's no guarantee that it is. By using get_user_pages_fast()
5104 * and kmap_atomic/kunmap_atomic() we can get access to the
5105 * pages directly. We then write the data directly into the
5106 * ring buffer.
5107 */
5108 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5109
d696b58c
SR
5110 /* check if we cross pages */
5111 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5112 nr_pages = 2;
5113
5114 offset = addr & (PAGE_SIZE - 1);
5115 addr &= PAGE_MASK;
5116
5117 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5118 if (ret < nr_pages) {
5119 while (--ret >= 0)
5120 put_page(pages[ret]);
5121 written = -EFAULT;
5122 goto out;
5bf9a1ee 5123 }
d696b58c 5124
6edb2a8a
SR
5125 for (i = 0; i < nr_pages; i++)
5126 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5127
5128 local_save_flags(irq_flags);
5129 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5130 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5131 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5132 irq_flags, preempt_count());
5133 if (!event) {
5134 /* Ring buffer disabled, return as if not open for write */
5135 written = -EBADF;
5136 goto out_unlock;
5bf9a1ee 5137 }
d696b58c
SR
5138
5139 entry = ring_buffer_event_data(event);
5140 entry->ip = _THIS_IP_;
5141
5142 if (nr_pages == 2) {
5143 len = PAGE_SIZE - offset;
6edb2a8a
SR
5144 memcpy(&entry->buf, map_page[0] + offset, len);
5145 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5146 } else
6edb2a8a 5147 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5148
d696b58c
SR
5149 if (entry->buf[cnt - 1] != '\n') {
5150 entry->buf[cnt] = '\n';
5151 entry->buf[cnt + 1] = '\0';
5152 } else
5153 entry->buf[cnt] = '\0';
5154
7ffbd48d 5155 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5156
d696b58c 5157 written = cnt;
5bf9a1ee 5158
d696b58c 5159 *fpos += written;
1aa54bca 5160
d696b58c 5161 out_unlock:
7215853e 5162 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5163 kunmap_atomic(map_page[i]);
5164 put_page(pages[i]);
5165 }
d696b58c 5166 out:
1aa54bca 5167 return written;
5bf9a1ee
PP
5168}
5169
13f16d20 5170static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5171{
2b6080f2 5172 struct trace_array *tr = m->private;
5079f326
Z
5173 int i;
5174
5175 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5176 seq_printf(m,
5079f326 5177 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5178 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5179 i == tr->clock_id ? "]" : "");
13f16d20 5180 seq_putc(m, '\n');
5079f326 5181
13f16d20 5182 return 0;
5079f326
Z
5183}
5184
e1e232ca 5185static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5186{
5079f326
Z
5187 int i;
5188
5079f326
Z
5189 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5190 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5191 break;
5192 }
5193 if (i == ARRAY_SIZE(trace_clocks))
5194 return -EINVAL;
5195
5079f326
Z
5196 mutex_lock(&trace_types_lock);
5197
2b6080f2
SR
5198 tr->clock_id = i;
5199
12883efb 5200 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5201
60303ed3
DS
5202 /*
5203 * New clock may not be consistent with the previous clock.
5204 * Reset the buffer so that it doesn't have incomparable timestamps.
5205 */
9457158b 5206 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5207
5208#ifdef CONFIG_TRACER_MAX_TRACE
5209 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5210 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5211 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5212#endif
60303ed3 5213
5079f326
Z
5214 mutex_unlock(&trace_types_lock);
5215
e1e232ca
SR
5216 return 0;
5217}
5218
5219static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5220 size_t cnt, loff_t *fpos)
5221{
5222 struct seq_file *m = filp->private_data;
5223 struct trace_array *tr = m->private;
5224 char buf[64];
5225 const char *clockstr;
5226 int ret;
5227
5228 if (cnt >= sizeof(buf))
5229 return -EINVAL;
5230
5231 if (copy_from_user(&buf, ubuf, cnt))
5232 return -EFAULT;
5233
5234 buf[cnt] = 0;
5235
5236 clockstr = strstrip(buf);
5237
5238 ret = tracing_set_clock(tr, clockstr);
5239 if (ret)
5240 return ret;
5241
5079f326
Z
5242 *fpos += cnt;
5243
5244 return cnt;
5245}
5246
13f16d20
LZ
5247static int tracing_clock_open(struct inode *inode, struct file *file)
5248{
7b85af63
SRRH
5249 struct trace_array *tr = inode->i_private;
5250 int ret;
5251
13f16d20
LZ
5252 if (tracing_disabled)
5253 return -ENODEV;
2b6080f2 5254
7b85af63
SRRH
5255 if (trace_array_get(tr))
5256 return -ENODEV;
5257
5258 ret = single_open(file, tracing_clock_show, inode->i_private);
5259 if (ret < 0)
5260 trace_array_put(tr);
5261
5262 return ret;
13f16d20
LZ
5263}
5264
6de58e62
SRRH
5265struct ftrace_buffer_info {
5266 struct trace_iterator iter;
5267 void *spare;
5268 unsigned int read;
5269};
5270
debdd57f
HT
5271#ifdef CONFIG_TRACER_SNAPSHOT
5272static int tracing_snapshot_open(struct inode *inode, struct file *file)
5273{
6484c71c 5274 struct trace_array *tr = inode->i_private;
debdd57f 5275 struct trace_iterator *iter;
2b6080f2 5276 struct seq_file *m;
debdd57f
HT
5277 int ret = 0;
5278
ff451961
SRRH
5279 if (trace_array_get(tr) < 0)
5280 return -ENODEV;
5281
debdd57f 5282 if (file->f_mode & FMODE_READ) {
6484c71c 5283 iter = __tracing_open(inode, file, true);
debdd57f
HT
5284 if (IS_ERR(iter))
5285 ret = PTR_ERR(iter);
2b6080f2
SR
5286 } else {
5287 /* Writes still need the seq_file to hold the private data */
f77d09a3 5288 ret = -ENOMEM;
2b6080f2
SR
5289 m = kzalloc(sizeof(*m), GFP_KERNEL);
5290 if (!m)
f77d09a3 5291 goto out;
2b6080f2
SR
5292 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5293 if (!iter) {
5294 kfree(m);
f77d09a3 5295 goto out;
2b6080f2 5296 }
f77d09a3
AL
5297 ret = 0;
5298
ff451961 5299 iter->tr = tr;
6484c71c
ON
5300 iter->trace_buffer = &tr->max_buffer;
5301 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5302 m->private = iter;
5303 file->private_data = m;
debdd57f 5304 }
f77d09a3 5305out:
ff451961
SRRH
5306 if (ret < 0)
5307 trace_array_put(tr);
5308
debdd57f
HT
5309 return ret;
5310}
5311
5312static ssize_t
5313tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5314 loff_t *ppos)
5315{
2b6080f2
SR
5316 struct seq_file *m = filp->private_data;
5317 struct trace_iterator *iter = m->private;
5318 struct trace_array *tr = iter->tr;
debdd57f
HT
5319 unsigned long val;
5320 int ret;
5321
5322 ret = tracing_update_buffers();
5323 if (ret < 0)
5324 return ret;
5325
5326 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5327 if (ret)
5328 return ret;
5329
5330 mutex_lock(&trace_types_lock);
5331
2b6080f2 5332 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5333 ret = -EBUSY;
5334 goto out;
5335 }
5336
5337 switch (val) {
5338 case 0:
f1affcaa
SRRH
5339 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5340 ret = -EINVAL;
5341 break;
debdd57f 5342 }
3209cff4
SRRH
5343 if (tr->allocated_snapshot)
5344 free_snapshot(tr);
debdd57f
HT
5345 break;
5346 case 1:
f1affcaa
SRRH
5347/* Only allow per-cpu swap if the ring buffer supports it */
5348#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5349 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5350 ret = -EINVAL;
5351 break;
5352 }
5353#endif
45ad21ca 5354 if (!tr->allocated_snapshot) {
3209cff4 5355 ret = alloc_snapshot(tr);
debdd57f
HT
5356 if (ret < 0)
5357 break;
debdd57f 5358 }
debdd57f
HT
5359 local_irq_disable();
5360 /* Now, we're going to swap */
f1affcaa 5361 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5362 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5363 else
ce9bae55 5364 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5365 local_irq_enable();
5366 break;
5367 default:
45ad21ca 5368 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5369 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5370 tracing_reset_online_cpus(&tr->max_buffer);
5371 else
5372 tracing_reset(&tr->max_buffer, iter->cpu_file);
5373 }
debdd57f
HT
5374 break;
5375 }
5376
5377 if (ret >= 0) {
5378 *ppos += cnt;
5379 ret = cnt;
5380 }
5381out:
5382 mutex_unlock(&trace_types_lock);
5383 return ret;
5384}
2b6080f2
SR
5385
5386static int tracing_snapshot_release(struct inode *inode, struct file *file)
5387{
5388 struct seq_file *m = file->private_data;
ff451961
SRRH
5389 int ret;
5390
5391 ret = tracing_release(inode, file);
2b6080f2
SR
5392
5393 if (file->f_mode & FMODE_READ)
ff451961 5394 return ret;
2b6080f2
SR
5395
5396 /* If write only, the seq_file is just a stub */
5397 if (m)
5398 kfree(m->private);
5399 kfree(m);
5400
5401 return 0;
5402}
5403
6de58e62
SRRH
5404static int tracing_buffers_open(struct inode *inode, struct file *filp);
5405static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5406 size_t count, loff_t *ppos);
5407static int tracing_buffers_release(struct inode *inode, struct file *file);
5408static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5409 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5410
5411static int snapshot_raw_open(struct inode *inode, struct file *filp)
5412{
5413 struct ftrace_buffer_info *info;
5414 int ret;
5415
5416 ret = tracing_buffers_open(inode, filp);
5417 if (ret < 0)
5418 return ret;
5419
5420 info = filp->private_data;
5421
5422 if (info->iter.trace->use_max_tr) {
5423 tracing_buffers_release(inode, filp);
5424 return -EBUSY;
5425 }
5426
5427 info->iter.snapshot = true;
5428 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5429
5430 return ret;
5431}
5432
debdd57f
HT
5433#endif /* CONFIG_TRACER_SNAPSHOT */
5434
5435
6508fa76
SF
5436static const struct file_operations tracing_thresh_fops = {
5437 .open = tracing_open_generic,
5438 .read = tracing_thresh_read,
5439 .write = tracing_thresh_write,
5440 .llseek = generic_file_llseek,
5441};
5442
5e2336a0 5443static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5444 .open = tracing_open_generic,
5445 .read = tracing_max_lat_read,
5446 .write = tracing_max_lat_write,
b444786f 5447 .llseek = generic_file_llseek,
bc0c38d1
SR
5448};
5449
5e2336a0 5450static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5451 .open = tracing_open_generic,
5452 .read = tracing_set_trace_read,
5453 .write = tracing_set_trace_write,
b444786f 5454 .llseek = generic_file_llseek,
bc0c38d1
SR
5455};
5456
5e2336a0 5457static const struct file_operations tracing_pipe_fops = {
4bf39a94 5458 .open = tracing_open_pipe,
2a2cc8f7 5459 .poll = tracing_poll_pipe,
4bf39a94 5460 .read = tracing_read_pipe,
3c56819b 5461 .splice_read = tracing_splice_read_pipe,
4bf39a94 5462 .release = tracing_release_pipe,
b444786f 5463 .llseek = no_llseek,
b3806b43
SR
5464};
5465
5e2336a0 5466static const struct file_operations tracing_entries_fops = {
0bc392ee 5467 .open = tracing_open_generic_tr,
a98a3c3f
SR
5468 .read = tracing_entries_read,
5469 .write = tracing_entries_write,
b444786f 5470 .llseek = generic_file_llseek,
0bc392ee 5471 .release = tracing_release_generic_tr,
a98a3c3f
SR
5472};
5473
f81ab074 5474static const struct file_operations tracing_total_entries_fops = {
7b85af63 5475 .open = tracing_open_generic_tr,
f81ab074
VN
5476 .read = tracing_total_entries_read,
5477 .llseek = generic_file_llseek,
7b85af63 5478 .release = tracing_release_generic_tr,
f81ab074
VN
5479};
5480
4f271a2a 5481static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5482 .open = tracing_open_generic_tr,
4f271a2a
VN
5483 .write = tracing_free_buffer_write,
5484 .release = tracing_free_buffer_release,
5485};
5486
5e2336a0 5487static const struct file_operations tracing_mark_fops = {
7b85af63 5488 .open = tracing_open_generic_tr,
5bf9a1ee 5489 .write = tracing_mark_write,
b444786f 5490 .llseek = generic_file_llseek,
7b85af63 5491 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5492};
5493
5079f326 5494static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5495 .open = tracing_clock_open,
5496 .read = seq_read,
5497 .llseek = seq_lseek,
7b85af63 5498 .release = tracing_single_release_tr,
5079f326
Z
5499 .write = tracing_clock_write,
5500};
5501
debdd57f
HT
5502#ifdef CONFIG_TRACER_SNAPSHOT
5503static const struct file_operations snapshot_fops = {
5504 .open = tracing_snapshot_open,
5505 .read = seq_read,
5506 .write = tracing_snapshot_write,
098c879e 5507 .llseek = tracing_lseek,
2b6080f2 5508 .release = tracing_snapshot_release,
debdd57f 5509};
debdd57f 5510
6de58e62
SRRH
5511static const struct file_operations snapshot_raw_fops = {
5512 .open = snapshot_raw_open,
5513 .read = tracing_buffers_read,
5514 .release = tracing_buffers_release,
5515 .splice_read = tracing_buffers_splice_read,
5516 .llseek = no_llseek,
2cadf913
SR
5517};
5518
6de58e62
SRRH
5519#endif /* CONFIG_TRACER_SNAPSHOT */
5520
2cadf913
SR
5521static int tracing_buffers_open(struct inode *inode, struct file *filp)
5522{
46ef2be0 5523 struct trace_array *tr = inode->i_private;
2cadf913 5524 struct ftrace_buffer_info *info;
7b85af63 5525 int ret;
2cadf913
SR
5526
5527 if (tracing_disabled)
5528 return -ENODEV;
5529
7b85af63
SRRH
5530 if (trace_array_get(tr) < 0)
5531 return -ENODEV;
5532
2cadf913 5533 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5534 if (!info) {
5535 trace_array_put(tr);
2cadf913 5536 return -ENOMEM;
7b85af63 5537 }
2cadf913 5538
a695cb58
SRRH
5539 mutex_lock(&trace_types_lock);
5540
cc60cdc9 5541 info->iter.tr = tr;
46ef2be0 5542 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5543 info->iter.trace = tr->current_trace;
12883efb 5544 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5545 info->spare = NULL;
2cadf913 5546 /* Force reading ring buffer for first read */
cc60cdc9 5547 info->read = (unsigned int)-1;
2cadf913
SR
5548
5549 filp->private_data = info;
5550
cf6ab6d9
SRRH
5551 tr->current_trace->ref++;
5552
a695cb58
SRRH
5553 mutex_unlock(&trace_types_lock);
5554
7b85af63
SRRH
5555 ret = nonseekable_open(inode, filp);
5556 if (ret < 0)
5557 trace_array_put(tr);
5558
5559 return ret;
2cadf913
SR
5560}
5561
cc60cdc9
SR
5562static unsigned int
5563tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5564{
5565 struct ftrace_buffer_info *info = filp->private_data;
5566 struct trace_iterator *iter = &info->iter;
5567
5568 return trace_poll(iter, filp, poll_table);
5569}
5570
2cadf913
SR
5571static ssize_t
5572tracing_buffers_read(struct file *filp, char __user *ubuf,
5573 size_t count, loff_t *ppos)
5574{
5575 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5576 struct trace_iterator *iter = &info->iter;
2cadf913 5577 ssize_t ret;
6de58e62 5578 ssize_t size;
2cadf913 5579
2dc5d12b
SR
5580 if (!count)
5581 return 0;
5582
6de58e62 5583#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5584 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5585 return -EBUSY;
6de58e62
SRRH
5586#endif
5587
ddd538f3 5588 if (!info->spare)
12883efb
SRRH
5589 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5590 iter->cpu_file);
ddd538f3 5591 if (!info->spare)
d716ff71 5592 return -ENOMEM;
ddd538f3 5593
2cadf913
SR
5594 /* Do we have previous read data to read? */
5595 if (info->read < PAGE_SIZE)
5596 goto read;
5597
b627344f 5598 again:
cc60cdc9 5599 trace_access_lock(iter->cpu_file);
12883efb 5600 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5601 &info->spare,
5602 count,
cc60cdc9
SR
5603 iter->cpu_file, 0);
5604 trace_access_unlock(iter->cpu_file);
2cadf913 5605
b627344f
SR
5606 if (ret < 0) {
5607 if (trace_empty(iter)) {
d716ff71
SRRH
5608 if ((filp->f_flags & O_NONBLOCK))
5609 return -EAGAIN;
5610
e30f53aa 5611 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5612 if (ret)
5613 return ret;
5614
b627344f
SR
5615 goto again;
5616 }
d716ff71 5617 return 0;
b627344f 5618 }
436fc280 5619
436fc280 5620 info->read = 0;
b627344f 5621 read:
2cadf913
SR
5622 size = PAGE_SIZE - info->read;
5623 if (size > count)
5624 size = count;
5625
5626 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5627 if (ret == size)
5628 return -EFAULT;
5629
2dc5d12b
SR
5630 size -= ret;
5631
2cadf913
SR
5632 *ppos += size;
5633 info->read += size;
5634
5635 return size;
5636}
5637
5638static int tracing_buffers_release(struct inode *inode, struct file *file)
5639{
5640 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5641 struct trace_iterator *iter = &info->iter;
2cadf913 5642
a695cb58
SRRH
5643 mutex_lock(&trace_types_lock);
5644
cf6ab6d9
SRRH
5645 iter->tr->current_trace->ref--;
5646
ff451961 5647 __trace_array_put(iter->tr);
2cadf913 5648
ddd538f3 5649 if (info->spare)
12883efb 5650 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5651 kfree(info);
5652
a695cb58
SRRH
5653 mutex_unlock(&trace_types_lock);
5654
2cadf913
SR
5655 return 0;
5656}
5657
5658struct buffer_ref {
5659 struct ring_buffer *buffer;
5660 void *page;
5661 int ref;
5662};
5663
5664static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5665 struct pipe_buffer *buf)
5666{
5667 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5668
5669 if (--ref->ref)
5670 return;
5671
5672 ring_buffer_free_read_page(ref->buffer, ref->page);
5673 kfree(ref);
5674 buf->private = 0;
5675}
5676
2cadf913
SR
5677static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5678 struct pipe_buffer *buf)
5679{
5680 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5681
5682 ref->ref++;
5683}
5684
5685/* Pipe buffer operations for a buffer. */
28dfef8f 5686static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5687 .can_merge = 0,
2cadf913
SR
5688 .confirm = generic_pipe_buf_confirm,
5689 .release = buffer_pipe_buf_release,
d55cb6cf 5690 .steal = generic_pipe_buf_steal,
2cadf913
SR
5691 .get = buffer_pipe_buf_get,
5692};
5693
5694/*
5695 * Callback from splice_to_pipe(), if we need to release some pages
5696 * at the end of the spd in case we error'ed out in filling the pipe.
5697 */
5698static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5699{
5700 struct buffer_ref *ref =
5701 (struct buffer_ref *)spd->partial[i].private;
5702
5703 if (--ref->ref)
5704 return;
5705
5706 ring_buffer_free_read_page(ref->buffer, ref->page);
5707 kfree(ref);
5708 spd->partial[i].private = 0;
5709}
5710
5711static ssize_t
5712tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5713 struct pipe_inode_info *pipe, size_t len,
5714 unsigned int flags)
5715{
5716 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5717 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5718 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5719 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5720 struct splice_pipe_desc spd = {
35f3d14d
JA
5721 .pages = pages_def,
5722 .partial = partial_def,
047fe360 5723 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5724 .flags = flags,
5725 .ops = &buffer_pipe_buf_ops,
5726 .spd_release = buffer_spd_release,
5727 };
5728 struct buffer_ref *ref;
93459c6c 5729 int entries, size, i;
07906da7 5730 ssize_t ret = 0;
2cadf913 5731
6de58e62 5732#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5733 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5734 return -EBUSY;
6de58e62
SRRH
5735#endif
5736
d716ff71
SRRH
5737 if (splice_grow_spd(pipe, &spd))
5738 return -ENOMEM;
35f3d14d 5739
d716ff71
SRRH
5740 if (*ppos & (PAGE_SIZE - 1))
5741 return -EINVAL;
93cfb3c9
LJ
5742
5743 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5744 if (len < PAGE_SIZE)
5745 return -EINVAL;
93cfb3c9
LJ
5746 len &= PAGE_MASK;
5747 }
5748
cc60cdc9
SR
5749 again:
5750 trace_access_lock(iter->cpu_file);
12883efb 5751 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5752
a786c06d 5753 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5754 struct page *page;
5755 int r;
5756
5757 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5758 if (!ref) {
5759 ret = -ENOMEM;
2cadf913 5760 break;
07906da7 5761 }
2cadf913 5762
7267fa68 5763 ref->ref = 1;
12883efb 5764 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5765 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5766 if (!ref->page) {
07906da7 5767 ret = -ENOMEM;
2cadf913
SR
5768 kfree(ref);
5769 break;
5770 }
5771
5772 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5773 len, iter->cpu_file, 1);
2cadf913 5774 if (r < 0) {
7ea59064 5775 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5776 kfree(ref);
5777 break;
5778 }
5779
5780 /*
5781 * zero out any left over data, this is going to
5782 * user land.
5783 */
5784 size = ring_buffer_page_len(ref->page);
5785 if (size < PAGE_SIZE)
5786 memset(ref->page + size, 0, PAGE_SIZE - size);
5787
5788 page = virt_to_page(ref->page);
5789
5790 spd.pages[i] = page;
5791 spd.partial[i].len = PAGE_SIZE;
5792 spd.partial[i].offset = 0;
5793 spd.partial[i].private = (unsigned long)ref;
5794 spd.nr_pages++;
93cfb3c9 5795 *ppos += PAGE_SIZE;
93459c6c 5796
12883efb 5797 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5798 }
5799
cc60cdc9 5800 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5801 spd.nr_pages = i;
5802
5803 /* did we read anything? */
5804 if (!spd.nr_pages) {
07906da7 5805 if (ret)
d716ff71
SRRH
5806 return ret;
5807
5808 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5809 return -EAGAIN;
07906da7 5810
e30f53aa 5811 ret = wait_on_pipe(iter, true);
8b8b3683 5812 if (ret)
d716ff71 5813 return ret;
e30f53aa 5814
cc60cdc9 5815 goto again;
2cadf913
SR
5816 }
5817
5818 ret = splice_to_pipe(pipe, &spd);
047fe360 5819 splice_shrink_spd(&spd);
6de58e62 5820
2cadf913
SR
5821 return ret;
5822}
5823
5824static const struct file_operations tracing_buffers_fops = {
5825 .open = tracing_buffers_open,
5826 .read = tracing_buffers_read,
cc60cdc9 5827 .poll = tracing_buffers_poll,
2cadf913
SR
5828 .release = tracing_buffers_release,
5829 .splice_read = tracing_buffers_splice_read,
5830 .llseek = no_llseek,
5831};
5832
c8d77183
SR
5833static ssize_t
5834tracing_stats_read(struct file *filp, char __user *ubuf,
5835 size_t count, loff_t *ppos)
5836{
4d3435b8
ON
5837 struct inode *inode = file_inode(filp);
5838 struct trace_array *tr = inode->i_private;
12883efb 5839 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5840 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5841 struct trace_seq *s;
5842 unsigned long cnt;
c64e148a
VN
5843 unsigned long long t;
5844 unsigned long usec_rem;
c8d77183 5845
e4f2d10f 5846 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5847 if (!s)
a646365c 5848 return -ENOMEM;
c8d77183
SR
5849
5850 trace_seq_init(s);
5851
12883efb 5852 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5853 trace_seq_printf(s, "entries: %ld\n", cnt);
5854
12883efb 5855 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5856 trace_seq_printf(s, "overrun: %ld\n", cnt);
5857
12883efb 5858 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5859 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5860
12883efb 5861 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5862 trace_seq_printf(s, "bytes: %ld\n", cnt);
5863
58e8eedf 5864 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5865 /* local or global for trace_clock */
12883efb 5866 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5867 usec_rem = do_div(t, USEC_PER_SEC);
5868 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5869 t, usec_rem);
5870
12883efb 5871 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5872 usec_rem = do_div(t, USEC_PER_SEC);
5873 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5874 } else {
5875 /* counter or tsc mode for trace_clock */
5876 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5877 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5878
11043d8b 5879 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5880 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5881 }
c64e148a 5882
12883efb 5883 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5884 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5885
12883efb 5886 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5887 trace_seq_printf(s, "read events: %ld\n", cnt);
5888
5ac48378
SRRH
5889 count = simple_read_from_buffer(ubuf, count, ppos,
5890 s->buffer, trace_seq_used(s));
c8d77183
SR
5891
5892 kfree(s);
5893
5894 return count;
5895}
5896
5897static const struct file_operations tracing_stats_fops = {
4d3435b8 5898 .open = tracing_open_generic_tr,
c8d77183 5899 .read = tracing_stats_read,
b444786f 5900 .llseek = generic_file_llseek,
4d3435b8 5901 .release = tracing_release_generic_tr,
c8d77183
SR
5902};
5903
bc0c38d1
SR
5904#ifdef CONFIG_DYNAMIC_FTRACE
5905
b807c3d0
SR
5906int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5907{
5908 return 0;
5909}
5910
bc0c38d1 5911static ssize_t
b807c3d0 5912tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5913 size_t cnt, loff_t *ppos)
5914{
a26a2a27
SR
5915 static char ftrace_dyn_info_buffer[1024];
5916 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5917 unsigned long *p = filp->private_data;
b807c3d0 5918 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5919 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5920 int r;
5921
b807c3d0
SR
5922 mutex_lock(&dyn_info_mutex);
5923 r = sprintf(buf, "%ld ", *p);
4bf39a94 5924
a26a2a27 5925 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5926 buf[r++] = '\n';
5927
5928 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5929
5930 mutex_unlock(&dyn_info_mutex);
5931
5932 return r;
bc0c38d1
SR
5933}
5934
5e2336a0 5935static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5936 .open = tracing_open_generic,
b807c3d0 5937 .read = tracing_read_dyn_info,
b444786f 5938 .llseek = generic_file_llseek,
bc0c38d1 5939};
77fd5c15 5940#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5941
77fd5c15
SRRH
5942#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5943static void
5944ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5945{
5946 tracing_snapshot();
5947}
bc0c38d1 5948
77fd5c15
SRRH
5949static void
5950ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5951{
77fd5c15
SRRH
5952 unsigned long *count = (long *)data;
5953
5954 if (!*count)
5955 return;
bc0c38d1 5956
77fd5c15
SRRH
5957 if (*count != -1)
5958 (*count)--;
5959
5960 tracing_snapshot();
5961}
5962
5963static int
5964ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5965 struct ftrace_probe_ops *ops, void *data)
5966{
5967 long count = (long)data;
5968
5969 seq_printf(m, "%ps:", (void *)ip);
5970
fa6f0cc7 5971 seq_puts(m, "snapshot");
77fd5c15
SRRH
5972
5973 if (count == -1)
fa6f0cc7 5974 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
5975 else
5976 seq_printf(m, ":count=%ld\n", count);
5977
5978 return 0;
5979}
5980
5981static struct ftrace_probe_ops snapshot_probe_ops = {
5982 .func = ftrace_snapshot,
5983 .print = ftrace_snapshot_print,
5984};
5985
5986static struct ftrace_probe_ops snapshot_count_probe_ops = {
5987 .func = ftrace_count_snapshot,
5988 .print = ftrace_snapshot_print,
5989};
5990
5991static int
5992ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5993 char *glob, char *cmd, char *param, int enable)
5994{
5995 struct ftrace_probe_ops *ops;
5996 void *count = (void *)-1;
5997 char *number;
5998 int ret;
5999
6000 /* hash funcs only work with set_ftrace_filter */
6001 if (!enable)
6002 return -EINVAL;
6003
6004 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6005
6006 if (glob[0] == '!') {
6007 unregister_ftrace_function_probe_func(glob+1, ops);
6008 return 0;
6009 }
6010
6011 if (!param)
6012 goto out_reg;
6013
6014 number = strsep(&param, ":");
6015
6016 if (!strlen(number))
6017 goto out_reg;
6018
6019 /*
6020 * We use the callback data field (which is a pointer)
6021 * as our counter.
6022 */
6023 ret = kstrtoul(number, 0, (unsigned long *)&count);
6024 if (ret)
6025 return ret;
6026
6027 out_reg:
6028 ret = register_ftrace_function_probe(glob, ops, count);
6029
6030 if (ret >= 0)
6031 alloc_snapshot(&global_trace);
6032
6033 return ret < 0 ? ret : 0;
6034}
6035
6036static struct ftrace_func_command ftrace_snapshot_cmd = {
6037 .name = "snapshot",
6038 .func = ftrace_trace_snapshot_callback,
6039};
6040
38de93ab 6041static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6042{
6043 return register_ftrace_command(&ftrace_snapshot_cmd);
6044}
6045#else
38de93ab 6046static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6047#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6048
7eeafbca 6049static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6050{
8434dc93
SRRH
6051 if (WARN_ON(!tr->dir))
6052 return ERR_PTR(-ENODEV);
6053
6054 /* Top directory uses NULL as the parent */
6055 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6056 return NULL;
6057
6058 /* All sub buffers have a descriptor */
2b6080f2 6059 return tr->dir;
bc0c38d1
SR
6060}
6061
2b6080f2 6062static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6063{
b04cc6b1
FW
6064 struct dentry *d_tracer;
6065
2b6080f2
SR
6066 if (tr->percpu_dir)
6067 return tr->percpu_dir;
b04cc6b1 6068
7eeafbca 6069 d_tracer = tracing_get_dentry(tr);
14a5ae40 6070 if (IS_ERR(d_tracer))
b04cc6b1
FW
6071 return NULL;
6072
8434dc93 6073 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6074
2b6080f2 6075 WARN_ONCE(!tr->percpu_dir,
8434dc93 6076 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6077
2b6080f2 6078 return tr->percpu_dir;
b04cc6b1
FW
6079}
6080
649e9c70
ON
6081static struct dentry *
6082trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6083 void *data, long cpu, const struct file_operations *fops)
6084{
6085 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6086
6087 if (ret) /* See tracing_get_cpu() */
7682c918 6088 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6089 return ret;
6090}
6091
2b6080f2 6092static void
8434dc93 6093tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6094{
2b6080f2 6095 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6096 struct dentry *d_cpu;
dd49a38c 6097 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6098
0a3d7ce7
NK
6099 if (!d_percpu)
6100 return;
6101
dd49a38c 6102 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6103 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6104 if (!d_cpu) {
8434dc93 6105 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6106 return;
6107 }
b04cc6b1 6108
8656e7a2 6109 /* per cpu trace_pipe */
649e9c70 6110 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6111 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6112
6113 /* per cpu trace */
649e9c70 6114 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6115 tr, cpu, &tracing_fops);
7f96f93f 6116
649e9c70 6117 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6118 tr, cpu, &tracing_buffers_fops);
7f96f93f 6119
649e9c70 6120 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6121 tr, cpu, &tracing_stats_fops);
438ced17 6122
649e9c70 6123 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6124 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6125
6126#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6127 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6128 tr, cpu, &snapshot_fops);
6de58e62 6129
649e9c70 6130 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6131 tr, cpu, &snapshot_raw_fops);
f1affcaa 6132#endif
b04cc6b1
FW
6133}
6134
60a11774
SR
6135#ifdef CONFIG_FTRACE_SELFTEST
6136/* Let selftest have access to static functions in this file */
6137#include "trace_selftest.c"
6138#endif
6139
577b785f
SR
6140static ssize_t
6141trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6142 loff_t *ppos)
6143{
6144 struct trace_option_dentry *topt = filp->private_data;
6145 char *buf;
6146
6147 if (topt->flags->val & topt->opt->bit)
6148 buf = "1\n";
6149 else
6150 buf = "0\n";
6151
6152 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6153}
6154
6155static ssize_t
6156trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6157 loff_t *ppos)
6158{
6159 struct trace_option_dentry *topt = filp->private_data;
6160 unsigned long val;
577b785f
SR
6161 int ret;
6162
22fe9b54
PH
6163 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6164 if (ret)
577b785f
SR
6165 return ret;
6166
8d18eaaf
LZ
6167 if (val != 0 && val != 1)
6168 return -EINVAL;
577b785f 6169
8d18eaaf 6170 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6171 mutex_lock(&trace_types_lock);
8c1a49ae 6172 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6173 topt->opt, !val);
577b785f
SR
6174 mutex_unlock(&trace_types_lock);
6175 if (ret)
6176 return ret;
577b785f
SR
6177 }
6178
6179 *ppos += cnt;
6180
6181 return cnt;
6182}
6183
6184
6185static const struct file_operations trace_options_fops = {
6186 .open = tracing_open_generic,
6187 .read = trace_options_read,
6188 .write = trace_options_write,
b444786f 6189 .llseek = generic_file_llseek,
577b785f
SR
6190};
6191
9a38a885
SRRH
6192/*
6193 * In order to pass in both the trace_array descriptor as well as the index
6194 * to the flag that the trace option file represents, the trace_array
6195 * has a character array of trace_flags_index[], which holds the index
6196 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6197 * The address of this character array is passed to the flag option file
6198 * read/write callbacks.
6199 *
6200 * In order to extract both the index and the trace_array descriptor,
6201 * get_tr_index() uses the following algorithm.
6202 *
6203 * idx = *ptr;
6204 *
6205 * As the pointer itself contains the address of the index (remember
6206 * index[1] == 1).
6207 *
6208 * Then to get the trace_array descriptor, by subtracting that index
6209 * from the ptr, we get to the start of the index itself.
6210 *
6211 * ptr - idx == &index[0]
6212 *
6213 * Then a simple container_of() from that pointer gets us to the
6214 * trace_array descriptor.
6215 */
6216static void get_tr_index(void *data, struct trace_array **ptr,
6217 unsigned int *pindex)
6218{
6219 *pindex = *(unsigned char *)data;
6220
6221 *ptr = container_of(data - *pindex, struct trace_array,
6222 trace_flags_index);
6223}
6224
a8259075
SR
6225static ssize_t
6226trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6227 loff_t *ppos)
6228{
9a38a885
SRRH
6229 void *tr_index = filp->private_data;
6230 struct trace_array *tr;
6231 unsigned int index;
a8259075
SR
6232 char *buf;
6233
9a38a885
SRRH
6234 get_tr_index(tr_index, &tr, &index);
6235
6236 if (tr->trace_flags & (1 << index))
a8259075
SR
6237 buf = "1\n";
6238 else
6239 buf = "0\n";
6240
6241 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6242}
6243
6244static ssize_t
6245trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6246 loff_t *ppos)
6247{
9a38a885
SRRH
6248 void *tr_index = filp->private_data;
6249 struct trace_array *tr;
6250 unsigned int index;
a8259075
SR
6251 unsigned long val;
6252 int ret;
6253
9a38a885
SRRH
6254 get_tr_index(tr_index, &tr, &index);
6255
22fe9b54
PH
6256 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6257 if (ret)
a8259075
SR
6258 return ret;
6259
f2d84b65 6260 if (val != 0 && val != 1)
a8259075 6261 return -EINVAL;
69d34da2
SRRH
6262
6263 mutex_lock(&trace_types_lock);
2b6080f2 6264 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6265 mutex_unlock(&trace_types_lock);
a8259075 6266
613f04a0
SRRH
6267 if (ret < 0)
6268 return ret;
6269
a8259075
SR
6270 *ppos += cnt;
6271
6272 return cnt;
6273}
6274
a8259075
SR
6275static const struct file_operations trace_options_core_fops = {
6276 .open = tracing_open_generic,
6277 .read = trace_options_core_read,
6278 .write = trace_options_core_write,
b444786f 6279 .llseek = generic_file_llseek,
a8259075
SR
6280};
6281
5452af66 6282struct dentry *trace_create_file(const char *name,
f4ae40a6 6283 umode_t mode,
5452af66
FW
6284 struct dentry *parent,
6285 void *data,
6286 const struct file_operations *fops)
6287{
6288 struct dentry *ret;
6289
8434dc93 6290 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6291 if (!ret)
8434dc93 6292 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6293
6294 return ret;
6295}
6296
6297
2b6080f2 6298static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6299{
6300 struct dentry *d_tracer;
a8259075 6301
2b6080f2
SR
6302 if (tr->options)
6303 return tr->options;
a8259075 6304
7eeafbca 6305 d_tracer = tracing_get_dentry(tr);
14a5ae40 6306 if (IS_ERR(d_tracer))
a8259075
SR
6307 return NULL;
6308
8434dc93 6309 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6310 if (!tr->options) {
8434dc93 6311 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6312 return NULL;
6313 }
6314
2b6080f2 6315 return tr->options;
a8259075
SR
6316}
6317
577b785f 6318static void
2b6080f2
SR
6319create_trace_option_file(struct trace_array *tr,
6320 struct trace_option_dentry *topt,
577b785f
SR
6321 struct tracer_flags *flags,
6322 struct tracer_opt *opt)
6323{
6324 struct dentry *t_options;
577b785f 6325
2b6080f2 6326 t_options = trace_options_init_dentry(tr);
577b785f
SR
6327 if (!t_options)
6328 return;
6329
6330 topt->flags = flags;
6331 topt->opt = opt;
2b6080f2 6332 topt->tr = tr;
577b785f 6333
5452af66 6334 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6335 &trace_options_fops);
6336
577b785f
SR
6337}
6338
6339static struct trace_option_dentry *
2b6080f2 6340create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6341{
6342 struct trace_option_dentry *topts;
6343 struct tracer_flags *flags;
6344 struct tracer_opt *opts;
6345 int cnt;
6346
6347 if (!tracer)
6348 return NULL;
6349
6350 flags = tracer->flags;
6351
6352 if (!flags || !flags->opts)
6353 return NULL;
6354
6355 opts = flags->opts;
6356
6357 for (cnt = 0; opts[cnt].name; cnt++)
6358 ;
6359
0cfe8245 6360 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
6361 if (!topts)
6362 return NULL;
6363
41d9c0be 6364 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 6365 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 6366 &opts[cnt]);
41d9c0be
SRRH
6367 WARN_ONCE(topts[cnt].entry == NULL,
6368 "Failed to create trace option: %s",
6369 opts[cnt].name);
6370 }
577b785f
SR
6371
6372 return topts;
6373}
6374
a8259075 6375static struct dentry *
2b6080f2
SR
6376create_trace_option_core_file(struct trace_array *tr,
6377 const char *option, long index)
a8259075
SR
6378{
6379 struct dentry *t_options;
a8259075 6380
2b6080f2 6381 t_options = trace_options_init_dentry(tr);
a8259075
SR
6382 if (!t_options)
6383 return NULL;
6384
9a38a885
SRRH
6385 return trace_create_file(option, 0644, t_options,
6386 (void *)&tr->trace_flags_index[index],
6387 &trace_options_core_fops);
a8259075
SR
6388}
6389
2b6080f2 6390static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6391{
6392 struct dentry *t_options;
a8259075
SR
6393 int i;
6394
2b6080f2 6395 t_options = trace_options_init_dentry(tr);
a8259075
SR
6396 if (!t_options)
6397 return;
6398
5452af66 6399 for (i = 0; trace_options[i]; i++)
2b6080f2 6400 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
6401}
6402
499e5470
SR
6403static ssize_t
6404rb_simple_read(struct file *filp, char __user *ubuf,
6405 size_t cnt, loff_t *ppos)
6406{
348f0fc2 6407 struct trace_array *tr = filp->private_data;
499e5470
SR
6408 char buf[64];
6409 int r;
6410
10246fa3 6411 r = tracer_tracing_is_on(tr);
499e5470
SR
6412 r = sprintf(buf, "%d\n", r);
6413
6414 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6415}
6416
6417static ssize_t
6418rb_simple_write(struct file *filp, const char __user *ubuf,
6419 size_t cnt, loff_t *ppos)
6420{
348f0fc2 6421 struct trace_array *tr = filp->private_data;
12883efb 6422 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6423 unsigned long val;
6424 int ret;
6425
6426 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6427 if (ret)
6428 return ret;
6429
6430 if (buffer) {
2df8f8a6
SR
6431 mutex_lock(&trace_types_lock);
6432 if (val) {
10246fa3 6433 tracer_tracing_on(tr);
2b6080f2
SR
6434 if (tr->current_trace->start)
6435 tr->current_trace->start(tr);
2df8f8a6 6436 } else {
10246fa3 6437 tracer_tracing_off(tr);
2b6080f2
SR
6438 if (tr->current_trace->stop)
6439 tr->current_trace->stop(tr);
2df8f8a6
SR
6440 }
6441 mutex_unlock(&trace_types_lock);
499e5470
SR
6442 }
6443
6444 (*ppos)++;
6445
6446 return cnt;
6447}
6448
6449static const struct file_operations rb_simple_fops = {
7b85af63 6450 .open = tracing_open_generic_tr,
499e5470
SR
6451 .read = rb_simple_read,
6452 .write = rb_simple_write,
7b85af63 6453 .release = tracing_release_generic_tr,
499e5470
SR
6454 .llseek = default_llseek,
6455};
6456
277ba044
SR
6457struct dentry *trace_instance_dir;
6458
6459static void
8434dc93 6460init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6461
55034cd6
SRRH
6462static int
6463allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6464{
6465 enum ring_buffer_flags rb_flags;
737223fb 6466
983f938a 6467 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 6468
dced341b
SRRH
6469 buf->tr = tr;
6470
55034cd6
SRRH
6471 buf->buffer = ring_buffer_alloc(size, rb_flags);
6472 if (!buf->buffer)
6473 return -ENOMEM;
737223fb 6474
55034cd6
SRRH
6475 buf->data = alloc_percpu(struct trace_array_cpu);
6476 if (!buf->data) {
6477 ring_buffer_free(buf->buffer);
6478 return -ENOMEM;
6479 }
737223fb 6480
737223fb
SRRH
6481 /* Allocate the first page for all buffers */
6482 set_buffer_entries(&tr->trace_buffer,
6483 ring_buffer_size(tr->trace_buffer.buffer, 0));
6484
55034cd6
SRRH
6485 return 0;
6486}
737223fb 6487
55034cd6
SRRH
6488static int allocate_trace_buffers(struct trace_array *tr, int size)
6489{
6490 int ret;
737223fb 6491
55034cd6
SRRH
6492 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6493 if (ret)
6494 return ret;
737223fb 6495
55034cd6
SRRH
6496#ifdef CONFIG_TRACER_MAX_TRACE
6497 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6498 allocate_snapshot ? size : 1);
6499 if (WARN_ON(ret)) {
737223fb 6500 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6501 free_percpu(tr->trace_buffer.data);
6502 return -ENOMEM;
6503 }
6504 tr->allocated_snapshot = allocate_snapshot;
737223fb 6505
55034cd6
SRRH
6506 /*
6507 * Only the top level trace array gets its snapshot allocated
6508 * from the kernel command line.
6509 */
6510 allocate_snapshot = false;
737223fb 6511#endif
55034cd6 6512 return 0;
737223fb
SRRH
6513}
6514
f0b70cc4
SRRH
6515static void free_trace_buffer(struct trace_buffer *buf)
6516{
6517 if (buf->buffer) {
6518 ring_buffer_free(buf->buffer);
6519 buf->buffer = NULL;
6520 free_percpu(buf->data);
6521 buf->data = NULL;
6522 }
6523}
6524
23aaa3c1
SRRH
6525static void free_trace_buffers(struct trace_array *tr)
6526{
6527 if (!tr)
6528 return;
6529
f0b70cc4 6530 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6531
6532#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6533 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6534#endif
6535}
6536
9a38a885
SRRH
6537static void init_trace_flags_index(struct trace_array *tr)
6538{
6539 int i;
6540
6541 /* Used by the trace options files */
6542 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6543 tr->trace_flags_index[i] = i;
6544}
6545
eae47358 6546static int instance_mkdir(const char *name)
737223fb 6547{
277ba044
SR
6548 struct trace_array *tr;
6549 int ret;
277ba044
SR
6550
6551 mutex_lock(&trace_types_lock);
6552
6553 ret = -EEXIST;
6554 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6555 if (tr->name && strcmp(tr->name, name) == 0)
6556 goto out_unlock;
6557 }
6558
6559 ret = -ENOMEM;
6560 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6561 if (!tr)
6562 goto out_unlock;
6563
6564 tr->name = kstrdup(name, GFP_KERNEL);
6565 if (!tr->name)
6566 goto out_free_tr;
6567
ccfe9e42
AL
6568 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6569 goto out_free_tr;
6570
983f938a
SRRH
6571 tr->trace_flags = global_trace.trace_flags;
6572
ccfe9e42
AL
6573 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6574
277ba044
SR
6575 raw_spin_lock_init(&tr->start_lock);
6576
0b9b12c1
SRRH
6577 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6578
277ba044
SR
6579 tr->current_trace = &nop_trace;
6580
6581 INIT_LIST_HEAD(&tr->systems);
6582 INIT_LIST_HEAD(&tr->events);
6583
737223fb 6584 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6585 goto out_free_tr;
6586
8434dc93 6587 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6588 if (!tr->dir)
6589 goto out_free_tr;
6590
6591 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6592 if (ret) {
8434dc93 6593 tracefs_remove_recursive(tr->dir);
277ba044 6594 goto out_free_tr;
609e85a7 6595 }
277ba044 6596
8434dc93 6597 init_tracer_tracefs(tr, tr->dir);
9a38a885 6598 init_trace_flags_index(tr);
277ba044
SR
6599
6600 list_add(&tr->list, &ftrace_trace_arrays);
6601
6602 mutex_unlock(&trace_types_lock);
6603
6604 return 0;
6605
6606 out_free_tr:
23aaa3c1 6607 free_trace_buffers(tr);
ccfe9e42 6608 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6609 kfree(tr->name);
6610 kfree(tr);
6611
6612 out_unlock:
6613 mutex_unlock(&trace_types_lock);
6614
6615 return ret;
6616
6617}
6618
eae47358 6619static int instance_rmdir(const char *name)
0c8916c3
SR
6620{
6621 struct trace_array *tr;
6622 int found = 0;
6623 int ret;
6624
6625 mutex_lock(&trace_types_lock);
6626
6627 ret = -ENODEV;
6628 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6629 if (tr->name && strcmp(tr->name, name) == 0) {
6630 found = 1;
6631 break;
6632 }
6633 }
6634 if (!found)
6635 goto out_unlock;
6636
a695cb58 6637 ret = -EBUSY;
cf6ab6d9 6638 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6639 goto out_unlock;
6640
0c8916c3
SR
6641 list_del(&tr->list);
6642
6b450d25 6643 tracing_set_nop(tr);
0c8916c3 6644 event_trace_del_tracer(tr);
591dffda 6645 ftrace_destroy_function_files(tr);
0c8916c3 6646 debugfs_remove_recursive(tr->dir);
a9fcaaac 6647 free_trace_buffers(tr);
0c8916c3
SR
6648
6649 kfree(tr->name);
6650 kfree(tr);
6651
6652 ret = 0;
6653
6654 out_unlock:
6655 mutex_unlock(&trace_types_lock);
6656
6657 return ret;
6658}
6659
277ba044
SR
6660static __init void create_trace_instances(struct dentry *d_tracer)
6661{
eae47358
SRRH
6662 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6663 instance_mkdir,
6664 instance_rmdir);
277ba044
SR
6665 if (WARN_ON(!trace_instance_dir))
6666 return;
277ba044
SR
6667}
6668
2b6080f2 6669static void
8434dc93 6670init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6671{
121aaee7 6672 int cpu;
2b6080f2 6673
607e2ea1
SRRH
6674 trace_create_file("available_tracers", 0444, d_tracer,
6675 tr, &show_traces_fops);
6676
6677 trace_create_file("current_tracer", 0644, d_tracer,
6678 tr, &set_tracer_fops);
6679
ccfe9e42
AL
6680 trace_create_file("tracing_cpumask", 0644, d_tracer,
6681 tr, &tracing_cpumask_fops);
6682
2b6080f2
SR
6683 trace_create_file("trace_options", 0644, d_tracer,
6684 tr, &tracing_iter_fops);
6685
6686 trace_create_file("trace", 0644, d_tracer,
6484c71c 6687 tr, &tracing_fops);
2b6080f2
SR
6688
6689 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6690 tr, &tracing_pipe_fops);
2b6080f2
SR
6691
6692 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6693 tr, &tracing_entries_fops);
2b6080f2
SR
6694
6695 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6696 tr, &tracing_total_entries_fops);
6697
238ae93d 6698 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6699 tr, &tracing_free_buffer_fops);
6700
6701 trace_create_file("trace_marker", 0220, d_tracer,
6702 tr, &tracing_mark_fops);
6703
6704 trace_create_file("trace_clock", 0644, d_tracer, tr,
6705 &trace_clock_fops);
6706
6707 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6708 tr, &rb_simple_fops);
ce9bae55 6709
6d9b3fa5
SRRH
6710#ifdef CONFIG_TRACER_MAX_TRACE
6711 trace_create_file("tracing_max_latency", 0644, d_tracer,
6712 &tr->max_latency, &tracing_max_lat_fops);
6713#endif
6714
591dffda
SRRH
6715 if (ftrace_create_function_files(tr, d_tracer))
6716 WARN(1, "Could not allocate function filter files");
6717
ce9bae55
SRRH
6718#ifdef CONFIG_TRACER_SNAPSHOT
6719 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6720 tr, &snapshot_fops);
ce9bae55 6721#endif
121aaee7
SRRH
6722
6723 for_each_tracing_cpu(cpu)
8434dc93 6724 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6725
2b6080f2
SR
6726}
6727
f76180bc
SRRH
6728static struct vfsmount *trace_automount(void *ingore)
6729{
6730 struct vfsmount *mnt;
6731 struct file_system_type *type;
6732
6733 /*
6734 * To maintain backward compatibility for tools that mount
6735 * debugfs to get to the tracing facility, tracefs is automatically
6736 * mounted to the debugfs/tracing directory.
6737 */
6738 type = get_fs_type("tracefs");
6739 if (!type)
6740 return NULL;
6741 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6742 put_filesystem(type);
6743 if (IS_ERR(mnt))
6744 return NULL;
6745 mntget(mnt);
6746
6747 return mnt;
6748}
6749
7eeafbca
SRRH
6750/**
6751 * tracing_init_dentry - initialize top level trace array
6752 *
6753 * This is called when creating files or directories in the tracing
6754 * directory. It is called via fs_initcall() by any of the boot up code
6755 * and expects to return the dentry of the top level tracing directory.
6756 */
6757struct dentry *tracing_init_dentry(void)
6758{
6759 struct trace_array *tr = &global_trace;
6760
f76180bc 6761 /* The top level trace array uses NULL as parent */
7eeafbca 6762 if (tr->dir)
f76180bc 6763 return NULL;
7eeafbca
SRRH
6764
6765 if (WARN_ON(!debugfs_initialized()))
6766 return ERR_PTR(-ENODEV);
6767
f76180bc
SRRH
6768 /*
6769 * As there may still be users that expect the tracing
6770 * files to exist in debugfs/tracing, we must automount
6771 * the tracefs file system there, so older tools still
6772 * work with the newer kerenl.
6773 */
6774 tr->dir = debugfs_create_automount("tracing", NULL,
6775 trace_automount, NULL);
7eeafbca
SRRH
6776 if (!tr->dir) {
6777 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6778 return ERR_PTR(-ENOMEM);
6779 }
6780
8434dc93 6781 return NULL;
7eeafbca
SRRH
6782}
6783
0c564a53
SRRH
6784extern struct trace_enum_map *__start_ftrace_enum_maps[];
6785extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6786
6787static void __init trace_enum_init(void)
6788{
3673b8e4
SRRH
6789 int len;
6790
6791 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6792 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6793}
6794
6795#ifdef CONFIG_MODULES
6796static void trace_module_add_enums(struct module *mod)
6797{
6798 if (!mod->num_trace_enums)
6799 return;
6800
6801 /*
6802 * Modules with bad taint do not have events created, do
6803 * not bother with enums either.
6804 */
6805 if (trace_module_has_bad_taint(mod))
6806 return;
6807
9828413d 6808 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6809}
6810
9828413d
SRRH
6811#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6812static void trace_module_remove_enums(struct module *mod)
6813{
6814 union trace_enum_map_item *map;
6815 union trace_enum_map_item **last = &trace_enum_maps;
6816
6817 if (!mod->num_trace_enums)
6818 return;
6819
6820 mutex_lock(&trace_enum_mutex);
6821
6822 map = trace_enum_maps;
6823
6824 while (map) {
6825 if (map->head.mod == mod)
6826 break;
6827 map = trace_enum_jmp_to_tail(map);
6828 last = &map->tail.next;
6829 map = map->tail.next;
6830 }
6831 if (!map)
6832 goto out;
6833
6834 *last = trace_enum_jmp_to_tail(map)->tail.next;
6835 kfree(map);
6836 out:
6837 mutex_unlock(&trace_enum_mutex);
6838}
6839#else
6840static inline void trace_module_remove_enums(struct module *mod) { }
6841#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6842
3673b8e4
SRRH
6843static int trace_module_notify(struct notifier_block *self,
6844 unsigned long val, void *data)
6845{
6846 struct module *mod = data;
6847
6848 switch (val) {
6849 case MODULE_STATE_COMING:
6850 trace_module_add_enums(mod);
6851 break;
9828413d
SRRH
6852 case MODULE_STATE_GOING:
6853 trace_module_remove_enums(mod);
6854 break;
3673b8e4
SRRH
6855 }
6856
6857 return 0;
0c564a53
SRRH
6858}
6859
3673b8e4
SRRH
6860static struct notifier_block trace_module_nb = {
6861 .notifier_call = trace_module_notify,
6862 .priority = 0,
6863};
9828413d 6864#endif /* CONFIG_MODULES */
3673b8e4 6865
8434dc93 6866static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6867{
6868 struct dentry *d_tracer;
41d9c0be 6869 struct tracer *t;
bc0c38d1 6870
7e53bd42
LJ
6871 trace_access_lock_init();
6872
bc0c38d1 6873 d_tracer = tracing_init_dentry();
14a5ae40 6874 if (IS_ERR(d_tracer))
ed6f1c99 6875 return 0;
bc0c38d1 6876
8434dc93 6877 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6878
5452af66 6879 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6880 &global_trace, &tracing_thresh_fops);
a8259075 6881
339ae5d3 6882 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6883 NULL, &tracing_readme_fops);
6884
69abe6a5
AP
6885 trace_create_file("saved_cmdlines", 0444, d_tracer,
6886 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6887
939c7a4f
YY
6888 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6889 NULL, &tracing_saved_cmdlines_size_fops);
6890
0c564a53
SRRH
6891 trace_enum_init();
6892
9828413d
SRRH
6893 trace_create_enum_file(d_tracer);
6894
3673b8e4
SRRH
6895#ifdef CONFIG_MODULES
6896 register_module_notifier(&trace_module_nb);
6897#endif
6898
bc0c38d1 6899#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6900 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6901 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6902#endif
b04cc6b1 6903
277ba044 6904 create_trace_instances(d_tracer);
5452af66 6905
2b6080f2 6906 create_trace_options_dir(&global_trace);
b04cc6b1 6907
41d9c0be
SRRH
6908 mutex_lock(&trace_types_lock);
6909 for (t = trace_types; t; t = t->next)
6910 add_tracer_options(&global_trace, t);
6911 mutex_unlock(&trace_types_lock);
09d23a1d 6912
b5ad384e 6913 return 0;
bc0c38d1
SR
6914}
6915
3f5a54e3
SR
6916static int trace_panic_handler(struct notifier_block *this,
6917 unsigned long event, void *unused)
6918{
944ac425 6919 if (ftrace_dump_on_oops)
cecbca96 6920 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6921 return NOTIFY_OK;
6922}
6923
6924static struct notifier_block trace_panic_notifier = {
6925 .notifier_call = trace_panic_handler,
6926 .next = NULL,
6927 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6928};
6929
6930static int trace_die_handler(struct notifier_block *self,
6931 unsigned long val,
6932 void *data)
6933{
6934 switch (val) {
6935 case DIE_OOPS:
944ac425 6936 if (ftrace_dump_on_oops)
cecbca96 6937 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6938 break;
6939 default:
6940 break;
6941 }
6942 return NOTIFY_OK;
6943}
6944
6945static struct notifier_block trace_die_notifier = {
6946 .notifier_call = trace_die_handler,
6947 .priority = 200
6948};
6949
6950/*
6951 * printk is set to max of 1024, we really don't need it that big.
6952 * Nothing should be printing 1000 characters anyway.
6953 */
6954#define TRACE_MAX_PRINT 1000
6955
6956/*
6957 * Define here KERN_TRACE so that we have one place to modify
6958 * it if we decide to change what log level the ftrace dump
6959 * should be at.
6960 */
428aee14 6961#define KERN_TRACE KERN_EMERG
3f5a54e3 6962
955b61e5 6963void
3f5a54e3
SR
6964trace_printk_seq(struct trace_seq *s)
6965{
6966 /* Probably should print a warning here. */
3a161d99
SRRH
6967 if (s->seq.len >= TRACE_MAX_PRINT)
6968 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 6969
820b75f6
SRRH
6970 /*
6971 * More paranoid code. Although the buffer size is set to
6972 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6973 * an extra layer of protection.
6974 */
6975 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6976 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
6977
6978 /* should be zero ended, but we are paranoid. */
3a161d99 6979 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
6980
6981 printk(KERN_TRACE "%s", s->buffer);
6982
f9520750 6983 trace_seq_init(s);
3f5a54e3
SR
6984}
6985
955b61e5
JW
6986void trace_init_global_iter(struct trace_iterator *iter)
6987{
6988 iter->tr = &global_trace;
2b6080f2 6989 iter->trace = iter->tr->current_trace;
ae3b5093 6990 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6991 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6992
6993 if (iter->trace && iter->trace->open)
6994 iter->trace->open(iter);
6995
6996 /* Annotate start of buffers if we had overruns */
6997 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6998 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6999
7000 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7001 if (trace_clocks[iter->tr->clock_id].in_ns)
7002 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
7003}
7004
7fe70b57 7005void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 7006{
3f5a54e3
SR
7007 /* use static because iter can be a bit big for the stack */
7008 static struct trace_iterator iter;
7fe70b57 7009 static atomic_t dump_running;
983f938a 7010 struct trace_array *tr = &global_trace;
cf586b61 7011 unsigned int old_userobj;
d769041f
SR
7012 unsigned long flags;
7013 int cnt = 0, cpu;
3f5a54e3 7014
7fe70b57
SRRH
7015 /* Only allow one dump user at a time. */
7016 if (atomic_inc_return(&dump_running) != 1) {
7017 atomic_dec(&dump_running);
7018 return;
7019 }
3f5a54e3 7020
7fe70b57
SRRH
7021 /*
7022 * Always turn off tracing when we dump.
7023 * We don't need to show trace output of what happens
7024 * between multiple crashes.
7025 *
7026 * If the user does a sysrq-z, then they can re-enable
7027 * tracing with echo 1 > tracing_on.
7028 */
0ee6b6cf 7029 tracing_off();
cf586b61 7030
7fe70b57 7031 local_irq_save(flags);
3f5a54e3 7032
38dbe0b1 7033 /* Simulate the iterator */
955b61e5
JW
7034 trace_init_global_iter(&iter);
7035
d769041f 7036 for_each_tracing_cpu(cpu) {
5e2d5ef8 7037 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
7038 }
7039
983f938a 7040 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 7041
b54d3de9 7042 /* don't look at user memory in panic mode */
983f938a 7043 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 7044
cecbca96
FW
7045 switch (oops_dump_mode) {
7046 case DUMP_ALL:
ae3b5093 7047 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7048 break;
7049 case DUMP_ORIG:
7050 iter.cpu_file = raw_smp_processor_id();
7051 break;
7052 case DUMP_NONE:
7053 goto out_enable;
7054 default:
7055 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7056 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7057 }
7058
7059 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7060
7fe70b57
SRRH
7061 /* Did function tracer already get disabled? */
7062 if (ftrace_is_dead()) {
7063 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7064 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7065 }
7066
3f5a54e3
SR
7067 /*
7068 * We need to stop all tracing on all CPUS to read the
7069 * the next buffer. This is a bit expensive, but is
7070 * not done often. We fill all what we can read,
7071 * and then release the locks again.
7072 */
7073
3f5a54e3
SR
7074 while (!trace_empty(&iter)) {
7075
7076 if (!cnt)
7077 printk(KERN_TRACE "---------------------------------\n");
7078
7079 cnt++;
7080
7081 /* reset all but tr, trace, and overruns */
7082 memset(&iter.seq, 0,
7083 sizeof(struct trace_iterator) -
7084 offsetof(struct trace_iterator, seq));
7085 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7086 iter.pos = -1;
7087
955b61e5 7088 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7089 int ret;
7090
7091 ret = print_trace_line(&iter);
7092 if (ret != TRACE_TYPE_NO_CONSUME)
7093 trace_consume(&iter);
3f5a54e3 7094 }
b892e5c8 7095 touch_nmi_watchdog();
3f5a54e3
SR
7096
7097 trace_printk_seq(&iter.seq);
7098 }
7099
7100 if (!cnt)
7101 printk(KERN_TRACE " (ftrace buffer empty)\n");
7102 else
7103 printk(KERN_TRACE "---------------------------------\n");
7104
cecbca96 7105 out_enable:
983f938a 7106 tr->trace_flags |= old_userobj;
cf586b61 7107
7fe70b57
SRRH
7108 for_each_tracing_cpu(cpu) {
7109 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7110 }
7fe70b57 7111 atomic_dec(&dump_running);
cd891ae0 7112 local_irq_restore(flags);
3f5a54e3 7113}
a8eecf22 7114EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7115
3928a8a2 7116__init static int tracer_alloc_buffers(void)
bc0c38d1 7117{
73c5162a 7118 int ring_buf_size;
9e01c1b7 7119 int ret = -ENOMEM;
4c11d7ae 7120
b5e87c05
SRRH
7121 /*
7122 * Make sure we don't accidently add more trace options
7123 * than we have bits for.
7124 */
9a38a885 7125 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 7126
9e01c1b7
RR
7127 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7128 goto out;
7129
ccfe9e42 7130 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7131 goto out_free_buffer_mask;
4c11d7ae 7132
07d777fe
SR
7133 /* Only allocate trace_printk buffers if a trace_printk exists */
7134 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7135 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7136 trace_printk_init_buffers();
7137
73c5162a
SR
7138 /* To save memory, keep the ring buffer size to its minimum */
7139 if (ring_buffer_expanded)
7140 ring_buf_size = trace_buf_size;
7141 else
7142 ring_buf_size = 1;
7143
9e01c1b7 7144 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7145 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7146
2b6080f2
SR
7147 raw_spin_lock_init(&global_trace.start_lock);
7148
2c4a33ab
SRRH
7149 /* Used for event triggers */
7150 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7151 if (!temp_buffer)
7152 goto out_free_cpumask;
7153
939c7a4f
YY
7154 if (trace_create_savedcmd() < 0)
7155 goto out_free_temp_buffer;
7156
9e01c1b7 7157 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 7158 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7159 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7160 WARN_ON(1);
939c7a4f 7161 goto out_free_savedcmd;
4c11d7ae 7162 }
a7603ff4 7163
499e5470
SR
7164 if (global_trace.buffer_disabled)
7165 tracing_off();
4c11d7ae 7166
e1e232ca
SR
7167 if (trace_boot_clock) {
7168 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7169 if (ret < 0)
7170 pr_warning("Trace clock %s not defined, going back to default\n",
7171 trace_boot_clock);
7172 }
7173
ca164318
SRRH
7174 /*
7175 * register_tracer() might reference current_trace, so it
7176 * needs to be set before we register anything. This is
7177 * just a bootstrap of current_trace anyway.
7178 */
2b6080f2
SR
7179 global_trace.current_trace = &nop_trace;
7180
0b9b12c1
SRRH
7181 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7182
4104d326
SRRH
7183 ftrace_init_global_array_ops(&global_trace);
7184
9a38a885
SRRH
7185 init_trace_flags_index(&global_trace);
7186
ca164318
SRRH
7187 register_tracer(&nop_trace);
7188
60a11774
SR
7189 /* All seems OK, enable tracing */
7190 tracing_disabled = 0;
3928a8a2 7191
3f5a54e3
SR
7192 atomic_notifier_chain_register(&panic_notifier_list,
7193 &trace_panic_notifier);
7194
7195 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7196
ae63b31e
SR
7197 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7198
7199 INIT_LIST_HEAD(&global_trace.systems);
7200 INIT_LIST_HEAD(&global_trace.events);
7201 list_add(&global_trace.list, &ftrace_trace_arrays);
7202
7bcfaf54
SR
7203 while (trace_boot_options) {
7204 char *option;
7205
7206 option = strsep(&trace_boot_options, ",");
2b6080f2 7207 trace_set_options(&global_trace, option);
7bcfaf54
SR
7208 }
7209
77fd5c15
SRRH
7210 register_snapshot_cmd();
7211
2fc1dfbe 7212 return 0;
3f5a54e3 7213
939c7a4f
YY
7214out_free_savedcmd:
7215 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7216out_free_temp_buffer:
7217 ring_buffer_free(temp_buffer);
9e01c1b7 7218out_free_cpumask:
ccfe9e42 7219 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7220out_free_buffer_mask:
7221 free_cpumask_var(tracing_buffer_mask);
7222out:
7223 return ret;
bc0c38d1 7224}
b2821ae6 7225
5f893b26
SRRH
7226void __init trace_init(void)
7227{
0daa2302
SRRH
7228 if (tracepoint_printk) {
7229 tracepoint_print_iter =
7230 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7231 if (WARN_ON(!tracepoint_print_iter))
7232 tracepoint_printk = 0;
7233 }
5f893b26 7234 tracer_alloc_buffers();
0c564a53 7235 trace_event_init();
5f893b26
SRRH
7236}
7237
b2821ae6
SR
7238__init static int clear_boot_tracer(void)
7239{
7240 /*
7241 * The default tracer at boot buffer is an init section.
7242 * This function is called in lateinit. If we did not
7243 * find the boot tracer, then clear it out, to prevent
7244 * later registration from accessing the buffer that is
7245 * about to be freed.
7246 */
7247 if (!default_bootup_tracer)
7248 return 0;
7249
7250 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7251 default_bootup_tracer);
7252 default_bootup_tracer = NULL;
7253
7254 return 0;
7255}
7256
8434dc93 7257fs_initcall(tracer_init_tracefs);
b2821ae6 7258late_initcall(clear_boot_tracer);
This page took 1.09375 seconds and 5 git commands to generate.