tracing: Remove unneeded includes of debugfs.h and fs.h
kernel/trace/trace.c (deliverable/linux.git)
1 /*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
13 */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
40 #include <linux/fs.h>
41 #include <linux/sched/rt.h>
42
43 #include "trace.h"
44 #include "trace_output.h"
45
46 /*
47 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
49 */
50 bool ring_buffer_expanded;
51
52 /*
53 * We need to change this state when a selftest is running.
54 * A selftest will peek into the ring buffer to count the
55 * entries inserted during the selftest, although some concurrent
56 * insertions into the ring buffer, such as trace_printk, could occur
57 * at the same time, giving false positive or negative results.
58 */
59 static bool __read_mostly tracing_selftest_running;
60
61 /*
62 * If a tracer is running, we do not want to run SELFTEST.
63 */
64 bool __read_mostly tracing_selftest_disabled;
65
66 /* Pipe tracepoints to printk */
67 struct trace_iterator *tracepoint_print_iter;
68 int tracepoint_printk;
69
70 /* For tracers that don't implement custom flags */
71 static struct tracer_opt dummy_tracer_opt[] = {
72 { }
73 };
74
75 static struct tracer_flags dummy_tracer_flags = {
76 .val = 0,
77 .opts = dummy_tracer_opt
78 };
79
80 static int
81 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
82 {
83 return 0;
84 }
85
86 /*
87 * To prevent the comm cache from being overwritten when no
88 * tracing is active, only save the comm when a trace event
89 * occurred.
90 */
91 static DEFINE_PER_CPU(bool, trace_cmdline_save);
92
93 /*
94 * Kill all tracing for good (never come back).
95 * It is initialized to 1, but will be set to zero if the initialization
96 * of the tracer is successful. And that is the only place that sets
97 * this back to zero.
98 */
99 static int tracing_disabled = 1;
100
101 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
102
103 cpumask_var_t __read_mostly tracing_buffer_mask;
104
105 /*
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 *
108 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputting them to a
112 * serial console.
113 *
114 * It is off by default, but you can enable it either by specifying
115 * "ftrace_dump_on_oops" on the kernel command line, or by setting
116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
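 *
 * For example (the "=orig_cpu" form is what set_ftrace_dump_on_oops()
 * below accepts):
 *
 *	ftrace_dump_on_oops				(boot: dump all CPUs)
 *	ftrace_dump_on_oops=orig_cpu			(boot: dump only the oops CPU)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(run time)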
119 */
120
121 enum ftrace_dump_mode ftrace_dump_on_oops;
122
123 /* When set, tracing will stop when a WARN*() is hit */
124 int __disable_trace_on_warning;
125
126 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
127
128 #define MAX_TRACER_SIZE 100
129 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130 static char *default_bootup_tracer;
131
132 static bool allocate_snapshot;
133
134 static int __init set_cmdline_ftrace(char *str)
135 {
136 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
137 default_bootup_tracer = bootup_tracer_buf;
138 /* We are using ftrace early, expand it */
139 ring_buffer_expanded = true;
140 return 1;
141 }
142 __setup("ftrace=", set_cmdline_ftrace);
143
144 static int __init set_ftrace_dump_on_oops(char *str)
145 {
146 if (*str++ != '=' || !*str) {
147 ftrace_dump_on_oops = DUMP_ALL;
148 return 1;
149 }
150
151 if (!strcmp("orig_cpu", str)) {
152 ftrace_dump_on_oops = DUMP_ORIG;
153 return 1;
154 }
155
156 return 0;
157 }
158 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
159
160 static int __init stop_trace_on_warning(char *str)
161 {
162 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
163 __disable_trace_on_warning = 1;
164 return 1;
165 }
166 __setup("traceoff_on_warning", stop_trace_on_warning);
167
168 static int __init boot_alloc_snapshot(char *str)
169 {
170 allocate_snapshot = true;
171 /* We also need the main ring buffer expanded */
172 ring_buffer_expanded = true;
173 return 1;
174 }
175 __setup("alloc_snapshot", boot_alloc_snapshot);
176
177
178 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
179 static char *trace_boot_options __initdata;
180
181 static int __init set_trace_boot_options(char *str)
182 {
183 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
184 trace_boot_options = trace_boot_options_buf;
185 return 0;
186 }
187 __setup("trace_options=", set_trace_boot_options);
188
189 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
190 static char *trace_boot_clock __initdata;
191
192 static int __init set_trace_boot_clock(char *str)
193 {
194 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
195 trace_boot_clock = trace_boot_clock_buf;
196 return 0;
197 }
198 __setup("trace_clock=", set_trace_boot_clock);
199
200 static int __init set_tracepoint_printk(char *str)
201 {
202 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
203 tracepoint_printk = 1;
204 return 1;
205 }
206 __setup("tp_printk", set_tracepoint_printk);
207
208 unsigned long long ns2usecs(cycle_t nsec)
209 {
210 nsec += 500;
211 do_div(nsec, 1000);
212 return nsec;
213 }
214
215 /*
216 * The global_trace is the descriptor that holds the tracing
217 * buffers for the live tracing. For each CPU, it contains
218 * a linked list of pages that will store trace entries. The
219 * page descriptor of the pages in memory is used to hold
220 * the linked list by linking the lru item in the page descriptor
221 * to each of the pages in the buffer per CPU.
222 *
223 * For each active CPU there is a data field that holds the
224 * pages for the buffer for that CPU. Each CPU has the same number
225 * of pages allocated for its buffer.
226 */
227 static struct trace_array global_trace;
228
229 LIST_HEAD(ftrace_trace_arrays);
230
231 int trace_array_get(struct trace_array *this_tr)
232 {
233 struct trace_array *tr;
234 int ret = -ENODEV;
235
236 mutex_lock(&trace_types_lock);
237 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
238 if (tr == this_tr) {
239 tr->ref++;
240 ret = 0;
241 break;
242 }
243 }
244 mutex_unlock(&trace_types_lock);
245
246 return ret;
247 }
248
249 static void __trace_array_put(struct trace_array *this_tr)
250 {
251 WARN_ON(!this_tr->ref);
252 this_tr->ref--;
253 }
254
255 void trace_array_put(struct trace_array *this_tr)
256 {
257 mutex_lock(&trace_types_lock);
258 __trace_array_put(this_tr);
259 mutex_unlock(&trace_types_lock);
260 }
261
262 int filter_check_discard(struct ftrace_event_file *file, void *rec,
263 struct ring_buffer *buffer,
264 struct ring_buffer_event *event)
265 {
266 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
267 !filter_match_preds(file->filter, rec)) {
268 ring_buffer_discard_commit(buffer, event);
269 return 1;
270 }
271
272 return 0;
273 }
274 EXPORT_SYMBOL_GPL(filter_check_discard);
275
276 int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
277 struct ring_buffer *buffer,
278 struct ring_buffer_event *event)
279 {
280 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
281 !filter_match_preds(call->filter, rec)) {
282 ring_buffer_discard_commit(buffer, event);
283 return 1;
284 }
285
286 return 0;
287 }
288 EXPORT_SYMBOL_GPL(call_filter_check_discard);
289
290 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
291 {
292 u64 ts;
293
294 /* Early boot up does not have a buffer yet */
295 if (!buf->buffer)
296 return trace_clock_local();
297
298 ts = ring_buffer_time_stamp(buf->buffer, cpu);
299 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
300
301 return ts;
302 }
303
304 cycle_t ftrace_now(int cpu)
305 {
306 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
307 }
308
309 /**
310 * tracing_is_enabled - Show if global_trace has been disabled
311 *
312 * Shows if the global trace has been enabled or not. It uses the
313 * mirror flag "buffer_disabled" so it can be used in fast paths such as
314 * the irqsoff tracer. But it may be inaccurate due to races. If you
315 * need to know the accurate state, use tracing_is_on() which is a little
316 * slower, but accurate.
317 */
318 int tracing_is_enabled(void)
319 {
320 /*
321 * For quick access (irqsoff uses this in fast path), just
322 * return the mirror variable of the state of the ring buffer.
323 * It's a little racy, but we don't really care.
324 */
325 smp_rmb();
326 return !global_trace.buffer_disabled;
327 }
328
329 /*
330 * trace_buf_size is the size in bytes that is allocated
331 * for a buffer. Note, the number of bytes is always rounded
332 * to page size.
333 *
334 * This number is purposely set to a low number of 16384.
335 * If the dump on oops happens, it will be much appreciated
336 * to not have to wait for all that output. Anyway, this is
337 * configurable at both boot time and run time.
338 */
339 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
340
341 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
342
343 /* trace_types holds a link list of available tracers. */
344 static struct tracer *trace_types __read_mostly;
345
346 /*
347 * trace_types_lock is used to protect the trace_types list.
348 */
349 DEFINE_MUTEX(trace_types_lock);
350
351 /*
352 * Serialize access to the ring buffer.
353 *
354 * The ring buffer serializes readers, but that is only low level protection.
355 * The validity of the events (which are returned by ring_buffer_peek() etc.)
356 * is not protected by the ring buffer.
357 *
358 * The content of events may become garbage if we allow another process to
359 * consume these events concurrently:
360 * A) the page of the consumed events may become a normal page
361 * (not a reader page) in the ring buffer, and this page will be rewritten
362 * by the event producer.
363 * B) The page of the consumed events may become a page for splice_read,
364 * and this page will be returned to the system.
365 *
366 * These primitives allow multiple processes to access different per-cpu
367 * ring buffers concurrently.
368 *
369 * These primitives don't distinguish read-only and read-consume access.
370 * Multiple read-only accesses are also serialized.
371 */
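
/*
 * A sketch of the typical caller pattern for these helpers (the pipe
 * and splice readers later in this file pair them like this):
 *
 *	trace_access_lock(iter->cpu_file);
 *	... peek at / consume events from the selected ring buffer ...
 *	trace_access_unlock(iter->cpu_file);
 */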
372
373 #ifdef CONFIG_SMP
374 static DECLARE_RWSEM(all_cpu_access_lock);
375 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
376
377 static inline void trace_access_lock(int cpu)
378 {
379 if (cpu == RING_BUFFER_ALL_CPUS) {
380 /* gain it for accessing the whole ring buffer. */
381 down_write(&all_cpu_access_lock);
382 } else {
383 /* gain it for accessing a cpu ring buffer. */
384
385 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
386 down_read(&all_cpu_access_lock);
387
388 /* Secondly block other access to this @cpu ring buffer. */
389 mutex_lock(&per_cpu(cpu_access_lock, cpu));
390 }
391 }
392
393 static inline void trace_access_unlock(int cpu)
394 {
395 if (cpu == RING_BUFFER_ALL_CPUS) {
396 up_write(&all_cpu_access_lock);
397 } else {
398 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
399 up_read(&all_cpu_access_lock);
400 }
401 }
402
403 static inline void trace_access_lock_init(void)
404 {
405 int cpu;
406
407 for_each_possible_cpu(cpu)
408 mutex_init(&per_cpu(cpu_access_lock, cpu));
409 }
410
411 #else
412
413 static DEFINE_MUTEX(access_lock);
414
415 static inline void trace_access_lock(int cpu)
416 {
417 (void)cpu;
418 mutex_lock(&access_lock);
419 }
420
421 static inline void trace_access_unlock(int cpu)
422 {
423 (void)cpu;
424 mutex_unlock(&access_lock);
425 }
426
427 static inline void trace_access_lock_init(void)
428 {
429 }
430
431 #endif
432
433 /* trace_flags holds trace_options default values */
434 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
435 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
436 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
437 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
438
439 static void tracer_tracing_on(struct trace_array *tr)
440 {
441 if (tr->trace_buffer.buffer)
442 ring_buffer_record_on(tr->trace_buffer.buffer);
443 /*
444 * This flag is looked at when buffers haven't been allocated
445 * yet, or by some tracers (like irqsoff) that just want to
446 * know if the ring buffer has been disabled, but it can handle
447 * races where it gets disabled but we still do a record.
448 * As the check is in the fast path of the tracers, it is more
449 * important to be fast than accurate.
450 */
451 tr->buffer_disabled = 0;
452 /* Make the flag seen by readers */
453 smp_wmb();
454 }
455
456 /**
457 * tracing_on - enable tracing buffers
458 *
459 * This function enables tracing buffers that may have been
460 * disabled with tracing_off.
461 */
462 void tracing_on(void)
463 {
464 tracer_tracing_on(&global_trace);
465 }
466 EXPORT_SYMBOL_GPL(tracing_on);
467
468 /**
469 * __trace_puts - write a constant string into the trace buffer.
470 * @ip: The address of the caller
471 * @str: The constant string to write
472 * @size: The size of the string.
473 */
474 int __trace_puts(unsigned long ip, const char *str, int size)
475 {
476 struct ring_buffer_event *event;
477 struct ring_buffer *buffer;
478 struct print_entry *entry;
479 unsigned long irq_flags;
480 int alloc;
481 int pc;
482
483 if (!(trace_flags & TRACE_ITER_PRINTK))
484 return 0;
485
486 pc = preempt_count();
487
488 if (unlikely(tracing_selftest_running || tracing_disabled))
489 return 0;
490
491 alloc = sizeof(*entry) + size + 2; /* possible \n added */
492
493 local_save_flags(irq_flags);
494 buffer = global_trace.trace_buffer.buffer;
495 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
496 irq_flags, pc);
497 if (!event)
498 return 0;
499
500 entry = ring_buffer_event_data(event);
501 entry->ip = ip;
502
503 memcpy(&entry->buf, str, size);
504
505 /* Add a newline if necessary */
506 if (entry->buf[size - 1] != '\n') {
507 entry->buf[size] = '\n';
508 entry->buf[size + 1] = '\0';
509 } else
510 entry->buf[size] = '\0';
511
512 __buffer_unlock_commit(buffer, event);
513 ftrace_trace_stack(buffer, irq_flags, 4, pc);
514
515 return size;
516 }
517 EXPORT_SYMBOL_GPL(__trace_puts);
518
519 /**
520 * __trace_bputs - write the pointer to a constant string into trace buffer
521 * @ip: The address of the caller
522 * @str: The constant string to write to the buffer
523 */
524 int __trace_bputs(unsigned long ip, const char *str)
525 {
526 struct ring_buffer_event *event;
527 struct ring_buffer *buffer;
528 struct bputs_entry *entry;
529 unsigned long irq_flags;
530 int size = sizeof(struct bputs_entry);
531 int pc;
532
533 if (!(trace_flags & TRACE_ITER_PRINTK))
534 return 0;
535
536 pc = preempt_count();
537
538 if (unlikely(tracing_selftest_running || tracing_disabled))
539 return 0;
540
541 local_save_flags(irq_flags);
542 buffer = global_trace.trace_buffer.buffer;
543 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
544 irq_flags, pc);
545 if (!event)
546 return 0;
547
548 entry = ring_buffer_event_data(event);
549 entry->ip = ip;
550 entry->str = str;
551
552 __buffer_unlock_commit(buffer, event);
553 ftrace_trace_stack(buffer, irq_flags, 4, pc);
554
555 return 1;
556 }
557 EXPORT_SYMBOL_GPL(__trace_bputs);
558
559 #ifdef CONFIG_TRACER_SNAPSHOT
560 /**
561 * tracing_snapshot - take a snapshot of the current buffer.
562 *
563 * This causes a swap between the snapshot buffer and the current live
564 * tracing buffer. You can use this to take snapshots of the live
565 * trace when some condition is triggered, but continue to trace.
566 *
567 * Note, make sure to allocate the snapshot either with
568 * tracing_snapshot_alloc(), or by doing it manually
569 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
570 *
571 * If the snapshot buffer is not allocated, it will stop tracing.
572 * Basically making a permanent snapshot.
573 */
574 void tracing_snapshot(void)
575 {
576 struct trace_array *tr = &global_trace;
577 struct tracer *tracer = tr->current_trace;
578 unsigned long flags;
579
580 if (in_nmi()) {
581 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
582 internal_trace_puts("*** snapshot is being ignored ***\n");
583 return;
584 }
585
586 if (!tr->allocated_snapshot) {
587 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
588 internal_trace_puts("*** stopping trace here! ***\n");
589 tracing_off();
590 return;
591 }
592
593 /* Note, snapshot can not be used when the tracer uses it */
594 if (tracer->use_max_tr) {
595 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
596 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
597 return;
598 }
599
600 local_irq_save(flags);
601 update_max_tr(tr, current, smp_processor_id());
602 local_irq_restore(flags);
603 }
604 EXPORT_SYMBOL_GPL(tracing_snapshot);
605
606 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
607 struct trace_buffer *size_buf, int cpu_id);
608 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
609
610 static int alloc_snapshot(struct trace_array *tr)
611 {
612 int ret;
613
614 if (!tr->allocated_snapshot) {
615
616 /* allocate spare buffer */
617 ret = resize_buffer_duplicate_size(&tr->max_buffer,
618 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
619 if (ret < 0)
620 return ret;
621
622 tr->allocated_snapshot = true;
623 }
624
625 return 0;
626 }
627
628 static void free_snapshot(struct trace_array *tr)
629 {
630 /*
631 * We don't free the ring buffer. Instead, we resize it because
632 * the max_tr ring buffer has some state (e.g. ring->clock) and
633 * we want to preserve it.
634 */
635 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
636 set_buffer_entries(&tr->max_buffer, 1);
637 tracing_reset_online_cpus(&tr->max_buffer);
638 tr->allocated_snapshot = false;
639 }
640
641 /**
642 * tracing_alloc_snapshot - allocate snapshot buffer.
643 *
644 * This only allocates the snapshot buffer if it isn't already
645 * allocated - it doesn't also take a snapshot.
646 *
647 * This is meant to be used in cases where the snapshot buffer needs
648 * to be set up for events that can't sleep but need to be able to
649 * trigger a snapshot.
650 */
651 int tracing_alloc_snapshot(void)
652 {
653 struct trace_array *tr = &global_trace;
654 int ret;
655
656 ret = alloc_snapshot(tr);
657 WARN_ON(ret < 0);
658
659 return ret;
660 }
661 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
662
663 /**
664 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
665 *
666 * This is similar to tracing_snapshot(), but it will allocate the
667 * snapshot buffer if it isn't already allocated. Use this only
668 * where it is safe to sleep, as the allocation may sleep.
669 *
670 * This causes a swap between the snapshot buffer and the current live
671 * tracing buffer. You can use this to take snapshots of the live
672 * trace when some condition is triggered, but continue to trace.
673 */
674 void tracing_snapshot_alloc(void)
675 {
676 int ret;
677
678 ret = tracing_alloc_snapshot();
679 if (ret < 0)
680 return;
681
682 tracing_snapshot();
683 }
684 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
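
/*
 * A minimal usage sketch (hypothetical caller): from sleepable context,
 * a rare condition can be captured with
 *
 *	if (saw_rare_condition)
 *		tracing_snapshot_alloc();
 *
 * Callers that cannot sleep should call tracing_alloc_snapshot() ahead
 * of time and then tracing_snapshot() at the trigger point, as described
 * in the kerneldoc above.
 */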
685 #else
686 void tracing_snapshot(void)
687 {
688 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
689 }
690 EXPORT_SYMBOL_GPL(tracing_snapshot);
691 int tracing_alloc_snapshot(void)
692 {
693 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
694 return -ENODEV;
695 }
696 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
697 void tracing_snapshot_alloc(void)
698 {
699 /* Give warning */
700 tracing_snapshot();
701 }
702 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
703 #endif /* CONFIG_TRACER_SNAPSHOT */
704
705 static void tracer_tracing_off(struct trace_array *tr)
706 {
707 if (tr->trace_buffer.buffer)
708 ring_buffer_record_off(tr->trace_buffer.buffer);
709 /*
710 * This flag is looked at when buffers haven't been allocated
711 * yet, or by some tracers (like irqsoff) that just want to
712 * know if the ring buffer has been disabled, but it can handle
713 * races where it gets disabled but we still do a record.
714 * As the check is in the fast path of the tracers, it is more
715 * important to be fast than accurate.
716 */
717 tr->buffer_disabled = 1;
718 /* Make the flag seen by readers */
719 smp_wmb();
720 }
721
722 /**
723 * tracing_off - turn off tracing buffers
724 *
725 * This function stops the tracing buffers from recording data.
726 * It does not disable any overhead the tracers themselves may
727 * be causing. This function simply causes all recording to
728 * the ring buffers to fail.
729 */
730 void tracing_off(void)
731 {
732 tracer_tracing_off(&global_trace);
733 }
734 EXPORT_SYMBOL_GPL(tracing_off);
735
736 void disable_trace_on_warning(void)
737 {
738 if (__disable_trace_on_warning)
739 tracing_off();
740 }
741
742 /**
743 * tracer_tracing_is_on - show the real state of the ring buffer
744 * @tr: the trace array to check
745 *
746 * Shows the real state of the ring buffer: whether it is enabled or not.
747 */
748 static int tracer_tracing_is_on(struct trace_array *tr)
749 {
750 if (tr->trace_buffer.buffer)
751 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
752 return !tr->buffer_disabled;
753 }
754
755 /**
756 * tracing_is_on - show whether the ring buffers are enabled
757 */
758 int tracing_is_on(void)
759 {
760 return tracer_tracing_is_on(&global_trace);
761 }
762 EXPORT_SYMBOL_GPL(tracing_is_on);
763
764 static int __init set_buf_size(char *str)
765 {
766 unsigned long buf_size;
767
768 if (!str)
769 return 0;
770 buf_size = memparse(str, &str);
771 /* nr_entries can not be zero */
772 if (buf_size == 0)
773 return 0;
774 trace_buf_size = buf_size;
775 return 1;
776 }
777 __setup("trace_buf_size=", set_buf_size);
778
779 static int __init set_tracing_thresh(char *str)
780 {
781 unsigned long threshold;
782 int ret;
783
784 if (!str)
785 return 0;
786 ret = kstrtoul(str, 0, &threshold);
787 if (ret < 0)
788 return 0;
789 tracing_thresh = threshold * 1000;
790 return 1;
791 }
792 __setup("tracing_thresh=", set_tracing_thresh);
793
794 unsigned long nsecs_to_usecs(unsigned long nsecs)
795 {
796 return nsecs / 1000;
797 }
798
799 /* These must match the bit positions in trace_iterator_flags */
800 static const char *trace_options[] = {
801 "print-parent",
802 "sym-offset",
803 "sym-addr",
804 "verbose",
805 "raw",
806 "hex",
807 "bin",
808 "block",
809 "stacktrace",
810 "trace_printk",
811 "ftrace_preempt",
812 "branch",
813 "annotate",
814 "userstacktrace",
815 "sym-userobj",
816 "printk-msg-only",
817 "context-info",
818 "latency-format",
819 "sleep-time",
820 "graph-time",
821 "record-cmd",
822 "overwrite",
823 "disable_on_free",
824 "irq-info",
825 "markers",
826 "function-trace",
827 NULL
828 };
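
/*
 * Each of these option names can be toggled at run time by writing the
 * name (or "no" + name) to the trace_options file, e.g. (the path assumes
 * the usual debugfs mount point):
 *
 *	echo stacktrace > /sys/kernel/debug/tracing/trace_options
 *	echo nostacktrace > /sys/kernel/debug/tracing/trace_options
 */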
829
830 static struct {
831 u64 (*func)(void);
832 const char *name;
833 int in_ns; /* is this clock in nanoseconds? */
834 } trace_clocks[] = {
835 { trace_clock_local, "local", 1 },
836 { trace_clock_global, "global", 1 },
837 { trace_clock_counter, "counter", 0 },
838 { trace_clock_jiffies, "uptime", 0 },
839 { trace_clock, "perf", 1 },
840 { ktime_get_mono_fast_ns, "mono", 1 },
841 ARCH_TRACE_CLOCKS
842 };
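
/*
 * The clock used for tracing can be switched at run time by writing one
 * of the names above to the trace_clock file, e.g. (the path assumes the
 * usual debugfs mount point):
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */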
843
844 /*
845 * trace_parser_get_init - gets the buffer for trace parser
846 */
847 int trace_parser_get_init(struct trace_parser *parser, int size)
848 {
849 memset(parser, 0, sizeof(*parser));
850
851 parser->buffer = kmalloc(size, GFP_KERNEL);
852 if (!parser->buffer)
853 return 1;
854
855 parser->size = size;
856 return 0;
857 }
858
859 /*
860 * trace_parser_put - frees the buffer for trace parser
861 */
862 void trace_parser_put(struct trace_parser *parser)
863 {
864 kfree(parser->buffer);
865 }
866
867 /*
868 * trace_get_user - reads the user input string separated by space
869 * (matched by isspace(ch))
870 *
871 * For each string found, the 'struct trace_parser' is updated,
872 * and the function returns.
873 *
874 * Returns the number of bytes read.
875 *
876 * See kernel/trace/trace.h for 'struct trace_parser' details.
877 */
878 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
879 size_t cnt, loff_t *ppos)
880 {
881 char ch;
882 size_t read = 0;
883 ssize_t ret;
884
885 if (!*ppos)
886 trace_parser_clear(parser);
887
888 ret = get_user(ch, ubuf++);
889 if (ret)
890 goto out;
891
892 read++;
893 cnt--;
894
895 /*
896 * The parser is not finished with the last write,
897 * continue reading the user input without skipping spaces.
898 */
899 if (!parser->cont) {
900 /* skip white space */
901 while (cnt && isspace(ch)) {
902 ret = get_user(ch, ubuf++);
903 if (ret)
904 goto out;
905 read++;
906 cnt--;
907 }
908
909 /* only spaces were written */
910 if (isspace(ch)) {
911 *ppos += read;
912 ret = read;
913 goto out;
914 }
915
916 parser->idx = 0;
917 }
918
919 /* read the non-space input */
920 while (cnt && !isspace(ch)) {
921 if (parser->idx < parser->size - 1)
922 parser->buffer[parser->idx++] = ch;
923 else {
924 ret = -EINVAL;
925 goto out;
926 }
927 ret = get_user(ch, ubuf++);
928 if (ret)
929 goto out;
930 read++;
931 cnt--;
932 }
933
934 /* We either got finished input or we have to wait for another call. */
935 if (isspace(ch)) {
936 parser->buffer[parser->idx] = 0;
937 parser->cont = false;
938 } else if (parser->idx < parser->size - 1) {
939 parser->cont = true;
940 parser->buffer[parser->idx++] = ch;
941 } else {
942 ret = -EINVAL;
943 goto out;
944 }
945
946 *ppos += read;
947 ret = read;
948
949 out:
950 return ret;
951 }
952
953 /* TODO add a seq_buf_to_buffer() */
954 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
955 {
956 int len;
957
958 if (trace_seq_used(s) <= s->seq.readpos)
959 return -EBUSY;
960
961 len = trace_seq_used(s) - s->seq.readpos;
962 if (cnt > len)
963 cnt = len;
964 memcpy(buf, s->buffer + s->seq.readpos, cnt);
965
966 s->seq.readpos += cnt;
967 return cnt;
968 }
969
970 unsigned long __read_mostly tracing_thresh;
971
972 #ifdef CONFIG_TRACER_MAX_TRACE
973 /*
974 * Copy the new maximum trace into the separate maximum-trace
975 * structure. (this way the maximum trace is permanently saved,
976 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
977 */
978 static void
979 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
980 {
981 struct trace_buffer *trace_buf = &tr->trace_buffer;
982 struct trace_buffer *max_buf = &tr->max_buffer;
983 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
984 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
985
986 max_buf->cpu = cpu;
987 max_buf->time_start = data->preempt_timestamp;
988
989 max_data->saved_latency = tr->max_latency;
990 max_data->critical_start = data->critical_start;
991 max_data->critical_end = data->critical_end;
992
993 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
994 max_data->pid = tsk->pid;
995 /*
996 * If tsk == current, then use current_uid(), as that does not use
997 * RCU. The irq tracer can be called out of RCU scope.
998 */
999 if (tsk == current)
1000 max_data->uid = current_uid();
1001 else
1002 max_data->uid = task_uid(tsk);
1003
1004 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1005 max_data->policy = tsk->policy;
1006 max_data->rt_priority = tsk->rt_priority;
1007
1008 /* record this task's comm */
1009 tracing_record_cmdline(tsk);
1010 }
1011
1012 /**
1013 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1014 * @tr: tracer
1015 * @tsk: the task with the latency
1016 * @cpu: The cpu that initiated the trace.
1017 *
1018 * Flip the buffers between the @tr and the max_tr and record information
1019 * about which task was the cause of this latency.
1020 */
1021 void
1022 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1023 {
1024 struct ring_buffer *buf;
1025
1026 if (tr->stop_count)
1027 return;
1028
1029 WARN_ON_ONCE(!irqs_disabled());
1030
1031 if (!tr->allocated_snapshot) {
1032 /* Only the nop tracer should hit this when disabling */
1033 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1034 return;
1035 }
1036
1037 arch_spin_lock(&tr->max_lock);
1038
1039 buf = tr->trace_buffer.buffer;
1040 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1041 tr->max_buffer.buffer = buf;
1042
1043 __update_max_tr(tr, tsk, cpu);
1044 arch_spin_unlock(&tr->max_lock);
1045 }
1046
1047 /**
1048 * update_max_tr_single - only copy one trace over, and reset the rest
1049 * @tr: tracer
1050 * @tsk: task with the latency
1051 * @cpu: the cpu of the buffer to copy.
1052 *
1053 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1054 */
1055 void
1056 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1057 {
1058 int ret;
1059
1060 if (tr->stop_count)
1061 return;
1062
1063 WARN_ON_ONCE(!irqs_disabled());
1064 if (!tr->allocated_snapshot) {
1065 /* Only the nop tracer should hit this when disabling */
1066 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1067 return;
1068 }
1069
1070 arch_spin_lock(&tr->max_lock);
1071
1072 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1073
1074 if (ret == -EBUSY) {
1075 /*
1076 * We failed to swap the buffer due to a commit taking
1077 * place on this CPU. We fail to record, but we reset
1078 * the max trace buffer (no one writes directly to it)
1079 * and flag that it failed.
1080 */
1081 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1082 "Failed to swap buffers due to commit in progress\n");
1083 }
1084
1085 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1086
1087 __update_max_tr(tr, tsk, cpu);
1088 arch_spin_unlock(&tr->max_lock);
1089 }
1090 #endif /* CONFIG_TRACER_MAX_TRACE */
1091
1092 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1093 {
1094 /* Iterators are static, they should be filled or empty */
1095 if (trace_buffer_iter(iter, iter->cpu_file))
1096 return 0;
1097
1098 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1099 full);
1100 }
1101
1102 #ifdef CONFIG_FTRACE_STARTUP_TEST
1103 static int run_tracer_selftest(struct tracer *type)
1104 {
1105 struct trace_array *tr = &global_trace;
1106 struct tracer *saved_tracer = tr->current_trace;
1107 int ret;
1108
1109 if (!type->selftest || tracing_selftest_disabled)
1110 return 0;
1111
1112 /*
1113 * Run a selftest on this tracer.
1114 * Here we reset the trace buffer, and set the current
1115 * tracer to be this tracer. The tracer can then run some
1116 * internal tracing to verify that everything is in order.
1117 * If we fail, we do not register this tracer.
1118 */
1119 tracing_reset_online_cpus(&tr->trace_buffer);
1120
1121 tr->current_trace = type;
1122
1123 #ifdef CONFIG_TRACER_MAX_TRACE
1124 if (type->use_max_tr) {
1125 /* If we expanded the buffers, make sure the max is expanded too */
1126 if (ring_buffer_expanded)
1127 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1128 RING_BUFFER_ALL_CPUS);
1129 tr->allocated_snapshot = true;
1130 }
1131 #endif
1132
1133 /* the test is responsible for initializing and enabling */
1134 pr_info("Testing tracer %s: ", type->name);
1135 ret = type->selftest(type, tr);
1136 /* the test is responsible for resetting too */
1137 tr->current_trace = saved_tracer;
1138 if (ret) {
1139 printk(KERN_CONT "FAILED!\n");
1140 /* Add the warning after printing 'FAILED' */
1141 WARN_ON(1);
1142 return -1;
1143 }
1144 /* Only reset on passing, to avoid touching corrupted buffers */
1145 tracing_reset_online_cpus(&tr->trace_buffer);
1146
1147 #ifdef CONFIG_TRACER_MAX_TRACE
1148 if (type->use_max_tr) {
1149 tr->allocated_snapshot = false;
1150
1151 /* Shrink the max buffer again */
1152 if (ring_buffer_expanded)
1153 ring_buffer_resize(tr->max_buffer.buffer, 1,
1154 RING_BUFFER_ALL_CPUS);
1155 }
1156 #endif
1157
1158 printk(KERN_CONT "PASSED\n");
1159 return 0;
1160 }
1161 #else
1162 static inline int run_tracer_selftest(struct tracer *type)
1163 {
1164 return 0;
1165 }
1166 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1167
1168 /**
1169 * register_tracer - register a tracer with the ftrace system.
1170 * @type - the plugin for the tracer
1171 *
1172 * Register a new plugin tracer.
1173 */
1174 int register_tracer(struct tracer *type)
1175 {
1176 struct tracer *t;
1177 int ret = 0;
1178
1179 if (!type->name) {
1180 pr_info("Tracer must have a name\n");
1181 return -1;
1182 }
1183
1184 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1185 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1186 return -1;
1187 }
1188
1189 mutex_lock(&trace_types_lock);
1190
1191 tracing_selftest_running = true;
1192
1193 for (t = trace_types; t; t = t->next) {
1194 if (strcmp(type->name, t->name) == 0) {
1195 /* already found */
1196 pr_info("Tracer %s already registered\n",
1197 type->name);
1198 ret = -1;
1199 goto out;
1200 }
1201 }
1202
1203 if (!type->set_flag)
1204 type->set_flag = &dummy_set_flag;
1205 if (!type->flags)
1206 type->flags = &dummy_tracer_flags;
1207 else
1208 if (!type->flags->opts)
1209 type->flags->opts = dummy_tracer_opt;
1210
1211 ret = run_tracer_selftest(type);
1212 if (ret < 0)
1213 goto out;
1214
1215 type->next = trace_types;
1216 trace_types = type;
1217
1218 out:
1219 tracing_selftest_running = false;
1220 mutex_unlock(&trace_types_lock);
1221
1222 if (ret || !default_bootup_tracer)
1223 goto out_unlock;
1224
1225 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1226 goto out_unlock;
1227
1228 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1229 /* Do we want this tracer to start on bootup? */
1230 tracing_set_tracer(&global_trace, type->name);
1231 default_bootup_tracer = NULL;
1232 /* disable other selftests, since this will break them. */
1233 tracing_selftest_disabled = true;
1234 #ifdef CONFIG_FTRACE_STARTUP_TEST
1235 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1236 type->name);
1237 #endif
1238
1239 out_unlock:
1240 return ret;
1241 }
1242
1243 void tracing_reset(struct trace_buffer *buf, int cpu)
1244 {
1245 struct ring_buffer *buffer = buf->buffer;
1246
1247 if (!buffer)
1248 return;
1249
1250 ring_buffer_record_disable(buffer);
1251
1252 /* Make sure all commits have finished */
1253 synchronize_sched();
1254 ring_buffer_reset_cpu(buffer, cpu);
1255
1256 ring_buffer_record_enable(buffer);
1257 }
1258
1259 void tracing_reset_online_cpus(struct trace_buffer *buf)
1260 {
1261 struct ring_buffer *buffer = buf->buffer;
1262 int cpu;
1263
1264 if (!buffer)
1265 return;
1266
1267 ring_buffer_record_disable(buffer);
1268
1269 /* Make sure all commits have finished */
1270 synchronize_sched();
1271
1272 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1273
1274 for_each_online_cpu(cpu)
1275 ring_buffer_reset_cpu(buffer, cpu);
1276
1277 ring_buffer_record_enable(buffer);
1278 }
1279
1280 /* Must have trace_types_lock held */
1281 void tracing_reset_all_online_cpus(void)
1282 {
1283 struct trace_array *tr;
1284
1285 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1286 tracing_reset_online_cpus(&tr->trace_buffer);
1287 #ifdef CONFIG_TRACER_MAX_TRACE
1288 tracing_reset_online_cpus(&tr->max_buffer);
1289 #endif
1290 }
1291 }
1292
1293 #define SAVED_CMDLINES_DEFAULT 128
1294 #define NO_CMDLINE_MAP UINT_MAX
1295 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1296 struct saved_cmdlines_buffer {
1297 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1298 unsigned *map_cmdline_to_pid;
1299 unsigned cmdline_num;
1300 int cmdline_idx;
1301 char *saved_cmdlines;
1302 };
1303 static struct saved_cmdlines_buffer *savedcmd;
1304
1305 /* temporarily disable recording */
1306 static atomic_t trace_record_cmdline_disabled __read_mostly;
1307
1308 static inline char *get_saved_cmdlines(int idx)
1309 {
1310 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1311 }
1312
1313 static inline void set_cmdline(int idx, const char *cmdline)
1314 {
1315 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1316 }
1317
1318 static int allocate_cmdlines_buffer(unsigned int val,
1319 struct saved_cmdlines_buffer *s)
1320 {
1321 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1322 GFP_KERNEL);
1323 if (!s->map_cmdline_to_pid)
1324 return -ENOMEM;
1325
1326 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1327 if (!s->saved_cmdlines) {
1328 kfree(s->map_cmdline_to_pid);
1329 return -ENOMEM;
1330 }
1331
1332 s->cmdline_idx = 0;
1333 s->cmdline_num = val;
1334 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1335 sizeof(s->map_pid_to_cmdline));
1336 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1337 val * sizeof(*s->map_cmdline_to_pid));
1338
1339 return 0;
1340 }
1341
1342 static int trace_create_savedcmd(void)
1343 {
1344 int ret;
1345
1346 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1347 if (!savedcmd)
1348 return -ENOMEM;
1349
1350 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1351 if (ret < 0) {
1352 kfree(savedcmd);
1353 savedcmd = NULL;
1354 return -ENOMEM;
1355 }
1356
1357 return 0;
1358 }
1359
1360 int is_tracing_stopped(void)
1361 {
1362 return global_trace.stop_count;
1363 }
1364
1365 /**
1366 * tracing_start - quick start of the tracer
1367 *
1368 * If tracing is enabled but was stopped by tracing_stop,
1369 * this will start the tracer back up.
1370 */
1371 void tracing_start(void)
1372 {
1373 struct ring_buffer *buffer;
1374 unsigned long flags;
1375
1376 if (tracing_disabled)
1377 return;
1378
1379 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1380 if (--global_trace.stop_count) {
1381 if (global_trace.stop_count < 0) {
1382 /* Someone screwed up their debugging */
1383 WARN_ON_ONCE(1);
1384 global_trace.stop_count = 0;
1385 }
1386 goto out;
1387 }
1388
1389 /* Prevent the buffers from switching */
1390 arch_spin_lock(&global_trace.max_lock);
1391
1392 buffer = global_trace.trace_buffer.buffer;
1393 if (buffer)
1394 ring_buffer_record_enable(buffer);
1395
1396 #ifdef CONFIG_TRACER_MAX_TRACE
1397 buffer = global_trace.max_buffer.buffer;
1398 if (buffer)
1399 ring_buffer_record_enable(buffer);
1400 #endif
1401
1402 arch_spin_unlock(&global_trace.max_lock);
1403
1404 out:
1405 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1406 }
1407
1408 static void tracing_start_tr(struct trace_array *tr)
1409 {
1410 struct ring_buffer *buffer;
1411 unsigned long flags;
1412
1413 if (tracing_disabled)
1414 return;
1415
1416 /* If global, we need to also start the max tracer */
1417 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1418 return tracing_start();
1419
1420 raw_spin_lock_irqsave(&tr->start_lock, flags);
1421
1422 if (--tr->stop_count) {
1423 if (tr->stop_count < 0) {
1424 /* Someone screwed up their debugging */
1425 WARN_ON_ONCE(1);
1426 tr->stop_count = 0;
1427 }
1428 goto out;
1429 }
1430
1431 buffer = tr->trace_buffer.buffer;
1432 if (buffer)
1433 ring_buffer_record_enable(buffer);
1434
1435 out:
1436 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1437 }
1438
1439 /**
1440 * tracing_stop - quick stop of the tracer
1441 *
1442 * Lightweight way to stop tracing. Use in conjunction with
1443 * tracing_start.
1444 */
1445 void tracing_stop(void)
1446 {
1447 struct ring_buffer *buffer;
1448 unsigned long flags;
1449
1450 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1451 if (global_trace.stop_count++)
1452 goto out;
1453
1454 /* Prevent the buffers from switching */
1455 arch_spin_lock(&global_trace.max_lock);
1456
1457 buffer = global_trace.trace_buffer.buffer;
1458 if (buffer)
1459 ring_buffer_record_disable(buffer);
1460
1461 #ifdef CONFIG_TRACER_MAX_TRACE
1462 buffer = global_trace.max_buffer.buffer;
1463 if (buffer)
1464 ring_buffer_record_disable(buffer);
1465 #endif
1466
1467 arch_spin_unlock(&global_trace.max_lock);
1468
1469 out:
1470 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471 }
1472
1473 static void tracing_stop_tr(struct trace_array *tr)
1474 {
1475 struct ring_buffer *buffer;
1476 unsigned long flags;
1477
1478 /* If global, we need to also stop the max tracer */
1479 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480 return tracing_stop();
1481
1482 raw_spin_lock_irqsave(&tr->start_lock, flags);
1483 if (tr->stop_count++)
1484 goto out;
1485
1486 buffer = tr->trace_buffer.buffer;
1487 if (buffer)
1488 ring_buffer_record_disable(buffer);
1489
1490 out:
1491 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1492 }
1493
1494 void trace_stop_cmdline_recording(void);
1495
1496 static int trace_save_cmdline(struct task_struct *tsk)
1497 {
1498 unsigned pid, idx;
1499
1500 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1501 return 0;
1502
1503 /*
1504 * It's not the end of the world if we don't get
1505 * the lock, but we also don't want to spin
1506 * nor do we want to disable interrupts,
1507 * so if we miss here, then better luck next time.
1508 */
1509 if (!arch_spin_trylock(&trace_cmdline_lock))
1510 return 0;
1511
1512 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1513 if (idx == NO_CMDLINE_MAP) {
1514 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1515
1516 /*
1517 * Check whether the cmdline buffer at idx has a pid
1518 * mapped. We are going to overwrite that entry so we
1519 * need to clear the map_pid_to_cmdline. Otherwise we
1520 * would read the new comm for the old pid.
1521 */
1522 pid = savedcmd->map_cmdline_to_pid[idx];
1523 if (pid != NO_CMDLINE_MAP)
1524 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1525
1526 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1527 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1528
1529 savedcmd->cmdline_idx = idx;
1530 }
1531
1532 set_cmdline(idx, tsk->comm);
1533
1534 arch_spin_unlock(&trace_cmdline_lock);
1535
1536 return 1;
1537 }
1538
1539 static void __trace_find_cmdline(int pid, char comm[])
1540 {
1541 unsigned map;
1542
1543 if (!pid) {
1544 strcpy(comm, "<idle>");
1545 return;
1546 }
1547
1548 if (WARN_ON_ONCE(pid < 0)) {
1549 strcpy(comm, "<XXX>");
1550 return;
1551 }
1552
1553 if (pid > PID_MAX_DEFAULT) {
1554 strcpy(comm, "<...>");
1555 return;
1556 }
1557
1558 map = savedcmd->map_pid_to_cmdline[pid];
1559 if (map != NO_CMDLINE_MAP)
1560 strcpy(comm, get_saved_cmdlines(map));
1561 else
1562 strcpy(comm, "<...>");
1563 }
1564
1565 void trace_find_cmdline(int pid, char comm[])
1566 {
1567 preempt_disable();
1568 arch_spin_lock(&trace_cmdline_lock);
1569
1570 __trace_find_cmdline(pid, comm);
1571
1572 arch_spin_unlock(&trace_cmdline_lock);
1573 preempt_enable();
1574 }
1575
1576 void tracing_record_cmdline(struct task_struct *tsk)
1577 {
1578 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1579 return;
1580
1581 if (!__this_cpu_read(trace_cmdline_save))
1582 return;
1583
1584 if (trace_save_cmdline(tsk))
1585 __this_cpu_write(trace_cmdline_save, false);
1586 }
1587
1588 void
1589 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1590 int pc)
1591 {
1592 struct task_struct *tsk = current;
1593
1594 entry->preempt_count = pc & 0xff;
1595 entry->pid = (tsk) ? tsk->pid : 0;
1596 entry->flags =
1597 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1598 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1599 #else
1600 TRACE_FLAG_IRQS_NOSUPPORT |
1601 #endif
1602 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1603 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1604 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1605 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1606 }
1607 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1608
1609 struct ring_buffer_event *
1610 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1611 int type,
1612 unsigned long len,
1613 unsigned long flags, int pc)
1614 {
1615 struct ring_buffer_event *event;
1616
1617 event = ring_buffer_lock_reserve(buffer, len);
1618 if (event != NULL) {
1619 struct trace_entry *ent = ring_buffer_event_data(event);
1620
1621 tracing_generic_entry_update(ent, flags, pc);
1622 ent->type = type;
1623 }
1624
1625 return event;
1626 }
1627
1628 void
1629 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1630 {
1631 __this_cpu_write(trace_cmdline_save, true);
1632 ring_buffer_unlock_commit(buffer, event);
1633 }
1634
1635 static inline void
1636 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1637 struct ring_buffer_event *event,
1638 unsigned long flags, int pc)
1639 {
1640 __buffer_unlock_commit(buffer, event);
1641
1642 ftrace_trace_stack(buffer, flags, 6, pc);
1643 ftrace_trace_userstack(buffer, flags, pc);
1644 }
1645
1646 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1647 struct ring_buffer_event *event,
1648 unsigned long flags, int pc)
1649 {
1650 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1651 }
1652 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1653
1654 static struct ring_buffer *temp_buffer;
1655
1656 struct ring_buffer_event *
1657 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1658 struct ftrace_event_file *ftrace_file,
1659 int type, unsigned long len,
1660 unsigned long flags, int pc)
1661 {
1662 struct ring_buffer_event *entry;
1663
1664 *current_rb = ftrace_file->tr->trace_buffer.buffer;
1665 entry = trace_buffer_lock_reserve(*current_rb,
1666 type, len, flags, pc);
1667 /*
1668 * If tracing is off, but we have triggers enabled,
1669 * we still need to look at the event data. Use the temp_buffer
1670 * to store the trace event for the trigger to use. It's recursion
1671 * safe and will not be recorded anywhere.
1672 */
1673 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1674 *current_rb = temp_buffer;
1675 entry = trace_buffer_lock_reserve(*current_rb,
1676 type, len, flags, pc);
1677 }
1678 return entry;
1679 }
1680 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1681
1682 struct ring_buffer_event *
1683 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1684 int type, unsigned long len,
1685 unsigned long flags, int pc)
1686 {
1687 *current_rb = global_trace.trace_buffer.buffer;
1688 return trace_buffer_lock_reserve(*current_rb,
1689 type, len, flags, pc);
1690 }
1691 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1692
1693 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1694 struct ring_buffer_event *event,
1695 unsigned long flags, int pc)
1696 {
1697 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1698 }
1699 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1700
1701 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1702 struct ring_buffer_event *event,
1703 unsigned long flags, int pc,
1704 struct pt_regs *regs)
1705 {
1706 __buffer_unlock_commit(buffer, event);
1707
1708 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1709 ftrace_trace_userstack(buffer, flags, pc);
1710 }
1711 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1712
1713 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1714 struct ring_buffer_event *event)
1715 {
1716 ring_buffer_discard_commit(buffer, event);
1717 }
1718 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1719
1720 void
1721 trace_function(struct trace_array *tr,
1722 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1723 int pc)
1724 {
1725 struct ftrace_event_call *call = &event_function;
1726 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1727 struct ring_buffer_event *event;
1728 struct ftrace_entry *entry;
1729
1730 /* If we are reading the ring buffer, don't trace */
1731 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1732 return;
1733
1734 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1735 flags, pc);
1736 if (!event)
1737 return;
1738 entry = ring_buffer_event_data(event);
1739 entry->ip = ip;
1740 entry->parent_ip = parent_ip;
1741
1742 if (!call_filter_check_discard(call, entry, buffer, event))
1743 __buffer_unlock_commit(buffer, event);
1744 }
1745
1746 #ifdef CONFIG_STACKTRACE
1747
1748 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749 struct ftrace_stack {
1750 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1751 };
1752
1753 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755
1756 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1757 unsigned long flags,
1758 int skip, int pc, struct pt_regs *regs)
1759 {
1760 struct ftrace_event_call *call = &event_kernel_stack;
1761 struct ring_buffer_event *event;
1762 struct stack_entry *entry;
1763 struct stack_trace trace;
1764 int use_stack;
1765 int size = FTRACE_STACK_ENTRIES;
1766
1767 trace.nr_entries = 0;
1768 trace.skip = skip;
1769
1770 /*
1771 * Since events can happen in NMIs, there's no safe way to
1772 * use the per cpu ftrace_stacks. We reserve it, and if an interrupt
1773 * or NMI comes in, it will just have to use the default
1774 * FTRACE_STACK_SIZE.
1775 */
1776 preempt_disable_notrace();
1777
1778 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1779 /*
1780 * We don't need any atomic variables, just a barrier.
1781 * If an interrupt comes in, we don't care, because it would
1782 * have exited and put the counter back to what we want.
1783 * We just need a barrier to keep gcc from moving things
1784 * around.
1785 */
1786 barrier();
1787 if (use_stack == 1) {
1788 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1789 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1790
1791 if (regs)
1792 save_stack_trace_regs(regs, &trace);
1793 else
1794 save_stack_trace(&trace);
1795
1796 if (trace.nr_entries > size)
1797 size = trace.nr_entries;
1798 } else
1799 /* From now on, use_stack is a boolean */
1800 use_stack = 0;
1801
1802 size *= sizeof(unsigned long);
1803
1804 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1805 sizeof(*entry) + size, flags, pc);
1806 if (!event)
1807 goto out;
1808 entry = ring_buffer_event_data(event);
1809
1810 memset(&entry->caller, 0, size);
1811
1812 if (use_stack)
1813 memcpy(&entry->caller, trace.entries,
1814 trace.nr_entries * sizeof(unsigned long));
1815 else {
1816 trace.max_entries = FTRACE_STACK_ENTRIES;
1817 trace.entries = entry->caller;
1818 if (regs)
1819 save_stack_trace_regs(regs, &trace);
1820 else
1821 save_stack_trace(&trace);
1822 }
1823
1824 entry->size = trace.nr_entries;
1825
1826 if (!call_filter_check_discard(call, entry, buffer, event))
1827 __buffer_unlock_commit(buffer, event);
1828
1829 out:
1830 /* Again, don't let gcc optimize things here */
1831 barrier();
1832 __this_cpu_dec(ftrace_stack_reserve);
1833 preempt_enable_notrace();
1834
1835 }
1836
1837 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838 int skip, int pc, struct pt_regs *regs)
1839 {
1840 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841 return;
1842
1843 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844 }
1845
1846 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847 int skip, int pc)
1848 {
1849 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850 return;
1851
1852 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1853 }
1854
1855 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1856 int pc)
1857 {
1858 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1859 }
1860
1861 /**
1862 * trace_dump_stack - record a stack back trace in the trace buffer
1863 * @skip: Number of functions to skip (helper handlers)
1864 */
1865 void trace_dump_stack(int skip)
1866 {
1867 unsigned long flags;
1868
1869 if (tracing_disabled || tracing_selftest_running)
1870 return;
1871
1872 local_save_flags(flags);
1873
1874 /*
1875 * Skip 3 more, seems to get us at the caller of
1876 * this function.
1877 */
1878 skip += 3;
1879 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880 flags, skip, preempt_count(), NULL);
1881 }
1882
1883 static DEFINE_PER_CPU(int, user_stack_count);
1884
1885 void
1886 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1887 {
1888 struct ftrace_event_call *call = &event_user_stack;
1889 struct ring_buffer_event *event;
1890 struct userstack_entry *entry;
1891 struct stack_trace trace;
1892
1893 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894 return;
1895
1896 /*
1897 * NMIs cannot handle page faults, even with fixups.
1898 * Saving the user stack can (and often does) fault.
1899 */
1900 if (unlikely(in_nmi()))
1901 return;
1902
1903 /*
1904 * prevent recursion, since the user stack tracing may
1905 * trigger other kernel events.
1906 */
1907 preempt_disable();
1908 if (__this_cpu_read(user_stack_count))
1909 goto out;
1910
1911 __this_cpu_inc(user_stack_count);
1912
1913 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1914 sizeof(*entry), flags, pc);
1915 if (!event)
1916 goto out_drop_count;
1917 entry = ring_buffer_event_data(event);
1918
1919 entry->tgid = current->tgid;
1920 memset(&entry->caller, 0, sizeof(entry->caller));
1921
1922 trace.nr_entries = 0;
1923 trace.max_entries = FTRACE_STACK_ENTRIES;
1924 trace.skip = 0;
1925 trace.entries = entry->caller;
1926
1927 save_stack_trace_user(&trace);
1928 if (!call_filter_check_discard(call, entry, buffer, event))
1929 __buffer_unlock_commit(buffer, event);
1930
1931 out_drop_count:
1932 __this_cpu_dec(user_stack_count);
1933 out:
1934 preempt_enable();
1935 }
1936
1937 #ifdef UNUSED
1938 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1939 {
1940 ftrace_trace_userstack(tr, flags, preempt_count());
1941 }
1942 #endif /* UNUSED */
1943
1944 #endif /* CONFIG_STACKTRACE */
1945
1946 /* created for use with alloc_percpu */
1947 struct trace_buffer_struct {
1948 char buffer[TRACE_BUF_SIZE];
1949 };
1950
1951 static struct trace_buffer_struct *trace_percpu_buffer;
1952 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955
1956 /*
1957 * The buffer used is dependent on the context. There is a per cpu
1958 * buffer for normal context, softirq context, hard irq context and
1959 * for NMI context. This allows for lockless recording.
1960 *
1961 * Note, if the buffers failed to be allocated, then this returns NULL.
1962 */
1963 static char *get_trace_buf(void)
1964 {
1965 struct trace_buffer_struct *percpu_buffer;
1966
1967 /*
1968 * If we have allocated per cpu buffers, then we do not
1969 * need to do any locking.
1970 */
1971 if (in_nmi())
1972 percpu_buffer = trace_percpu_nmi_buffer;
1973 else if (in_irq())
1974 percpu_buffer = trace_percpu_irq_buffer;
1975 else if (in_softirq())
1976 percpu_buffer = trace_percpu_sirq_buffer;
1977 else
1978 percpu_buffer = trace_percpu_buffer;
1979
1980 if (!percpu_buffer)
1981 return NULL;
1982
1983 return this_cpu_ptr(&percpu_buffer->buffer[0]);
1984 }
1985
1986 static int alloc_percpu_trace_buffer(void)
1987 {
1988 struct trace_buffer_struct *buffers;
1989 struct trace_buffer_struct *sirq_buffers;
1990 struct trace_buffer_struct *irq_buffers;
1991 struct trace_buffer_struct *nmi_buffers;
1992
1993 buffers = alloc_percpu(struct trace_buffer_struct);
1994 if (!buffers)
1995 goto err_warn;
1996
1997 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998 if (!sirq_buffers)
1999 goto err_sirq;
2000
2001 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002 if (!irq_buffers)
2003 goto err_irq;
2004
2005 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006 if (!nmi_buffers)
2007 goto err_nmi;
2008
2009 trace_percpu_buffer = buffers;
2010 trace_percpu_sirq_buffer = sirq_buffers;
2011 trace_percpu_irq_buffer = irq_buffers;
2012 trace_percpu_nmi_buffer = nmi_buffers;
2013
2014 return 0;
2015
2016 err_nmi:
2017 free_percpu(irq_buffers);
2018 err_irq:
2019 free_percpu(sirq_buffers);
2020 err_sirq:
2021 free_percpu(buffers);
2022 err_warn:
2023 WARN(1, "Could not allocate percpu trace_printk buffer");
2024 return -ENOMEM;
2025 }
2026
2027 static int buffers_allocated;
2028
2029 void trace_printk_init_buffers(void)
2030 {
2031 if (buffers_allocated)
2032 return;
2033
2034 if (alloc_percpu_trace_buffer())
2035 return;
2036
2037 /* trace_printk() is for debug use only. Don't use it in production. */
2038
2039 pr_warning("\n**********************************************************\n");
2040 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2041 pr_warning("** **\n");
2042 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2045 pr_warning("** unsafe for production use. **\n");
2046 pr_warning("** **\n");
2047 pr_warning("** If you see this message and you are not debugging **\n");
2048 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2049 pr_warning("** **\n");
2050 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2051 pr_warning("**********************************************************\n");
2052
2053 /* Expand the buffers to set size */
2054 tracing_update_buffers();
2055
2056 buffers_allocated = 1;
2057
2058 /*
2059 * trace_printk_init_buffers() can be called by modules.
2060 * If that happens, then we need to start cmdline recording
2061 * directly here. If the global_trace.buffer is already
2062 * allocated here, then this was called by module code.
2063 */
2064 if (global_trace.trace_buffer.buffer)
2065 tracing_start_cmdline_record();
2066 }
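
/*
 * A minimal sketch (illustrative only, not built) of the typical way
 * this path is reached from module code: a module that contains a
 * trace_printk() call ends up in trace_printk_init_buffers(), which
 * prints the banner above, expands the ring buffer and starts cmdline
 * recording. The module name and function below are made up.
 */
#if 0
static int __init example_module_init(void)
{
	/* Using trace_printk() in a module pulls in this allocation path. */
	trace_printk("example module loaded\n");
	return 0;
}
module_init(example_module_init);
#endif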
2067
2068 void trace_printk_start_comm(void)
2069 {
2070 /* Start tracing comms if trace printk is set */
2071 if (!buffers_allocated)
2072 return;
2073 tracing_start_cmdline_record();
2074 }
2075
2076 static void trace_printk_start_stop_comm(int enabled)
2077 {
2078 if (!buffers_allocated)
2079 return;
2080
2081 if (enabled)
2082 tracing_start_cmdline_record();
2083 else
2084 tracing_stop_cmdline_record();
2085 }
2086
2087 /**
2088 	 * trace_vbprintk - write a binary printk message into the tracing buffer
 * @ip: the instruction pointer of the trace_printk() caller
 * @fmt: the constant format string
 * @args: the va_list of arguments for @fmt
2089 	 *
2090 */
2091 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2092 {
2093 struct ftrace_event_call *call = &event_bprint;
2094 struct ring_buffer_event *event;
2095 struct ring_buffer *buffer;
2096 struct trace_array *tr = &global_trace;
2097 struct bprint_entry *entry;
2098 unsigned long flags;
2099 char *tbuffer;
2100 int len = 0, size, pc;
2101
2102 if (unlikely(tracing_selftest_running || tracing_disabled))
2103 return 0;
2104
2105 /* Don't pollute graph traces with trace_vprintk internals */
2106 pause_graph_tracing();
2107
2108 pc = preempt_count();
2109 preempt_disable_notrace();
2110
2111 tbuffer = get_trace_buf();
2112 if (!tbuffer) {
2113 len = 0;
2114 goto out;
2115 }
2116
2117 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2118
2119 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2120 goto out;
2121
2122 local_save_flags(flags);
2123 size = sizeof(*entry) + sizeof(u32) * len;
2124 buffer = tr->trace_buffer.buffer;
2125 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2126 flags, pc);
2127 if (!event)
2128 goto out;
2129 entry = ring_buffer_event_data(event);
2130 entry->ip = ip;
2131 entry->fmt = fmt;
2132
2133 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2134 if (!call_filter_check_discard(call, entry, buffer, event)) {
2135 __buffer_unlock_commit(buffer, event);
2136 ftrace_trace_stack(buffer, flags, 6, pc);
2137 }
2138
2139 out:
2140 preempt_enable_notrace();
2141 unpause_graph_tracing();
2142
2143 return len;
2144 }
2145 EXPORT_SYMBOL_GPL(trace_vbprintk);
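
/*
 * An illustrative sketch (not part of the build) of a varargs wrapper
 * around trace_vbprintk(), similar in spirit to __trace_bprintk() in
 * kernel/trace/trace_printk.c; the name example_bprintk() is made up
 * here:
 */
#if 0
static int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);

	return ret;
}
#endif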
2146
2147 static int
2148 __trace_array_vprintk(struct ring_buffer *buffer,
2149 unsigned long ip, const char *fmt, va_list args)
2150 {
2151 struct ftrace_event_call *call = &event_print;
2152 struct ring_buffer_event *event;
2153 int len = 0, size, pc;
2154 struct print_entry *entry;
2155 unsigned long flags;
2156 char *tbuffer;
2157
2158 if (tracing_disabled || tracing_selftest_running)
2159 return 0;
2160
2161 /* Don't pollute graph traces with trace_vprintk internals */
2162 pause_graph_tracing();
2163
2164 pc = preempt_count();
2165 preempt_disable_notrace();
2166
2167
2168 tbuffer = get_trace_buf();
2169 if (!tbuffer) {
2170 len = 0;
2171 goto out;
2172 }
2173
2174 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2175
2176 local_save_flags(flags);
2177 size = sizeof(*entry) + len + 1;
2178 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2179 flags, pc);
2180 if (!event)
2181 goto out;
2182 entry = ring_buffer_event_data(event);
2183 entry->ip = ip;
2184
2185 memcpy(&entry->buf, tbuffer, len + 1);
2186 if (!call_filter_check_discard(call, entry, buffer, event)) {
2187 __buffer_unlock_commit(buffer, event);
2188 ftrace_trace_stack(buffer, flags, 6, pc);
2189 }
2190 out:
2191 preempt_enable_notrace();
2192 unpause_graph_tracing();
2193
2194 return len;
2195 }
2196
2197 int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2199 {
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2201 }
2202
2203 int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2205 {
2206 int ret;
2207 va_list ap;
2208
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2210 return 0;
2211
2212 va_start(ap, fmt);
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2214 va_end(ap);
2215 return ret;
2216 }
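
/*
 * Illustrative use only: the trace_array pointer "tr" is assumed to
 * come from the caller, e.g. from an instance it created earlier.
 */
#if 0
static void example_instance_printk(struct trace_array *tr)
{
	/* Write a formatted message into this instance's ring buffer. */
	trace_array_printk(tr, _THIS_IP_, "slow path taken: %d retries\n", 3);
}
#endif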
2217
2218 int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2220 {
2221 int ret;
2222 va_list ap;
2223
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2225 return 0;
2226
2227 va_start(ap, fmt);
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2229 va_end(ap);
2230 return ret;
2231 }
2232
2233 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2234 {
2235 return trace_array_vprintk(&global_trace, ip, fmt, args);
2236 }
2237 EXPORT_SYMBOL_GPL(trace_vprintk);
2238
2239 static void trace_iterator_increment(struct trace_iterator *iter)
2240 {
2241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2242
2243 iter->idx++;
2244 if (buf_iter)
2245 ring_buffer_read(buf_iter, NULL);
2246 }
2247
2248 static struct trace_entry *
2249 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
2251 {
2252 struct ring_buffer_event *event;
2253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2254
2255 if (buf_iter)
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2257 else
2258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2259 lost_events);
2260
2261 if (event) {
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2264 }
2265 iter->ent_size = 0;
2266 return NULL;
2267 }
2268
2269 static struct trace_entry *
2270 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271 unsigned long *missing_events, u64 *ent_ts)
2272 {
2273 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2274 struct trace_entry *ent, *next = NULL;
2275 unsigned long lost_events = 0, next_lost = 0;
2276 int cpu_file = iter->cpu_file;
2277 u64 next_ts = 0, ts;
2278 int next_cpu = -1;
2279 int next_size = 0;
2280 int cpu;
2281
2282 /*
2283 	 * If we are in a per_cpu trace file, don't bother iterating over
2284 	 * all cpus; just peek at that one cpu directly.
2285 */
2286 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2287 if (ring_buffer_empty_cpu(buffer, cpu_file))
2288 return NULL;
2289 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2290 if (ent_cpu)
2291 *ent_cpu = cpu_file;
2292
2293 return ent;
2294 }
2295
2296 for_each_tracing_cpu(cpu) {
2297
2298 if (ring_buffer_empty_cpu(buffer, cpu))
2299 continue;
2300
2301 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2302
2303 /*
2304 * Pick the entry with the smallest timestamp:
2305 */
2306 if (ent && (!next || ts < next_ts)) {
2307 next = ent;
2308 next_cpu = cpu;
2309 next_ts = ts;
2310 next_lost = lost_events;
2311 next_size = iter->ent_size;
2312 }
2313 }
2314
2315 iter->ent_size = next_size;
2316
2317 if (ent_cpu)
2318 *ent_cpu = next_cpu;
2319
2320 if (ent_ts)
2321 *ent_ts = next_ts;
2322
2323 if (missing_events)
2324 *missing_events = next_lost;
2325
2326 return next;
2327 }
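
/*
 * For example, if cpu0's next event has ts = 100 and cpu1's has ts = 90,
 * the loop above returns cpu1's event (and sets *ent_cpu to 1); the next
 * call then compares cpu0's event against whatever follows on cpu1, so
 * repeated calls interleave all per-cpu buffers in timestamp order.
 */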
2328
2329 /* Find the next real entry, without updating the iterator itself */
2330 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
2332 {
2333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2334 }
2335
2336 /* Find the next real entry, and increment the iterator to the next entry */
2337 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2338 {
2339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
2341
2342 if (iter->ent)
2343 trace_iterator_increment(iter);
2344
2345 return iter->ent ? iter : NULL;
2346 }
2347
2348 static void trace_consume(struct trace_iterator *iter)
2349 {
2350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2351 &iter->lost_events);
2352 }
2353
2354 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2355 {
2356 struct trace_iterator *iter = m->private;
2357 int i = (int)*pos;
2358 void *ent;
2359
2360 WARN_ON_ONCE(iter->leftover);
2361
2362 (*pos)++;
2363
2364 /* can't go backwards */
2365 if (iter->idx > i)
2366 return NULL;
2367
2368 if (iter->idx < 0)
2369 ent = trace_find_next_entry_inc(iter);
2370 else
2371 ent = iter;
2372
2373 while (ent && iter->idx < i)
2374 ent = trace_find_next_entry_inc(iter);
2375
2376 iter->pos = *pos;
2377
2378 return ent;
2379 }
2380
2381 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2382 {
2383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2386 u64 ts;
2387
2388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2389
2390 buf_iter = trace_buffer_iter(iter, cpu);
2391 if (!buf_iter)
2392 return;
2393
2394 ring_buffer_iter_reset(buf_iter);
2395
2396 /*
2397 	 * With the max latency tracers we could have the case
2398 	 * that a reset never took place on a cpu. This is evident
2399 	 * from the timestamp being before the start of the buffer.
2400 */
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2402 if (ts >= iter->trace_buffer->time_start)
2403 break;
2404 entries++;
2405 ring_buffer_read(buf_iter, NULL);
2406 }
2407
2408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2409 }
2410
2411 /*
2412 	 * The current tracer is copied to avoid taking a global lock
2413 	 * all around.
2414 */
2415 static void *s_start(struct seq_file *m, loff_t *pos)
2416 {
2417 struct trace_iterator *iter = m->private;
2418 struct trace_array *tr = iter->tr;
2419 int cpu_file = iter->cpu_file;
2420 void *p = NULL;
2421 loff_t l = 0;
2422 int cpu;
2423
2424 /*
2425 * copy the tracer to avoid using a global lock all around.
2426 * iter->trace is a copy of current_trace, the pointer to the
2427 * name may be used instead of a strcmp(), as iter->trace->name
2428 * will point to the same string as current_trace->name.
2429 */
2430 mutex_lock(&trace_types_lock);
2431 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432 *iter->trace = *tr->current_trace;
2433 mutex_unlock(&trace_types_lock);
2434
2435 #ifdef CONFIG_TRACER_MAX_TRACE
2436 if (iter->snapshot && iter->trace->use_max_tr)
2437 return ERR_PTR(-EBUSY);
2438 #endif
2439
2440 if (!iter->snapshot)
2441 atomic_inc(&trace_record_cmdline_disabled);
2442
2443 if (*pos != iter->pos) {
2444 iter->ent = NULL;
2445 iter->cpu = 0;
2446 iter->idx = -1;
2447
2448 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2449 for_each_tracing_cpu(cpu)
2450 tracing_iter_reset(iter, cpu);
2451 } else
2452 tracing_iter_reset(iter, cpu_file);
2453
2454 iter->leftover = 0;
2455 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2456 ;
2457
2458 } else {
2459 /*
2460 * If we overflowed the seq_file before, then we want
2461 * to just reuse the trace_seq buffer again.
2462 */
2463 if (iter->leftover)
2464 p = iter;
2465 else {
2466 l = *pos - 1;
2467 p = s_next(m, p, &l);
2468 }
2469 }
2470
2471 trace_event_read_lock();
2472 trace_access_lock(cpu_file);
2473 return p;
2474 }
2475
2476 static void s_stop(struct seq_file *m, void *p)
2477 {
2478 struct trace_iterator *iter = m->private;
2479
2480 #ifdef CONFIG_TRACER_MAX_TRACE
2481 if (iter->snapshot && iter->trace->use_max_tr)
2482 return;
2483 #endif
2484
2485 if (!iter->snapshot)
2486 atomic_dec(&trace_record_cmdline_disabled);
2487
2488 trace_access_unlock(iter->cpu_file);
2489 trace_event_read_unlock();
2490 }
2491
2492 static void
2493 get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
2495 {
2496 unsigned long count;
2497 int cpu;
2498
2499 *total = 0;
2500 *entries = 0;
2501
2502 for_each_tracing_cpu(cpu) {
2503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2504 /*
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2508 */
2509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2511 /* total is the same as the entries */
2512 *total += count;
2513 } else
2514 *total += count +
2515 ring_buffer_overrun_cpu(buf->buffer, cpu);
2516 *entries += count;
2517 }
2518 }
2519
2520 static void print_lat_help_header(struct seq_file *m)
2521 {
2522 seq_puts(m, "# _------=> CPU# \n"
2523 "# / _-----=> irqs-off \n"
2524 "# | / _----=> need-resched \n"
2525 "# || / _---=> hardirq/softirq \n"
2526 "# ||| / _--=> preempt-depth \n"
2527 "# |||| / delay \n"
2528 "# cmd pid ||||| time | caller \n"
2529 "# \\ / ||||| \\ | / \n");
2530 }
2531
2532 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2533 {
2534 unsigned long total;
2535 unsigned long entries;
2536
2537 get_total_entries(buf, &total, &entries);
2538 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2539 entries, total, num_online_cpus());
2540 seq_puts(m, "#\n");
2541 }
2542
2543 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2544 {
2545 print_event_info(buf, m);
2546 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2547 "# | | | | |\n");
2548 }
2549
2550 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2551 {
2552 print_event_info(buf, m);
2553 seq_puts(m, "# _-----=> irqs-off\n"
2554 "# / _----=> need-resched\n"
2555 "# | / _---=> hardirq/softirq\n"
2556 "# || / _--=> preempt-depth\n"
2557 "# ||| / delay\n"
2558 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2559 "# | | | |||| | |\n");
2560 }
2561
2562 void
2563 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2564 {
2565 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2566 struct trace_buffer *buf = iter->trace_buffer;
2567 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2568 struct tracer *type = iter->trace;
2569 unsigned long entries;
2570 unsigned long total;
2571 const char *name = "preemption";
2572
2573 name = type->name;
2574
2575 get_total_entries(buf, &total, &entries);
2576
2577 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2578 name, UTS_RELEASE);
2579 seq_puts(m, "# -----------------------------------"
2580 "---------------------------------\n");
2581 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2582 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2583 nsecs_to_usecs(data->saved_latency),
2584 entries,
2585 total,
2586 buf->cpu,
2587 #if defined(CONFIG_PREEMPT_NONE)
2588 "server",
2589 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2590 "desktop",
2591 #elif defined(CONFIG_PREEMPT)
2592 "preempt",
2593 #else
2594 "unknown",
2595 #endif
2596 /* These are reserved for later use */
2597 0, 0, 0, 0);
2598 #ifdef CONFIG_SMP
2599 seq_printf(m, " #P:%d)\n", num_online_cpus());
2600 #else
2601 seq_puts(m, ")\n");
2602 #endif
2603 seq_puts(m, "# -----------------\n");
2604 seq_printf(m, "# | task: %.16s-%d "
2605 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2606 data->comm, data->pid,
2607 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2608 data->policy, data->rt_priority);
2609 seq_puts(m, "# -----------------\n");
2610
2611 if (data->critical_start) {
2612 seq_puts(m, "# => started at: ");
2613 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2614 trace_print_seq(m, &iter->seq);
2615 seq_puts(m, "\n# => ended at: ");
2616 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2617 trace_print_seq(m, &iter->seq);
2618 seq_puts(m, "\n#\n");
2619 }
2620
2621 seq_puts(m, "#\n");
2622 }
2623
2624 static void test_cpu_buff_start(struct trace_iterator *iter)
2625 {
2626 struct trace_seq *s = &iter->seq;
2627
2628 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2629 return;
2630
2631 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2632 return;
2633
2634 if (cpumask_test_cpu(iter->cpu, iter->started))
2635 return;
2636
2637 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2638 return;
2639
2640 cpumask_set_cpu(iter->cpu, iter->started);
2641
2642 /* Don't print started cpu buffer for the first entry of the trace */
2643 if (iter->idx > 1)
2644 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2645 iter->cpu);
2646 }
2647
2648 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2649 {
2650 struct trace_seq *s = &iter->seq;
2651 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2652 struct trace_entry *entry;
2653 struct trace_event *event;
2654
2655 entry = iter->ent;
2656
2657 test_cpu_buff_start(iter);
2658
2659 event = ftrace_find_event(entry->type);
2660
2661 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2662 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2663 trace_print_lat_context(iter);
2664 else
2665 trace_print_context(iter);
2666 }
2667
2668 if (trace_seq_has_overflowed(s))
2669 return TRACE_TYPE_PARTIAL_LINE;
2670
2671 if (event)
2672 return event->funcs->trace(iter, sym_flags, event);
2673
2674 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2675
2676 return trace_handle_return(s);
2677 }
2678
2679 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2680 {
2681 struct trace_seq *s = &iter->seq;
2682 struct trace_entry *entry;
2683 struct trace_event *event;
2684
2685 entry = iter->ent;
2686
2687 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2688 trace_seq_printf(s, "%d %d %llu ",
2689 entry->pid, iter->cpu, iter->ts);
2690
2691 if (trace_seq_has_overflowed(s))
2692 return TRACE_TYPE_PARTIAL_LINE;
2693
2694 event = ftrace_find_event(entry->type);
2695 if (event)
2696 return event->funcs->raw(iter, 0, event);
2697
2698 trace_seq_printf(s, "%d ?\n", entry->type);
2699
2700 return trace_handle_return(s);
2701 }
2702
2703 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2704 {
2705 struct trace_seq *s = &iter->seq;
2706 unsigned char newline = '\n';
2707 struct trace_entry *entry;
2708 struct trace_event *event;
2709
2710 entry = iter->ent;
2711
2712 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2713 SEQ_PUT_HEX_FIELD(s, entry->pid);
2714 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2715 SEQ_PUT_HEX_FIELD(s, iter->ts);
2716 if (trace_seq_has_overflowed(s))
2717 return TRACE_TYPE_PARTIAL_LINE;
2718 }
2719
2720 event = ftrace_find_event(entry->type);
2721 if (event) {
2722 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2723 if (ret != TRACE_TYPE_HANDLED)
2724 return ret;
2725 }
2726
2727 SEQ_PUT_FIELD(s, newline);
2728
2729 return trace_handle_return(s);
2730 }
2731
2732 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2733 {
2734 struct trace_seq *s = &iter->seq;
2735 struct trace_entry *entry;
2736 struct trace_event *event;
2737
2738 entry = iter->ent;
2739
2740 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2741 SEQ_PUT_FIELD(s, entry->pid);
2742 SEQ_PUT_FIELD(s, iter->cpu);
2743 SEQ_PUT_FIELD(s, iter->ts);
2744 if (trace_seq_has_overflowed(s))
2745 return TRACE_TYPE_PARTIAL_LINE;
2746 }
2747
2748 event = ftrace_find_event(entry->type);
2749 return event ? event->funcs->binary(iter, 0, event) :
2750 TRACE_TYPE_HANDLED;
2751 }
2752
2753 int trace_empty(struct trace_iterator *iter)
2754 {
2755 struct ring_buffer_iter *buf_iter;
2756 int cpu;
2757
2758 /* If we are looking at one CPU buffer, only check that one */
2759 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2760 cpu = iter->cpu_file;
2761 buf_iter = trace_buffer_iter(iter, cpu);
2762 if (buf_iter) {
2763 if (!ring_buffer_iter_empty(buf_iter))
2764 return 0;
2765 } else {
2766 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2767 return 0;
2768 }
2769 return 1;
2770 }
2771
2772 for_each_tracing_cpu(cpu) {
2773 buf_iter = trace_buffer_iter(iter, cpu);
2774 if (buf_iter) {
2775 if (!ring_buffer_iter_empty(buf_iter))
2776 return 0;
2777 } else {
2778 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2779 return 0;
2780 }
2781 }
2782
2783 return 1;
2784 }
2785
2786 /* Called with trace_event_read_lock() held. */
2787 enum print_line_t print_trace_line(struct trace_iterator *iter)
2788 {
2789 enum print_line_t ret;
2790
2791 if (iter->lost_events) {
2792 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2793 iter->cpu, iter->lost_events);
2794 if (trace_seq_has_overflowed(&iter->seq))
2795 return TRACE_TYPE_PARTIAL_LINE;
2796 }
2797
2798 if (iter->trace && iter->trace->print_line) {
2799 ret = iter->trace->print_line(iter);
2800 if (ret != TRACE_TYPE_UNHANDLED)
2801 return ret;
2802 }
2803
2804 if (iter->ent->type == TRACE_BPUTS &&
2805 trace_flags & TRACE_ITER_PRINTK &&
2806 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2807 return trace_print_bputs_msg_only(iter);
2808
2809 if (iter->ent->type == TRACE_BPRINT &&
2810 trace_flags & TRACE_ITER_PRINTK &&
2811 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2812 return trace_print_bprintk_msg_only(iter);
2813
2814 if (iter->ent->type == TRACE_PRINT &&
2815 trace_flags & TRACE_ITER_PRINTK &&
2816 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2817 return trace_print_printk_msg_only(iter);
2818
2819 if (trace_flags & TRACE_ITER_BIN)
2820 return print_bin_fmt(iter);
2821
2822 if (trace_flags & TRACE_ITER_HEX)
2823 return print_hex_fmt(iter);
2824
2825 if (trace_flags & TRACE_ITER_RAW)
2826 return print_raw_fmt(iter);
2827
2828 return print_trace_fmt(iter);
2829 }
2830
2831 void trace_latency_header(struct seq_file *m)
2832 {
2833 struct trace_iterator *iter = m->private;
2834
2835 /* print nothing if the buffers are empty */
2836 if (trace_empty(iter))
2837 return;
2838
2839 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2840 print_trace_header(m, iter);
2841
2842 if (!(trace_flags & TRACE_ITER_VERBOSE))
2843 print_lat_help_header(m);
2844 }
2845
2846 void trace_default_header(struct seq_file *m)
2847 {
2848 struct trace_iterator *iter = m->private;
2849
2850 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2851 return;
2852
2853 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2854 /* print nothing if the buffers are empty */
2855 if (trace_empty(iter))
2856 return;
2857 print_trace_header(m, iter);
2858 if (!(trace_flags & TRACE_ITER_VERBOSE))
2859 print_lat_help_header(m);
2860 } else {
2861 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2862 if (trace_flags & TRACE_ITER_IRQ_INFO)
2863 print_func_help_header_irq(iter->trace_buffer, m);
2864 else
2865 print_func_help_header(iter->trace_buffer, m);
2866 }
2867 }
2868 }
2869
2870 static void test_ftrace_alive(struct seq_file *m)
2871 {
2872 if (!ftrace_is_dead())
2873 return;
2874 	seq_puts(m, "# WARNING: FUNCTION TRACING IS DISABLED\n"
2875 "# MAY BE MISSING FUNCTION EVENTS\n");
2876 }
2877
2878 #ifdef CONFIG_TRACER_MAX_TRACE
2879 static void show_snapshot_main_help(struct seq_file *m)
2880 {
2881 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2882 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2883 "# Takes a snapshot of the main buffer.\n"
2884 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2885 "# (Doesn't have to be '2' works with any number that\n"
2886 "# is not a '0' or '1')\n");
2887 }
2888
2889 static void show_snapshot_percpu_help(struct seq_file *m)
2890 {
2891 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2892 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2893 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2894 "# Takes a snapshot of the main buffer for this cpu.\n");
2895 #else
2896 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2897 "# Must use main snapshot file to allocate.\n");
2898 #endif
2899 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2900 "# (Doesn't have to be '2' works with any number that\n"
2901 "# is not a '0' or '1')\n");
2902 }
2903
2904 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2905 {
2906 if (iter->tr->allocated_snapshot)
2907 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2908 else
2909 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2910
2911 seq_puts(m, "# Snapshot commands:\n");
2912 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2913 show_snapshot_main_help(m);
2914 else
2915 show_snapshot_percpu_help(m);
2916 }
2917 #else
2918 /* Should never be called */
2919 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2920 #endif
2921
2922 static int s_show(struct seq_file *m, void *v)
2923 {
2924 struct trace_iterator *iter = v;
2925 int ret;
2926
2927 if (iter->ent == NULL) {
2928 if (iter->tr) {
2929 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2930 seq_puts(m, "#\n");
2931 test_ftrace_alive(m);
2932 }
2933 if (iter->snapshot && trace_empty(iter))
2934 print_snapshot_help(m, iter);
2935 else if (iter->trace && iter->trace->print_header)
2936 iter->trace->print_header(m);
2937 else
2938 trace_default_header(m);
2939
2940 } else if (iter->leftover) {
2941 /*
2942 * If we filled the seq_file buffer earlier, we
2943 * want to just show it now.
2944 */
2945 ret = trace_print_seq(m, &iter->seq);
2946
2947 /* ret should this time be zero, but you never know */
2948 iter->leftover = ret;
2949
2950 } else {
2951 print_trace_line(iter);
2952 ret = trace_print_seq(m, &iter->seq);
2953 /*
2954 * If we overflow the seq_file buffer, then it will
2955 * ask us for this data again at start up.
2956 * Use that instead.
2957 * ret is 0 if seq_file write succeeded.
2958 * -1 otherwise.
2959 */
2960 iter->leftover = ret;
2961 }
2962
2963 return 0;
2964 }
2965
2966 /*
2967 * Should be used after trace_array_get(), trace_types_lock
2968 * ensures that i_cdev was already initialized.
2969 */
2970 static inline int tracing_get_cpu(struct inode *inode)
2971 {
2972 if (inode->i_cdev) /* See trace_create_cpu_file() */
2973 return (long)inode->i_cdev - 1;
2974 return RING_BUFFER_ALL_CPUS;
2975 }
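
/*
 * For example, the per_cpu/cpu2/trace file is created with cpu + 1 (== 3)
 * stored in i_cdev, so the subtraction above yields 2; files with a NULL
 * i_cdev (such as the top level trace file) map to RING_BUFFER_ALL_CPUS.
 */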
2976
2977 static const struct seq_operations tracer_seq_ops = {
2978 .start = s_start,
2979 .next = s_next,
2980 .stop = s_stop,
2981 .show = s_show,
2982 };
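
/*
 * The seq_file core drives these callbacks roughly as follows
 * (simplified; the real loop in fs/seq_file.c also handles buffer
 * overflow and partial reads):
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 */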
2983
2984 static struct trace_iterator *
2985 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2986 {
2987 struct trace_array *tr = inode->i_private;
2988 struct trace_iterator *iter;
2989 int cpu;
2990
2991 if (tracing_disabled)
2992 return ERR_PTR(-ENODEV);
2993
2994 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2995 if (!iter)
2996 return ERR_PTR(-ENOMEM);
2997
2998 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2999 GFP_KERNEL);
3000 if (!iter->buffer_iter)
3001 goto release;
3002
3003 /*
3004 * We make a copy of the current tracer to avoid concurrent
3005 * changes on it while we are reading.
3006 */
3007 mutex_lock(&trace_types_lock);
3008 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3009 if (!iter->trace)
3010 goto fail;
3011
3012 *iter->trace = *tr->current_trace;
3013
3014 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3015 goto fail;
3016
3017 iter->tr = tr;
3018
3019 #ifdef CONFIG_TRACER_MAX_TRACE
3020 /* Currently only the top directory has a snapshot */
3021 if (tr->current_trace->print_max || snapshot)
3022 iter->trace_buffer = &tr->max_buffer;
3023 else
3024 #endif
3025 iter->trace_buffer = &tr->trace_buffer;
3026 iter->snapshot = snapshot;
3027 iter->pos = -1;
3028 iter->cpu_file = tracing_get_cpu(inode);
3029 mutex_init(&iter->mutex);
3030
3031 /* Notify the tracer early; before we stop tracing. */
3032 if (iter->trace && iter->trace->open)
3033 iter->trace->open(iter);
3034
3035 /* Annotate start of buffers if we had overruns */
3036 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3037 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3038
3039 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3040 if (trace_clocks[tr->clock_id].in_ns)
3041 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3042
3043 /* stop the trace while dumping if we are not opening "snapshot" */
3044 if (!iter->snapshot)
3045 tracing_stop_tr(tr);
3046
3047 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3048 for_each_tracing_cpu(cpu) {
3049 iter->buffer_iter[cpu] =
3050 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3051 }
3052 ring_buffer_read_prepare_sync();
3053 for_each_tracing_cpu(cpu) {
3054 ring_buffer_read_start(iter->buffer_iter[cpu]);
3055 tracing_iter_reset(iter, cpu);
3056 }
3057 } else {
3058 cpu = iter->cpu_file;
3059 iter->buffer_iter[cpu] =
3060 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3061 ring_buffer_read_prepare_sync();
3062 ring_buffer_read_start(iter->buffer_iter[cpu]);
3063 tracing_iter_reset(iter, cpu);
3064 }
3065
3066 mutex_unlock(&trace_types_lock);
3067
3068 return iter;
3069
3070 fail:
3071 mutex_unlock(&trace_types_lock);
3072 kfree(iter->trace);
3073 kfree(iter->buffer_iter);
3074 release:
3075 seq_release_private(inode, file);
3076 return ERR_PTR(-ENOMEM);
3077 }
3078
3079 int tracing_open_generic(struct inode *inode, struct file *filp)
3080 {
3081 if (tracing_disabled)
3082 return -ENODEV;
3083
3084 filp->private_data = inode->i_private;
3085 return 0;
3086 }
3087
3088 bool tracing_is_disabled(void)
3089 {
3090 	return (tracing_disabled) ? true : false;
3091 }
3092
3093 /*
3094 * Open and update trace_array ref count.
3095 * Must have the current trace_array passed to it.
3096 */
3097 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3098 {
3099 struct trace_array *tr = inode->i_private;
3100
3101 if (tracing_disabled)
3102 return -ENODEV;
3103
3104 if (trace_array_get(tr) < 0)
3105 return -ENODEV;
3106
3107 filp->private_data = inode->i_private;
3108
3109 return 0;
3110 }
3111
3112 static int tracing_release(struct inode *inode, struct file *file)
3113 {
3114 struct trace_array *tr = inode->i_private;
3115 struct seq_file *m = file->private_data;
3116 struct trace_iterator *iter;
3117 int cpu;
3118
3119 if (!(file->f_mode & FMODE_READ)) {
3120 trace_array_put(tr);
3121 return 0;
3122 }
3123
3124 /* Writes do not use seq_file */
3125 iter = m->private;
3126 mutex_lock(&trace_types_lock);
3127
3128 for_each_tracing_cpu(cpu) {
3129 if (iter->buffer_iter[cpu])
3130 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3131 }
3132
3133 if (iter->trace && iter->trace->close)
3134 iter->trace->close(iter);
3135
3136 if (!iter->snapshot)
3137 /* reenable tracing if it was previously enabled */
3138 tracing_start_tr(tr);
3139
3140 __trace_array_put(tr);
3141
3142 mutex_unlock(&trace_types_lock);
3143
3144 mutex_destroy(&iter->mutex);
3145 free_cpumask_var(iter->started);
3146 kfree(iter->trace);
3147 kfree(iter->buffer_iter);
3148 seq_release_private(inode, file);
3149
3150 return 0;
3151 }
3152
3153 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3154 {
3155 struct trace_array *tr = inode->i_private;
3156
3157 trace_array_put(tr);
3158 return 0;
3159 }
3160
3161 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3162 {
3163 struct trace_array *tr = inode->i_private;
3164
3165 trace_array_put(tr);
3166
3167 return single_release(inode, file);
3168 }
3169
3170 static int tracing_open(struct inode *inode, struct file *file)
3171 {
3172 struct trace_array *tr = inode->i_private;
3173 struct trace_iterator *iter;
3174 int ret = 0;
3175
3176 if (trace_array_get(tr) < 0)
3177 return -ENODEV;
3178
3179 /* If this file was open for write, then erase contents */
3180 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3181 int cpu = tracing_get_cpu(inode);
3182
3183 if (cpu == RING_BUFFER_ALL_CPUS)
3184 tracing_reset_online_cpus(&tr->trace_buffer);
3185 else
3186 tracing_reset(&tr->trace_buffer, cpu);
3187 }
3188
3189 if (file->f_mode & FMODE_READ) {
3190 iter = __tracing_open(inode, file, false);
3191 if (IS_ERR(iter))
3192 ret = PTR_ERR(iter);
3193 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3194 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3195 }
3196
3197 if (ret < 0)
3198 trace_array_put(tr);
3199
3200 return ret;
3201 }
3202
3203 /*
3204 * Some tracers are not suitable for instance buffers.
3205 * A tracer is always available for the global array (toplevel)
3206 * or if it explicitly states that it is.
3207 */
3208 static bool
3209 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3210 {
3211 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3212 }
3213
3214 /* Find the next tracer that this trace array may use */
3215 static struct tracer *
3216 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3217 {
3218 while (t && !trace_ok_for_array(t, tr))
3219 t = t->next;
3220
3221 return t;
3222 }
3223
3224 static void *
3225 t_next(struct seq_file *m, void *v, loff_t *pos)
3226 {
3227 struct trace_array *tr = m->private;
3228 struct tracer *t = v;
3229
3230 (*pos)++;
3231
3232 if (t)
3233 t = get_tracer_for_array(tr, t->next);
3234
3235 return t;
3236 }
3237
3238 static void *t_start(struct seq_file *m, loff_t *pos)
3239 {
3240 struct trace_array *tr = m->private;
3241 struct tracer *t;
3242 loff_t l = 0;
3243
3244 mutex_lock(&trace_types_lock);
3245
3246 t = get_tracer_for_array(tr, trace_types);
3247 for (; t && l < *pos; t = t_next(m, t, &l))
3248 ;
3249
3250 return t;
3251 }
3252
3253 static void t_stop(struct seq_file *m, void *p)
3254 {
3255 mutex_unlock(&trace_types_lock);
3256 }
3257
3258 static int t_show(struct seq_file *m, void *v)
3259 {
3260 struct tracer *t = v;
3261
3262 if (!t)
3263 return 0;
3264
3265 seq_puts(m, t->name);
3266 if (t->next)
3267 seq_putc(m, ' ');
3268 else
3269 seq_putc(m, '\n');
3270
3271 return 0;
3272 }
3273
3274 static const struct seq_operations show_traces_seq_ops = {
3275 .start = t_start,
3276 .next = t_next,
3277 .stop = t_stop,
3278 .show = t_show,
3279 };
3280
3281 static int show_traces_open(struct inode *inode, struct file *file)
3282 {
3283 struct trace_array *tr = inode->i_private;
3284 struct seq_file *m;
3285 int ret;
3286
3287 if (tracing_disabled)
3288 return -ENODEV;
3289
3290 ret = seq_open(file, &show_traces_seq_ops);
3291 if (ret)
3292 return ret;
3293
3294 m = file->private_data;
3295 m->private = tr;
3296
3297 return 0;
3298 }
3299
3300 static ssize_t
3301 tracing_write_stub(struct file *filp, const char __user *ubuf,
3302 size_t count, loff_t *ppos)
3303 {
3304 return count;
3305 }
3306
3307 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3308 {
3309 int ret;
3310
3311 if (file->f_mode & FMODE_READ)
3312 ret = seq_lseek(file, offset, whence);
3313 else
3314 file->f_pos = ret = 0;
3315
3316 return ret;
3317 }
3318
3319 static const struct file_operations tracing_fops = {
3320 .open = tracing_open,
3321 .read = seq_read,
3322 .write = tracing_write_stub,
3323 .llseek = tracing_lseek,
3324 .release = tracing_release,
3325 };
3326
3327 static const struct file_operations show_traces_fops = {
3328 .open = show_traces_open,
3329 .read = seq_read,
3330 .release = seq_release,
3331 .llseek = seq_lseek,
3332 };
3333
3334 /*
3335 * The tracer itself will not take this lock, but still we want
3336 * to provide a consistent cpumask to user-space:
3337 */
3338 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3339
3340 /*
3341 * Temporary storage for the character representation of the
3342 * CPU bitmask (and one more byte for the newline):
3343 */
3344 static char mask_str[NR_CPUS + 1];
3345
3346 static ssize_t
3347 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3348 size_t count, loff_t *ppos)
3349 {
3350 struct trace_array *tr = file_inode(filp)->i_private;
3351 int len;
3352
3353 mutex_lock(&tracing_cpumask_update_lock);
3354
3355 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3356 if (count - len < 2) {
3357 count = -EINVAL;
3358 goto out_err;
3359 }
3360 len += sprintf(mask_str + len, "\n");
3361 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3362
3363 out_err:
3364 mutex_unlock(&tracing_cpumask_update_lock);
3365
3366 return count;
3367 }
3368
3369 static ssize_t
3370 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3371 size_t count, loff_t *ppos)
3372 {
3373 struct trace_array *tr = file_inode(filp)->i_private;
3374 cpumask_var_t tracing_cpumask_new;
3375 int err, cpu;
3376
3377 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3378 return -ENOMEM;
3379
3380 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3381 if (err)
3382 goto err_unlock;
3383
3384 mutex_lock(&tracing_cpumask_update_lock);
3385
3386 local_irq_disable();
3387 arch_spin_lock(&tr->max_lock);
3388 for_each_tracing_cpu(cpu) {
3389 /*
3390 * Increase/decrease the disabled counter if we are
3391 * about to flip a bit in the cpumask:
3392 */
3393 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3394 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3395 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3396 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3397 }
3398 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3399 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3400 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3401 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3402 }
3403 }
3404 arch_spin_unlock(&tr->max_lock);
3405 local_irq_enable();
3406
3407 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3408
3409 mutex_unlock(&tracing_cpumask_update_lock);
3410 free_cpumask_var(tracing_cpumask_new);
3411
3412 return count;
3413
3414 err_unlock:
3415 free_cpumask_var(tracing_cpumask_new);
3416
3417 return err;
3418 }
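
/*
 * For example, "echo 3 > tracing_cpumask" (a hex mask) keeps tracing
 * enabled only on CPUs 0 and 1; the loop above bumps the per-cpu
 * "disabled" counter and stops ring buffer recording on every CPU that
 * was cleared from the mask.
 */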
3419
3420 static const struct file_operations tracing_cpumask_fops = {
3421 .open = tracing_open_generic_tr,
3422 .read = tracing_cpumask_read,
3423 .write = tracing_cpumask_write,
3424 .release = tracing_release_generic_tr,
3425 .llseek = generic_file_llseek,
3426 };
3427
3428 static int tracing_trace_options_show(struct seq_file *m, void *v)
3429 {
3430 struct tracer_opt *trace_opts;
3431 struct trace_array *tr = m->private;
3432 u32 tracer_flags;
3433 int i;
3434
3435 mutex_lock(&trace_types_lock);
3436 tracer_flags = tr->current_trace->flags->val;
3437 trace_opts = tr->current_trace->flags->opts;
3438
3439 for (i = 0; trace_options[i]; i++) {
3440 if (trace_flags & (1 << i))
3441 seq_printf(m, "%s\n", trace_options[i]);
3442 else
3443 seq_printf(m, "no%s\n", trace_options[i]);
3444 }
3445
3446 for (i = 0; trace_opts[i].name; i++) {
3447 if (tracer_flags & trace_opts[i].bit)
3448 seq_printf(m, "%s\n", trace_opts[i].name);
3449 else
3450 seq_printf(m, "no%s\n", trace_opts[i].name);
3451 }
3452 mutex_unlock(&trace_types_lock);
3453
3454 return 0;
3455 }
3456
3457 static int __set_tracer_option(struct trace_array *tr,
3458 struct tracer_flags *tracer_flags,
3459 struct tracer_opt *opts, int neg)
3460 {
3461 struct tracer *trace = tr->current_trace;
3462 int ret;
3463
3464 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3465 if (ret)
3466 return ret;
3467
3468 if (neg)
3469 tracer_flags->val &= ~opts->bit;
3470 else
3471 tracer_flags->val |= opts->bit;
3472 return 0;
3473 }
3474
3475 /* Try to assign a tracer specific option */
3476 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3477 {
3478 struct tracer *trace = tr->current_trace;
3479 struct tracer_flags *tracer_flags = trace->flags;
3480 struct tracer_opt *opts = NULL;
3481 int i;
3482
3483 for (i = 0; tracer_flags->opts[i].name; i++) {
3484 opts = &tracer_flags->opts[i];
3485
3486 if (strcmp(cmp, opts->name) == 0)
3487 return __set_tracer_option(tr, trace->flags, opts, neg);
3488 }
3489
3490 return -EINVAL;
3491 }
3492
3493 /* Some tracers require overwrite to stay enabled */
3494 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3495 {
3496 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3497 return -1;
3498
3499 return 0;
3500 }
3501
3502 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3503 {
3504 /* do nothing if flag is already set */
3505 if (!!(trace_flags & mask) == !!enabled)
3506 return 0;
3507
3508 /* Give the tracer a chance to approve the change */
3509 if (tr->current_trace->flag_changed)
3510 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3511 return -EINVAL;
3512
3513 if (enabled)
3514 trace_flags |= mask;
3515 else
3516 trace_flags &= ~mask;
3517
3518 if (mask == TRACE_ITER_RECORD_CMD)
3519 trace_event_enable_cmd_record(enabled);
3520
3521 if (mask == TRACE_ITER_OVERWRITE) {
3522 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3523 #ifdef CONFIG_TRACER_MAX_TRACE
3524 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3525 #endif
3526 }
3527
3528 if (mask == TRACE_ITER_PRINTK)
3529 trace_printk_start_stop_comm(enabled);
3530
3531 return 0;
3532 }
3533
3534 static int trace_set_options(struct trace_array *tr, char *option)
3535 {
3536 char *cmp;
3537 int neg = 0;
3538 int ret = -ENODEV;
3539 int i;
3540
3541 cmp = strstrip(option);
3542
3543 if (strncmp(cmp, "no", 2) == 0) {
3544 neg = 1;
3545 cmp += 2;
3546 }
3547
3548 mutex_lock(&trace_types_lock);
3549
3550 for (i = 0; trace_options[i]; i++) {
3551 if (strcmp(cmp, trace_options[i]) == 0) {
3552 ret = set_tracer_flag(tr, 1 << i, !neg);
3553 break;
3554 }
3555 }
3556
3557 /* If no option could be set, test the specific tracer options */
3558 if (!trace_options[i])
3559 ret = set_tracer_option(tr, cmp, neg);
3560
3561 mutex_unlock(&trace_types_lock);
3562
3563 return ret;
3564 }
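
/*
 * For example, writing "overwrite" to the trace_options file sets the
 * TRACE_ITER_OVERWRITE bit via set_tracer_flag(), and writing
 * "nooverwrite" clears it again; anything that matches no global option
 * is handed to the current tracer's own options instead.
 */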
3565
3566 static ssize_t
3567 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3568 size_t cnt, loff_t *ppos)
3569 {
3570 struct seq_file *m = filp->private_data;
3571 struct trace_array *tr = m->private;
3572 char buf[64];
3573 int ret;
3574
3575 if (cnt >= sizeof(buf))
3576 return -EINVAL;
3577
3578 if (copy_from_user(&buf, ubuf, cnt))
3579 return -EFAULT;
3580
3581 buf[cnt] = 0;
3582
3583 ret = trace_set_options(tr, buf);
3584 if (ret < 0)
3585 return ret;
3586
3587 *ppos += cnt;
3588
3589 return cnt;
3590 }
3591
3592 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3593 {
3594 struct trace_array *tr = inode->i_private;
3595 int ret;
3596
3597 if (tracing_disabled)
3598 return -ENODEV;
3599
3600 if (trace_array_get(tr) < 0)
3601 return -ENODEV;
3602
3603 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3604 if (ret < 0)
3605 trace_array_put(tr);
3606
3607 return ret;
3608 }
3609
3610 static const struct file_operations tracing_iter_fops = {
3611 .open = tracing_trace_options_open,
3612 .read = seq_read,
3613 .llseek = seq_lseek,
3614 .release = tracing_single_release_tr,
3615 .write = tracing_trace_options_write,
3616 };
3617
3618 static const char readme_msg[] =
3619 "tracing mini-HOWTO:\n\n"
3620 "# echo 0 > tracing_on : quick way to disable tracing\n"
3621 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3622 " Important files:\n"
3623 " trace\t\t\t- The static contents of the buffer\n"
3624 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3625 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3626 " current_tracer\t- function and latency tracers\n"
3627 " available_tracers\t- list of configured tracers for current_tracer\n"
3628 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3629 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3630 " trace_clock\t\t-change the clock used to order events\n"
3631 " local: Per cpu clock but may not be synced across CPUs\n"
3632 " global: Synced across CPUs but slows tracing down.\n"
3633 " counter: Not a clock, but just an increment\n"
3634 " uptime: Jiffy counter from time of boot\n"
3635 " perf: Same clock that perf events use\n"
3636 #ifdef CONFIG_X86_64
3637 " x86-tsc: TSC cycle counter\n"
3638 #endif
3639 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3640 " tracing_cpumask\t- Limit which CPUs to trace\n"
3641 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3642 "\t\t\t Remove sub-buffer with rmdir\n"
3643 " trace_options\t\t- Set format or modify how tracing happens\n"
3644 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3645 "\t\t\t option name\n"
3646 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3647 #ifdef CONFIG_DYNAMIC_FTRACE
3648 "\n available_filter_functions - list of functions that can be filtered on\n"
3649 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3650 "\t\t\t functions\n"
3651 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3652 "\t modules: Can select a group via module\n"
3653 "\t Format: :mod:<module-name>\n"
3654 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3655 "\t triggers: a command to perform when function is hit\n"
3656 "\t Format: <function>:<trigger>[:count]\n"
3657 "\t trigger: traceon, traceoff\n"
3658 "\t\t enable_event:<system>:<event>\n"
3659 "\t\t disable_event:<system>:<event>\n"
3660 #ifdef CONFIG_STACKTRACE
3661 "\t\t stacktrace\n"
3662 #endif
3663 #ifdef CONFIG_TRACER_SNAPSHOT
3664 "\t\t snapshot\n"
3665 #endif
3666 "\t\t dump\n"
3667 "\t\t cpudump\n"
3668 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3669 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3670 "\t The first one will disable tracing every time do_fault is hit\n"
3671 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3672 "\t The first time do trap is hit and it disables tracing, the\n"
3673 "\t counter will decrement to 2. If tracing is already disabled,\n"
3674 "\t the counter will not decrement. It only decrements when the\n"
3675 "\t trigger did work\n"
3676 "\t To remove trigger without count:\n"
3677 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3678 "\t To remove trigger with a count:\n"
3679 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3680 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3681 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3682 "\t modules: Can select a group via module command :mod:\n"
3683 "\t Does not accept triggers\n"
3684 #endif /* CONFIG_DYNAMIC_FTRACE */
3685 #ifdef CONFIG_FUNCTION_TRACER
3686 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3687 "\t\t (function)\n"
3688 #endif
3689 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3690 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3691 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3692 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3693 #endif
3694 #ifdef CONFIG_TRACER_SNAPSHOT
3695 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3696 "\t\t\t snapshot buffer. Read the contents for more\n"
3697 "\t\t\t information\n"
3698 #endif
3699 #ifdef CONFIG_STACK_TRACER
3700 " stack_trace\t\t- Shows the max stack trace when active\n"
3701 " stack_max_size\t- Shows current max stack size that was traced\n"
3702 "\t\t\t Write into this file to reset the max size (trigger a\n"
3703 "\t\t\t new trace)\n"
3704 #ifdef CONFIG_DYNAMIC_FTRACE
3705 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3706 "\t\t\t traces\n"
3707 #endif
3708 #endif /* CONFIG_STACK_TRACER */
3709 " events/\t\t- Directory containing all trace event subsystems:\n"
3710 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3711 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3712 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3713 "\t\t\t events\n"
3714 " filter\t\t- If set, only events passing filter are traced\n"
3715 " events/<system>/<event>/\t- Directory containing control files for\n"
3716 "\t\t\t <event>:\n"
3717 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3718 " filter\t\t- If set, only events passing filter are traced\n"
3719 " trigger\t\t- If set, a command to perform when event is hit\n"
3720 "\t Format: <trigger>[:count][if <filter>]\n"
3721 "\t trigger: traceon, traceoff\n"
3722 "\t enable_event:<system>:<event>\n"
3723 "\t disable_event:<system>:<event>\n"
3724 #ifdef CONFIG_STACKTRACE
3725 "\t\t stacktrace\n"
3726 #endif
3727 #ifdef CONFIG_TRACER_SNAPSHOT
3728 "\t\t snapshot\n"
3729 #endif
3730 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3731 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3732 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3733 "\t events/block/block_unplug/trigger\n"
3734 "\t The first disables tracing every time block_unplug is hit.\n"
3735 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3736 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3737 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3738 "\t Like function triggers, the counter is only decremented if it\n"
3739 "\t enabled or disabled tracing.\n"
3740 "\t To remove a trigger without a count:\n"
3741 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3742 "\t To remove a trigger with a count:\n"
3743 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3744 "\t Filters can be ignored when removing a trigger.\n"
3745 ;
3746
3747 static ssize_t
3748 tracing_readme_read(struct file *filp, char __user *ubuf,
3749 size_t cnt, loff_t *ppos)
3750 {
3751 return simple_read_from_buffer(ubuf, cnt, ppos,
3752 readme_msg, strlen(readme_msg));
3753 }
3754
3755 static const struct file_operations tracing_readme_fops = {
3756 .open = tracing_open_generic,
3757 .read = tracing_readme_read,
3758 .llseek = generic_file_llseek,
3759 };
3760
3761 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3762 {
3763 unsigned int *ptr = v;
3764
3765 if (*pos || m->count)
3766 ptr++;
3767
3768 (*pos)++;
3769
3770 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3771 ptr++) {
3772 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3773 continue;
3774
3775 return ptr;
3776 }
3777
3778 return NULL;
3779 }
3780
3781 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3782 {
3783 void *v;
3784 loff_t l = 0;
3785
3786 preempt_disable();
3787 arch_spin_lock(&trace_cmdline_lock);
3788
3789 v = &savedcmd->map_cmdline_to_pid[0];
3790 while (l <= *pos) {
3791 v = saved_cmdlines_next(m, v, &l);
3792 if (!v)
3793 return NULL;
3794 }
3795
3796 return v;
3797 }
3798
3799 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3800 {
3801 arch_spin_unlock(&trace_cmdline_lock);
3802 preempt_enable();
3803 }
3804
3805 static int saved_cmdlines_show(struct seq_file *m, void *v)
3806 {
3807 char buf[TASK_COMM_LEN];
3808 unsigned int *pid = v;
3809
3810 __trace_find_cmdline(*pid, buf);
3811 seq_printf(m, "%d %s\n", *pid, buf);
3812 return 0;
3813 }
3814
3815 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3816 .start = saved_cmdlines_start,
3817 .next = saved_cmdlines_next,
3818 .stop = saved_cmdlines_stop,
3819 .show = saved_cmdlines_show,
3820 };
3821
3822 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3823 {
3824 if (tracing_disabled)
3825 return -ENODEV;
3826
3827 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3828 }
3829
3830 static const struct file_operations tracing_saved_cmdlines_fops = {
3831 .open = tracing_saved_cmdlines_open,
3832 .read = seq_read,
3833 .llseek = seq_lseek,
3834 .release = seq_release,
3835 };
3836
3837 static ssize_t
3838 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3839 size_t cnt, loff_t *ppos)
3840 {
3841 char buf[64];
3842 int r;
3843
3844 arch_spin_lock(&trace_cmdline_lock);
3845 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3846 arch_spin_unlock(&trace_cmdline_lock);
3847
3848 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3849 }
3850
3851 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3852 {
3853 kfree(s->saved_cmdlines);
3854 kfree(s->map_cmdline_to_pid);
3855 kfree(s);
3856 }
3857
3858 static int tracing_resize_saved_cmdlines(unsigned int val)
3859 {
3860 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3861
3862 s = kmalloc(sizeof(*s), GFP_KERNEL);
3863 if (!s)
3864 return -ENOMEM;
3865
3866 if (allocate_cmdlines_buffer(val, s) < 0) {
3867 kfree(s);
3868 return -ENOMEM;
3869 }
3870
3871 arch_spin_lock(&trace_cmdline_lock);
3872 savedcmd_temp = savedcmd;
3873 savedcmd = s;
3874 arch_spin_unlock(&trace_cmdline_lock);
3875 free_saved_cmdlines_buffer(savedcmd_temp);
3876
3877 return 0;
3878 }
3879
3880 static ssize_t
3881 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3882 size_t cnt, loff_t *ppos)
3883 {
3884 unsigned long val;
3885 int ret;
3886
3887 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3888 if (ret)
3889 return ret;
3890
3891 	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3892 if (!val || val > PID_MAX_DEFAULT)
3893 return -EINVAL;
3894
3895 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3896 if (ret < 0)
3897 return ret;
3898
3899 *ppos += cnt;
3900
3901 return cnt;
3902 }
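
/*
 * For example, "echo 1024 > saved_cmdlines_size" reallocates the
 * comm/pid map to hold 1024 entries; the old buffer is swapped out
 * under trace_cmdline_lock and then freed.
 */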
3903
3904 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3905 .open = tracing_open_generic,
3906 .read = tracing_saved_cmdlines_size_read,
3907 .write = tracing_saved_cmdlines_size_write,
3908 };
3909
3910 static ssize_t
3911 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3912 size_t cnt, loff_t *ppos)
3913 {
3914 struct trace_array *tr = filp->private_data;
3915 char buf[MAX_TRACER_SIZE+2];
3916 int r;
3917
3918 mutex_lock(&trace_types_lock);
3919 r = sprintf(buf, "%s\n", tr->current_trace->name);
3920 mutex_unlock(&trace_types_lock);
3921
3922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3923 }
3924
3925 int tracer_init(struct tracer *t, struct trace_array *tr)
3926 {
3927 tracing_reset_online_cpus(&tr->trace_buffer);
3928 return t->init(tr);
3929 }
3930
3931 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3932 {
3933 int cpu;
3934
3935 for_each_tracing_cpu(cpu)
3936 per_cpu_ptr(buf->data, cpu)->entries = val;
3937 }
3938
3939 #ifdef CONFIG_TRACER_MAX_TRACE
3940 /* resize @trace_buf's per-cpu entries to the size of @size_buf's entries */
3941 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3942 struct trace_buffer *size_buf, int cpu_id)
3943 {
3944 int cpu, ret = 0;
3945
3946 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3947 for_each_tracing_cpu(cpu) {
3948 ret = ring_buffer_resize(trace_buf->buffer,
3949 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3950 if (ret < 0)
3951 break;
3952 per_cpu_ptr(trace_buf->data, cpu)->entries =
3953 per_cpu_ptr(size_buf->data, cpu)->entries;
3954 }
3955 } else {
3956 ret = ring_buffer_resize(trace_buf->buffer,
3957 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3958 if (ret == 0)
3959 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3960 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3961 }
3962
3963 return ret;
3964 }
3965 #endif /* CONFIG_TRACER_MAX_TRACE */
3966
3967 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3968 unsigned long size, int cpu)
3969 {
3970 int ret;
3971
3972 /*
3973 * If kernel or user changes the size of the ring buffer
3974 * we use the size that was given, and we can forget about
3975 * expanding it later.
3976 */
3977 ring_buffer_expanded = true;
3978
3979 /* May be called before buffers are initialized */
3980 if (!tr->trace_buffer.buffer)
3981 return 0;
3982
3983 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3984 if (ret < 0)
3985 return ret;
3986
3987 #ifdef CONFIG_TRACER_MAX_TRACE
3988 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3989 !tr->current_trace->use_max_tr)
3990 goto out;
3991
3992 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3993 if (ret < 0) {
3994 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3995 &tr->trace_buffer, cpu);
3996 if (r < 0) {
3997 /*
3998 * AARGH! We are left with a different
3999 * sized max buffer!!!!
4000 * The max buffer is our "snapshot" buffer.
4001 * When a tracer needs a snapshot (one of the
4002 * latency tracers), it swaps the max buffer
4003 * with the saved snapshot. We succeeded in
4004 * updating the size of the main buffer, but failed to
4005 * update the size of the max buffer. And when we tried
4006 * to reset the main buffer to the original size, we
4007 * failed there too. This is very unlikely to
4008 * happen, but if it does, warn and kill all
4009 * tracing.
4010 */
4011 WARN_ON(1);
4012 tracing_disabled = 1;
4013 }
4014 return ret;
4015 }
4016
4017 if (cpu == RING_BUFFER_ALL_CPUS)
4018 set_buffer_entries(&tr->max_buffer, size);
4019 else
4020 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4021
4022 out:
4023 #endif /* CONFIG_TRACER_MAX_TRACE */
4024
4025 if (cpu == RING_BUFFER_ALL_CPUS)
4026 set_buffer_entries(&tr->trace_buffer, size);
4027 else
4028 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4029
4030 return ret;
4031 }
4032
4033 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4034 unsigned long size, int cpu_id)
4035 {
4036 int ret = size;
4037
4038 mutex_lock(&trace_types_lock);
4039
4040 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4041 /* make sure this cpu is enabled in the mask */
4042 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4043 ret = -EINVAL;
4044 goto out;
4045 }
4046 }
4047
4048 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4049 if (ret < 0)
4050 ret = -ENOMEM;
4051
4052 out:
4053 mutex_unlock(&trace_types_lock);
4054
4055 return ret;
4056 }
4057
4058
4059 /**
4060 * tracing_update_buffers - used by tracing facility to expand ring buffers
4061 *
4062 * To save memory when tracing is never used on a system that has it
4063 * configured in, the ring buffers are set to a minimum size. But once
4064 * a user starts to use the tracing facility, the buffers need to grow
4065 * to their default size.
4066 *
4067 * This function is to be called when a tracer is about to be used.
4068 */
4069 int tracing_update_buffers(void)
4070 {
4071 int ret = 0;
4072
4073 mutex_lock(&trace_types_lock);
4074 if (!ring_buffer_expanded)
4075 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4076 RING_BUFFER_ALL_CPUS);
4077 mutex_unlock(&trace_types_lock);
4078
4079 return ret;
4080 }
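/*
 * A minimal usage sketch (hedged; the caller and error handling here are
 * illustrative assumptions, not lifted from this file): a facility that
 * is about to start emitting trace data would expand the boot-time
 * minimal buffers first,
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * and only then enable its tracer or events, so the ring buffers grow
 * from the boot-time minimum to trace_buf_size.
 */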
4081
4082 struct trace_option_dentry;
4083
4084 static struct trace_option_dentry *
4085 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4086
4087 static void
4088 destroy_trace_option_files(struct trace_option_dentry *topts);
4089
4090 /*
4091 * Used to clear out the tracer before deletion of an instance.
4092 * Must have trace_types_lock held.
4093 */
4094 static void tracing_set_nop(struct trace_array *tr)
4095 {
4096 if (tr->current_trace == &nop_trace)
4097 return;
4098
4099 tr->current_trace->enabled--;
4100
4101 if (tr->current_trace->reset)
4102 tr->current_trace->reset(tr);
4103
4104 tr->current_trace = &nop_trace;
4105 }
4106
4107 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4108 {
4109 static struct trace_option_dentry *topts;
4110 struct tracer *t;
4111 #ifdef CONFIG_TRACER_MAX_TRACE
4112 bool had_max_tr;
4113 #endif
4114 int ret = 0;
4115
4116 mutex_lock(&trace_types_lock);
4117
4118 if (!ring_buffer_expanded) {
4119 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4120 RING_BUFFER_ALL_CPUS);
4121 if (ret < 0)
4122 goto out;
4123 ret = 0;
4124 }
4125
4126 for (t = trace_types; t; t = t->next) {
4127 if (strcmp(t->name, buf) == 0)
4128 break;
4129 }
4130 if (!t) {
4131 ret = -EINVAL;
4132 goto out;
4133 }
4134 if (t == tr->current_trace)
4135 goto out;
4136
4137 /* Some tracers are only allowed for the top level buffer */
4138 if (!trace_ok_for_array(t, tr)) {
4139 ret = -EINVAL;
4140 goto out;
4141 }
4142
4143 /* If trace pipe files are being read, we can't change the tracer */
4144 if (tr->current_trace->ref) {
4145 ret = -EBUSY;
4146 goto out;
4147 }
4148
4149 trace_branch_disable();
4150
4151 tr->current_trace->enabled--;
4152
4153 if (tr->current_trace->reset)
4154 tr->current_trace->reset(tr);
4155
4156 /* Current trace needs to be nop_trace before synchronize_sched */
4157 tr->current_trace = &nop_trace;
4158
4159 #ifdef CONFIG_TRACER_MAX_TRACE
4160 had_max_tr = tr->allocated_snapshot;
4161
4162 if (had_max_tr && !t->use_max_tr) {
4163 /*
4164 * We need to make sure that update_max_tr() sees that
4165 * current_trace changed to nop_trace to keep it from
4166 * swapping the buffers after we resize it.
4167 * update_max_tr() is called with interrupts disabled,
4168 * so a synchronize_sched() is sufficient.
4169 */
4170 synchronize_sched();
4171 free_snapshot(tr);
4172 }
4173 #endif
4174 /* Currently, only the top instance has options */
4175 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4176 destroy_trace_option_files(topts);
4177 topts = create_trace_option_files(tr, t);
4178 }
4179
4180 #ifdef CONFIG_TRACER_MAX_TRACE
4181 if (t->use_max_tr && !had_max_tr) {
4182 ret = alloc_snapshot(tr);
4183 if (ret < 0)
4184 goto out;
4185 }
4186 #endif
4187
4188 if (t->init) {
4189 ret = tracer_init(t, tr);
4190 if (ret)
4191 goto out;
4192 }
4193
4194 tr->current_trace = t;
4195 tr->current_trace->enabled++;
4196 trace_branch_enable(tr);
4197 out:
4198 mutex_unlock(&trace_types_lock);
4199
4200 return ret;
4201 }
4202
4203 static ssize_t
4204 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4205 size_t cnt, loff_t *ppos)
4206 {
4207 struct trace_array *tr = filp->private_data;
4208 char buf[MAX_TRACER_SIZE+1];
4209 int i;
4210 size_t ret;
4211 int err;
4212
4213 ret = cnt;
4214
4215 if (cnt > MAX_TRACER_SIZE)
4216 cnt = MAX_TRACER_SIZE;
4217
4218 if (copy_from_user(&buf, ubuf, cnt))
4219 return -EFAULT;
4220
4221 buf[cnt] = 0;
4222
4223 /* strip trailing whitespace. */
4224 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4225 buf[i] = 0;
4226
4227 err = tracing_set_tracer(tr, buf);
4228 if (err)
4229 return err;
4230
4231 *ppos += ret;
4232
4233 return ret;
4234 }
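/*
 * Illustrative example (hedged; the debugfs path is an assumption):
 * tracing_set_trace_write() is what runs when a tracer name is written
 * to the "current_trace" file, e.g.:
 *
 *	echo function > /sys/kernel/debug/tracing/current_trace
 *	echo nop > /sys/kernel/debug/tracing/current_trace
 *
 * Trailing whitespace (including the newline added by echo) is stripped
 * before the name is matched against the registered tracers.
 */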
4235
4236 static ssize_t
4237 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4238 size_t cnt, loff_t *ppos)
4239 {
4240 char buf[64];
4241 int r;
4242
4243 r = snprintf(buf, sizeof(buf), "%ld\n",
4244 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4245 if (r > sizeof(buf))
4246 r = sizeof(buf);
4247 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4248 }
4249
4250 static ssize_t
4251 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4252 size_t cnt, loff_t *ppos)
4253 {
4254 unsigned long val;
4255 int ret;
4256
4257 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4258 if (ret)
4259 return ret;
4260
4261 *ptr = val * 1000;
4262
4263 return cnt;
4264 }
4265
4266 static ssize_t
4267 tracing_thresh_read(struct file *filp, char __user *ubuf,
4268 size_t cnt, loff_t *ppos)
4269 {
4270 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4271 }
4272
4273 static ssize_t
4274 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4275 size_t cnt, loff_t *ppos)
4276 {
4277 struct trace_array *tr = filp->private_data;
4278 int ret;
4279
4280 mutex_lock(&trace_types_lock);
4281 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4282 if (ret < 0)
4283 goto out;
4284
4285 if (tr->current_trace->update_thresh) {
4286 ret = tr->current_trace->update_thresh(tr);
4287 if (ret < 0)
4288 goto out;
4289 }
4290
4291 ret = cnt;
4292 out:
4293 mutex_unlock(&trace_types_lock);
4294
4295 return ret;
4296 }
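/*
 * Note on units (illustrative, hedged): tracing_nsecs_write() takes a
 * value in microseconds from userspace and stores it in nanoseconds
 * (val * 1000), and tracing_nsecs_read() converts back with
 * nsecs_to_usecs(). Assuming these handlers back the "tracing_thresh"
 * file, a 100 microsecond threshold would be set with:
 *
 *	echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 */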
4297
4298 static ssize_t
4299 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4300 size_t cnt, loff_t *ppos)
4301 {
4302 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4303 }
4304
4305 static ssize_t
4306 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4307 size_t cnt, loff_t *ppos)
4308 {
4309 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4310 }
4311
4312 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4313 {
4314 struct trace_array *tr = inode->i_private;
4315 struct trace_iterator *iter;
4316 int ret = 0;
4317
4318 if (tracing_disabled)
4319 return -ENODEV;
4320
4321 if (trace_array_get(tr) < 0)
4322 return -ENODEV;
4323
4324 mutex_lock(&trace_types_lock);
4325
4326 /* create a buffer to store the information to pass to userspace */
4327 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4328 if (!iter) {
4329 ret = -ENOMEM;
4330 __trace_array_put(tr);
4331 goto out;
4332 }
4333
4334 trace_seq_init(&iter->seq);
4335 iter->trace = tr->current_trace;
4336
4337 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4338 ret = -ENOMEM;
4339 goto fail;
4340 }
4341
4342 /* trace pipe does not show start of buffer */
4343 cpumask_setall(iter->started);
4344
4345 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4346 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4347
4348 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4349 if (trace_clocks[tr->clock_id].in_ns)
4350 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4351
4352 iter->tr = tr;
4353 iter->trace_buffer = &tr->trace_buffer;
4354 iter->cpu_file = tracing_get_cpu(inode);
4355 mutex_init(&iter->mutex);
4356 filp->private_data = iter;
4357
4358 if (iter->trace->pipe_open)
4359 iter->trace->pipe_open(iter);
4360
4361 nonseekable_open(inode, filp);
4362
4363 tr->current_trace->ref++;
4364 out:
4365 mutex_unlock(&trace_types_lock);
4366 return ret;
4367
4368 fail:
4369 kfree(iter->trace);
4370 kfree(iter);
4371 __trace_array_put(tr);
4372 mutex_unlock(&trace_types_lock);
4373 return ret;
4374 }
4375
4376 static int tracing_release_pipe(struct inode *inode, struct file *file)
4377 {
4378 struct trace_iterator *iter = file->private_data;
4379 struct trace_array *tr = inode->i_private;
4380
4381 mutex_lock(&trace_types_lock);
4382
4383 tr->current_trace->ref--;
4384
4385 if (iter->trace->pipe_close)
4386 iter->trace->pipe_close(iter);
4387
4388 mutex_unlock(&trace_types_lock);
4389
4390 free_cpumask_var(iter->started);
4391 mutex_destroy(&iter->mutex);
4392 kfree(iter);
4393
4394 trace_array_put(tr);
4395
4396 return 0;
4397 }
4398
4399 static unsigned int
4400 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4401 {
4402 /* Iterators are static; they should be filled or empty */
4403 if (trace_buffer_iter(iter, iter->cpu_file))
4404 return POLLIN | POLLRDNORM;
4405
4406 if (trace_flags & TRACE_ITER_BLOCK)
4407 /*
4408 * Always select as readable when in blocking mode
4409 */
4410 return POLLIN | POLLRDNORM;
4411 else
4412 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4413 filp, poll_table);
4414 }
4415
4416 static unsigned int
4417 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4418 {
4419 struct trace_iterator *iter = filp->private_data;
4420
4421 return trace_poll(iter, filp, poll_table);
4422 }
4423
4424 /* Must be called with iter->mutex held. */
4425 static int tracing_wait_pipe(struct file *filp)
4426 {
4427 struct trace_iterator *iter = filp->private_data;
4428 int ret;
4429
4430 while (trace_empty(iter)) {
4431
4432 if ((filp->f_flags & O_NONBLOCK)) {
4433 return -EAGAIN;
4434 }
4435
4436 /*
4437 * We block until we have read something and tracing is disabled.
4438 * We still block if tracing is disabled but we have never
4439 * read anything. This allows a user to cat this file, and
4440 * then enable tracing. But after we have read something,
4441 * we give an EOF when tracing is disabled again.
4442 *
4443 * iter->pos will be 0 if we haven't read anything.
4444 */
4445 if (!tracing_is_on() && iter->pos)
4446 break;
4447
4448 mutex_unlock(&iter->mutex);
4449
4450 ret = wait_on_pipe(iter, false);
4451
4452 mutex_lock(&iter->mutex);
4453
4454 if (ret)
4455 return ret;
4456 }
4457
4458 return 1;
4459 }
4460
4461 /*
4462 * Consumer reader.
4463 */
4464 static ssize_t
4465 tracing_read_pipe(struct file *filp, char __user *ubuf,
4466 size_t cnt, loff_t *ppos)
4467 {
4468 struct trace_iterator *iter = filp->private_data;
4469 ssize_t sret;
4470
4471 /* return any leftover data */
4472 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4473 if (sret != -EBUSY)
4474 return sret;
4475
4476 trace_seq_init(&iter->seq);
4477
4478 /*
4479 * Avoid more than one consumer on a single file descriptor.
4480 * This is just a matter of trace coherency; the ring buffer itself
4481 * is protected.
4482 */
4483 mutex_lock(&iter->mutex);
4484 if (iter->trace->read) {
4485 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4486 if (sret)
4487 goto out;
4488 }
4489
4490 waitagain:
4491 sret = tracing_wait_pipe(filp);
4492 if (sret <= 0)
4493 goto out;
4494
4495 /* stop when tracing is finished */
4496 if (trace_empty(iter)) {
4497 sret = 0;
4498 goto out;
4499 }
4500
4501 if (cnt >= PAGE_SIZE)
4502 cnt = PAGE_SIZE - 1;
4503
4504 /* reset all but tr, trace, and overruns */
4505 memset(&iter->seq, 0,
4506 sizeof(struct trace_iterator) -
4507 offsetof(struct trace_iterator, seq));
4508 cpumask_clear(iter->started);
4509 iter->pos = -1;
4510
4511 trace_event_read_lock();
4512 trace_access_lock(iter->cpu_file);
4513 while (trace_find_next_entry_inc(iter) != NULL) {
4514 enum print_line_t ret;
4515 int save_len = iter->seq.seq.len;
4516
4517 ret = print_trace_line(iter);
4518 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4519 /* don't print partial lines */
4520 iter->seq.seq.len = save_len;
4521 break;
4522 }
4523 if (ret != TRACE_TYPE_NO_CONSUME)
4524 trace_consume(iter);
4525
4526 if (trace_seq_used(&iter->seq) >= cnt)
4527 break;
4528
4529 /*
4530 * Setting the full flag means we reached the trace_seq buffer
4531 * size, and we should have left via the partial-line condition above.
4532 * One of the trace_seq_* functions is not being used properly.
4533 */
4534 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4535 iter->ent->type);
4536 }
4537 trace_access_unlock(iter->cpu_file);
4538 trace_event_read_unlock();
4539
4540 /* Now copy what we have to the user */
4541 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4542 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4543 trace_seq_init(&iter->seq);
4544
4545 /*
4546 * If there was nothing to send to user, in spite of consuming trace
4547 * entries, go back to wait for more entries.
4548 */
4549 if (sret == -EBUSY)
4550 goto waitagain;
4551
4552 out:
4553 mutex_unlock(&iter->mutex);
4554
4555 return sret;
4556 }
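/*
 * Hedged userspace sketch (not part of the kernel; the file path and
 * error handling are illustrative assumptions): trace_pipe is a
 * consuming, blocking reader, so a simple client can just read() in a
 * loop:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *
 * Each entry is returned at most once (the read consumes it), and the
 * read blocks until data is available unless O_NONBLOCK is set, in which
 * case -EAGAIN is returned, as handled in tracing_wait_pipe() above.
 */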
4557
4558 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4559 unsigned int idx)
4560 {
4561 __free_page(spd->pages[idx]);
4562 }
4563
4564 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4565 .can_merge = 0,
4566 .confirm = generic_pipe_buf_confirm,
4567 .release = generic_pipe_buf_release,
4568 .steal = generic_pipe_buf_steal,
4569 .get = generic_pipe_buf_get,
4570 };
4571
4572 static size_t
4573 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4574 {
4575 size_t count;
4576 int save_len;
4577 int ret;
4578
4579 /* Seq buffer is page-sized, exactly what we need. */
4580 for (;;) {
4581 save_len = iter->seq.seq.len;
4582 ret = print_trace_line(iter);
4583
4584 if (trace_seq_has_overflowed(&iter->seq)) {
4585 iter->seq.seq.len = save_len;
4586 break;
4587 }
4588
4589 /*
4590 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE should
4591 * only be returned if iter->seq overflowed. But check it
4592 * anyway to be safe.
4593 */
4594 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4595 iter->seq.seq.len = save_len;
4596 break;
4597 }
4598
4599 count = trace_seq_used(&iter->seq) - save_len;
4600 if (rem < count) {
4601 rem = 0;
4602 iter->seq.seq.len = save_len;
4603 break;
4604 }
4605
4606 if (ret != TRACE_TYPE_NO_CONSUME)
4607 trace_consume(iter);
4608 rem -= count;
4609 if (!trace_find_next_entry_inc(iter)) {
4610 rem = 0;
4611 iter->ent = NULL;
4612 break;
4613 }
4614 }
4615
4616 return rem;
4617 }
4618
4619 static ssize_t tracing_splice_read_pipe(struct file *filp,
4620 loff_t *ppos,
4621 struct pipe_inode_info *pipe,
4622 size_t len,
4623 unsigned int flags)
4624 {
4625 struct page *pages_def[PIPE_DEF_BUFFERS];
4626 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4627 struct trace_iterator *iter = filp->private_data;
4628 struct splice_pipe_desc spd = {
4629 .pages = pages_def,
4630 .partial = partial_def,
4631 .nr_pages = 0, /* This gets updated below. */
4632 .nr_pages_max = PIPE_DEF_BUFFERS,
4633 .flags = flags,
4634 .ops = &tracing_pipe_buf_ops,
4635 .spd_release = tracing_spd_release_pipe,
4636 };
4637 ssize_t ret;
4638 size_t rem;
4639 unsigned int i;
4640
4641 if (splice_grow_spd(pipe, &spd))
4642 return -ENOMEM;
4643
4644 mutex_lock(&iter->mutex);
4645
4646 if (iter->trace->splice_read) {
4647 ret = iter->trace->splice_read(iter, filp,
4648 ppos, pipe, len, flags);
4649 if (ret)
4650 goto out_err;
4651 }
4652
4653 ret = tracing_wait_pipe(filp);
4654 if (ret <= 0)
4655 goto out_err;
4656
4657 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4658 ret = -EFAULT;
4659 goto out_err;
4660 }
4661
4662 trace_event_read_lock();
4663 trace_access_lock(iter->cpu_file);
4664
4665 /* Fill as many pages as possible. */
4666 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4667 spd.pages[i] = alloc_page(GFP_KERNEL);
4668 if (!spd.pages[i])
4669 break;
4670
4671 rem = tracing_fill_pipe_page(rem, iter);
4672
4673 /* Copy the data into the page, so we can start over. */
4674 ret = trace_seq_to_buffer(&iter->seq,
4675 page_address(spd.pages[i]),
4676 trace_seq_used(&iter->seq));
4677 if (ret < 0) {
4678 __free_page(spd.pages[i]);
4679 break;
4680 }
4681 spd.partial[i].offset = 0;
4682 spd.partial[i].len = trace_seq_used(&iter->seq);
4683
4684 trace_seq_init(&iter->seq);
4685 }
4686
4687 trace_access_unlock(iter->cpu_file);
4688 trace_event_read_unlock();
4689 mutex_unlock(&iter->mutex);
4690
4691 spd.nr_pages = i;
4692
4693 ret = splice_to_pipe(pipe, &spd);
4694 out:
4695 splice_shrink_spd(&spd);
4696 return ret;
4697
4698 out_err:
4699 mutex_unlock(&iter->mutex);
4700 goto out;
4701 }
4702
4703 static ssize_t
4704 tracing_entries_read(struct file *filp, char __user *ubuf,
4705 size_t cnt, loff_t *ppos)
4706 {
4707 struct inode *inode = file_inode(filp);
4708 struct trace_array *tr = inode->i_private;
4709 int cpu = tracing_get_cpu(inode);
4710 char buf[64];
4711 int r = 0;
4712 ssize_t ret;
4713
4714 mutex_lock(&trace_types_lock);
4715
4716 if (cpu == RING_BUFFER_ALL_CPUS) {
4717 int cpu, buf_size_same;
4718 unsigned long size;
4719
4720 size = 0;
4721 buf_size_same = 1;
4722 /* check if all cpu buffer sizes are the same */
4723 for_each_tracing_cpu(cpu) {
4724 /* fill in the size from first enabled cpu */
4725 if (size == 0)
4726 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4727 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4728 buf_size_same = 0;
4729 break;
4730 }
4731 }
4732
4733 if (buf_size_same) {
4734 if (!ring_buffer_expanded)
4735 r = sprintf(buf, "%lu (expanded: %lu)\n",
4736 size >> 10,
4737 trace_buf_size >> 10);
4738 else
4739 r = sprintf(buf, "%lu\n", size >> 10);
4740 } else
4741 r = sprintf(buf, "X\n");
4742 } else
4743 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4744
4745 mutex_unlock(&trace_types_lock);
4746
4747 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4748 return ret;
4749 }
4750
4751 static ssize_t
4752 tracing_entries_write(struct file *filp, const char __user *ubuf,
4753 size_t cnt, loff_t *ppos)
4754 {
4755 struct inode *inode = file_inode(filp);
4756 struct trace_array *tr = inode->i_private;
4757 unsigned long val;
4758 int ret;
4759
4760 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4761 if (ret)
4762 return ret;
4763
4764 /* must have at least 1 entry */
4765 if (!val)
4766 return -EINVAL;
4767
4768 /* value is in KB */
4769 val <<= 10;
4770 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4771 if (ret < 0)
4772 return ret;
4773
4774 *ppos += cnt;
4775
4776 return cnt;
4777 }
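/*
 * Illustrative example (hedged; paths are assumptions): these read/write
 * handlers typically back the "buffer_size_kb" files. The written value
 * is interpreted in KB (val <<= 10 above) and applies either to all CPUs
 * or to a single CPU when accessed through a per_cpu/cpuN/ directory:
 *
 *	echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *	echo 1408 > /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 *
 * Reads report "X" when the per-CPU sizes differ, per
 * tracing_entries_read() above.
 */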
4778
4779 static ssize_t
4780 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4781 size_t cnt, loff_t *ppos)
4782 {
4783 struct trace_array *tr = filp->private_data;
4784 char buf[64];
4785 int r, cpu;
4786 unsigned long size = 0, expanded_size = 0;
4787
4788 mutex_lock(&trace_types_lock);
4789 for_each_tracing_cpu(cpu) {
4790 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4791 if (!ring_buffer_expanded)
4792 expanded_size += trace_buf_size >> 10;
4793 }
4794 if (ring_buffer_expanded)
4795 r = sprintf(buf, "%lu\n", size);
4796 else
4797 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4798 mutex_unlock(&trace_types_lock);
4799
4800 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4801 }
4802
4803 static ssize_t
4804 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4805 size_t cnt, loff_t *ppos)
4806 {
4807 /*
4808 * There is no need to read what the user has written; this function
4809 * is just here to make sure that "echo" into this file does not return an error.
4810 */
4811
4812 *ppos += cnt;
4813
4814 return cnt;
4815 }
4816
4817 static int
4818 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4819 {
4820 struct trace_array *tr = inode->i_private;
4821
4822 /* disable tracing ? */
4823 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4824 tracer_tracing_off(tr);
4825 /* resize the ring buffer to 0 */
4826 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4827
4828 trace_array_put(tr);
4829
4830 return 0;
4831 }
4832
4833 static ssize_t
4834 tracing_mark_write(struct file *filp, const char __user *ubuf,
4835 size_t cnt, loff_t *fpos)
4836 {
4837 unsigned long addr = (unsigned long)ubuf;
4838 struct trace_array *tr = filp->private_data;
4839 struct ring_buffer_event *event;
4840 struct ring_buffer *buffer;
4841 struct print_entry *entry;
4842 unsigned long irq_flags;
4843 struct page *pages[2];
4844 void *map_page[2];
4845 int nr_pages = 1;
4846 ssize_t written;
4847 int offset;
4848 int size;
4849 int len;
4850 int ret;
4851 int i;
4852
4853 if (tracing_disabled)
4854 return -EINVAL;
4855
4856 if (!(trace_flags & TRACE_ITER_MARKERS))
4857 return -EINVAL;
4858
4859 if (cnt > TRACE_BUF_SIZE)
4860 cnt = TRACE_BUF_SIZE;
4861
4862 /*
4863 * Userspace is injecting traces into the kernel trace buffer.
4864 * We want to be as non-intrusive as possible.
4865 * To do so, we do not want to allocate any special buffers
4866 * or take any locks, but instead write the userspace data
4867 * straight into the ring buffer.
4868 *
4869 * First we need to pin the userspace buffer into memory,
4870 * which it most likely already is, because userspace just referenced
4871 * it. But there's no guarantee that it is. By using get_user_pages_fast()
4872 * and kmap_atomic()/kunmap_atomic() we can get access to the
4873 * pages directly. We then write the data directly into the
4874 * ring buffer.
4875 */
4876 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4877
4878 /* check if we cross pages */
4879 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4880 nr_pages = 2;
4881
4882 offset = addr & (PAGE_SIZE - 1);
4883 addr &= PAGE_MASK;
4884
4885 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4886 if (ret < nr_pages) {
4887 while (--ret >= 0)
4888 put_page(pages[ret]);
4889 written = -EFAULT;
4890 goto out;
4891 }
4892
4893 for (i = 0; i < nr_pages; i++)
4894 map_page[i] = kmap_atomic(pages[i]);
4895
4896 local_save_flags(irq_flags);
4897 size = sizeof(*entry) + cnt + 2; /* possible \n added */
4898 buffer = tr->trace_buffer.buffer;
4899 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4900 irq_flags, preempt_count());
4901 if (!event) {
4902 /* Ring buffer disabled, return as if not open for write */
4903 written = -EBADF;
4904 goto out_unlock;
4905 }
4906
4907 entry = ring_buffer_event_data(event);
4908 entry->ip = _THIS_IP_;
4909
4910 if (nr_pages == 2) {
4911 len = PAGE_SIZE - offset;
4912 memcpy(&entry->buf, map_page[0] + offset, len);
4913 memcpy(&entry->buf[len], map_page[1], cnt - len);
4914 } else
4915 memcpy(&entry->buf, map_page[0] + offset, cnt);
4916
4917 if (entry->buf[cnt - 1] != '\n') {
4918 entry->buf[cnt] = '\n';
4919 entry->buf[cnt + 1] = '\0';
4920 } else
4921 entry->buf[cnt] = '\0';
4922
4923 __buffer_unlock_commit(buffer, event);
4924
4925 written = cnt;
4926
4927 *fpos += written;
4928
4929 out_unlock:
4930 for (i = 0; i < nr_pages; i++){
4931 kunmap_atomic(map_page[i]);
4932 put_page(pages[i]);
4933 }
4934 out:
4935 return written;
4936 }
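/*
 * Hedged userspace sketch (the path and the message are illustrative
 * assumptions): this handler typically backs the "trace_marker" file,
 * letting a program inject its own annotations into the trace without
 * any extra buffering or locking on the kernel side:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	if (fd >= 0)
 *		write(fd, "frame start\n", 12);
 *
 * Writes longer than TRACE_BUF_SIZE are truncated, and a newline is
 * appended if the data does not already end with one (see above).
 */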
4937
4938 static int tracing_clock_show(struct seq_file *m, void *v)
4939 {
4940 struct trace_array *tr = m->private;
4941 int i;
4942
4943 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4944 seq_printf(m,
4945 "%s%s%s%s", i ? " " : "",
4946 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4947 i == tr->clock_id ? "]" : "");
4948 seq_putc(m, '\n');
4949
4950 return 0;
4951 }
4952
4953 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4954 {
4955 int i;
4956
4957 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4958 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4959 break;
4960 }
4961 if (i == ARRAY_SIZE(trace_clocks))
4962 return -EINVAL;
4963
4964 mutex_lock(&trace_types_lock);
4965
4966 tr->clock_id = i;
4967
4968 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4969
4970 /*
4971 * New clock may not be consistent with the previous clock.
4972 * Reset the buffer so that it doesn't have incomparable timestamps.
4973 */
4974 tracing_reset_online_cpus(&tr->trace_buffer);
4975
4976 #ifdef CONFIG_TRACER_MAX_TRACE
4977 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4978 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4979 tracing_reset_online_cpus(&tr->max_buffer);
4980 #endif
4981
4982 mutex_unlock(&trace_types_lock);
4983
4984 return 0;
4985 }
4986
4987 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4988 size_t cnt, loff_t *fpos)
4989 {
4990 struct seq_file *m = filp->private_data;
4991 struct trace_array *tr = m->private;
4992 char buf[64];
4993 const char *clockstr;
4994 int ret;
4995
4996 if (cnt >= sizeof(buf))
4997 return -EINVAL;
4998
4999 if (copy_from_user(&buf, ubuf, cnt))
5000 return -EFAULT;
5001
5002 buf[cnt] = 0;
5003
5004 clockstr = strstrip(buf);
5005
5006 ret = tracing_set_clock(tr, clockstr);
5007 if (ret)
5008 return ret;
5009
5010 *fpos += cnt;
5011
5012 return cnt;
5013 }
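/*
 * Illustrative example (hedged; path and output are assumptions): with
 * these handlers behind the "trace_clock" file, the active clock is
 * shown in brackets and a new one is selected by writing its name:
 *
 *	cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter ...          (example output)
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Note that tracing_set_clock() resets the ring buffers, since
 * timestamps taken from different clocks are not comparable.
 */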
5014
5015 static int tracing_clock_open(struct inode *inode, struct file *file)
5016 {
5017 struct trace_array *tr = inode->i_private;
5018 int ret;
5019
5020 if (tracing_disabled)
5021 return -ENODEV;
5022
5023 if (trace_array_get(tr))
5024 return -ENODEV;
5025
5026 ret = single_open(file, tracing_clock_show, inode->i_private);
5027 if (ret < 0)
5028 trace_array_put(tr);
5029
5030 return ret;
5031 }
5032
5033 struct ftrace_buffer_info {
5034 struct trace_iterator iter;
5035 void *spare;
5036 unsigned int read;
5037 };
5038
5039 #ifdef CONFIG_TRACER_SNAPSHOT
5040 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5041 {
5042 struct trace_array *tr = inode->i_private;
5043 struct trace_iterator *iter;
5044 struct seq_file *m;
5045 int ret = 0;
5046
5047 if (trace_array_get(tr) < 0)
5048 return -ENODEV;
5049
5050 if (file->f_mode & FMODE_READ) {
5051 iter = __tracing_open(inode, file, true);
5052 if (IS_ERR(iter))
5053 ret = PTR_ERR(iter);
5054 } else {
5055 /* Writes still need the seq_file to hold the private data */
5056 ret = -ENOMEM;
5057 m = kzalloc(sizeof(*m), GFP_KERNEL);
5058 if (!m)
5059 goto out;
5060 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5061 if (!iter) {
5062 kfree(m);
5063 goto out;
5064 }
5065 ret = 0;
5066
5067 iter->tr = tr;
5068 iter->trace_buffer = &tr->max_buffer;
5069 iter->cpu_file = tracing_get_cpu(inode);
5070 m->private = iter;
5071 file->private_data = m;
5072 }
5073 out:
5074 if (ret < 0)
5075 trace_array_put(tr);
5076
5077 return ret;
5078 }
5079
5080 static ssize_t
5081 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5082 loff_t *ppos)
5083 {
5084 struct seq_file *m = filp->private_data;
5085 struct trace_iterator *iter = m->private;
5086 struct trace_array *tr = iter->tr;
5087 unsigned long val;
5088 int ret;
5089
5090 ret = tracing_update_buffers();
5091 if (ret < 0)
5092 return ret;
5093
5094 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5095 if (ret)
5096 return ret;
5097
5098 mutex_lock(&trace_types_lock);
5099
5100 if (tr->current_trace->use_max_tr) {
5101 ret = -EBUSY;
5102 goto out;
5103 }
5104
5105 switch (val) {
5106 case 0:
5107 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5108 ret = -EINVAL;
5109 break;
5110 }
5111 if (tr->allocated_snapshot)
5112 free_snapshot(tr);
5113 break;
5114 case 1:
5115 /* Only allow per-cpu swap if the ring buffer supports it */
5116 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5117 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5118 ret = -EINVAL;
5119 break;
5120 }
5121 #endif
5122 if (!tr->allocated_snapshot) {
5123 ret = alloc_snapshot(tr);
5124 if (ret < 0)
5125 break;
5126 }
5127 local_irq_disable();
5128 /* Now, we're going to swap */
5129 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5130 update_max_tr(tr, current, smp_processor_id());
5131 else
5132 update_max_tr_single(tr, current, iter->cpu_file);
5133 local_irq_enable();
5134 break;
5135 default:
5136 if (tr->allocated_snapshot) {
5137 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5138 tracing_reset_online_cpus(&tr->max_buffer);
5139 else
5140 tracing_reset(&tr->max_buffer, iter->cpu_file);
5141 }
5142 break;
5143 }
5144
5145 if (ret >= 0) {
5146 *ppos += cnt;
5147 ret = cnt;
5148 }
5149 out:
5150 mutex_unlock(&trace_types_lock);
5151 return ret;
5152 }
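/*
 * Illustrative example (hedged; the debugfs path is an assumption):
 * tracing_snapshot_write() implements the value protocol of the
 * "snapshot" file:
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot   (allocate if needed and take a snapshot)
 *	echo 0 > /sys/kernel/debug/tracing/snapshot   (free the snapshot buffer)
 *	echo 2 > /sys/kernel/debug/tracing/snapshot   (any other value: clear the snapshot contents)
 *	cat /sys/kernel/debug/tracing/snapshot        (read the snapshotted trace)
 *
 * Per-CPU variants live under per_cpu/cpuN/, with the restrictions
 * handled in the switch statement above.
 */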
5153
5154 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5155 {
5156 struct seq_file *m = file->private_data;
5157 int ret;
5158
5159 ret = tracing_release(inode, file);
5160
5161 if (file->f_mode & FMODE_READ)
5162 return ret;
5163
5164 /* If write only, the seq_file is just a stub */
5165 if (m)
5166 kfree(m->private);
5167 kfree(m);
5168
5169 return 0;
5170 }
5171
5172 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5173 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5174 size_t count, loff_t *ppos);
5175 static int tracing_buffers_release(struct inode *inode, struct file *file);
5176 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5177 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5178
5179 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5180 {
5181 struct ftrace_buffer_info *info;
5182 int ret;
5183
5184 ret = tracing_buffers_open(inode, filp);
5185 if (ret < 0)
5186 return ret;
5187
5188 info = filp->private_data;
5189
5190 if (info->iter.trace->use_max_tr) {
5191 tracing_buffers_release(inode, filp);
5192 return -EBUSY;
5193 }
5194
5195 info->iter.snapshot = true;
5196 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5197
5198 return ret;
5199 }
5200
5201 #endif /* CONFIG_TRACER_SNAPSHOT */
5202
5203
5204 static const struct file_operations tracing_thresh_fops = {
5205 .open = tracing_open_generic,
5206 .read = tracing_thresh_read,
5207 .write = tracing_thresh_write,
5208 .llseek = generic_file_llseek,
5209 };
5210
5211 static const struct file_operations tracing_max_lat_fops = {
5212 .open = tracing_open_generic,
5213 .read = tracing_max_lat_read,
5214 .write = tracing_max_lat_write,
5215 .llseek = generic_file_llseek,
5216 };
5217
5218 static const struct file_operations set_tracer_fops = {
5219 .open = tracing_open_generic,
5220 .read = tracing_set_trace_read,
5221 .write = tracing_set_trace_write,
5222 .llseek = generic_file_llseek,
5223 };
5224
5225 static const struct file_operations tracing_pipe_fops = {
5226 .open = tracing_open_pipe,
5227 .poll = tracing_poll_pipe,
5228 .read = tracing_read_pipe,
5229 .splice_read = tracing_splice_read_pipe,
5230 .release = tracing_release_pipe,
5231 .llseek = no_llseek,
5232 };
5233
5234 static const struct file_operations tracing_entries_fops = {
5235 .open = tracing_open_generic_tr,
5236 .read = tracing_entries_read,
5237 .write = tracing_entries_write,
5238 .llseek = generic_file_llseek,
5239 .release = tracing_release_generic_tr,
5240 };
5241
5242 static const struct file_operations tracing_total_entries_fops = {
5243 .open = tracing_open_generic_tr,
5244 .read = tracing_total_entries_read,
5245 .llseek = generic_file_llseek,
5246 .release = tracing_release_generic_tr,
5247 };
5248
5249 static const struct file_operations tracing_free_buffer_fops = {
5250 .open = tracing_open_generic_tr,
5251 .write = tracing_free_buffer_write,
5252 .release = tracing_free_buffer_release,
5253 };
5254
5255 static const struct file_operations tracing_mark_fops = {
5256 .open = tracing_open_generic_tr,
5257 .write = tracing_mark_write,
5258 .llseek = generic_file_llseek,
5259 .release = tracing_release_generic_tr,
5260 };
5261
5262 static const struct file_operations trace_clock_fops = {
5263 .open = tracing_clock_open,
5264 .read = seq_read,
5265 .llseek = seq_lseek,
5266 .release = tracing_single_release_tr,
5267 .write = tracing_clock_write,
5268 };
5269
5270 #ifdef CONFIG_TRACER_SNAPSHOT
5271 static const struct file_operations snapshot_fops = {
5272 .open = tracing_snapshot_open,
5273 .read = seq_read,
5274 .write = tracing_snapshot_write,
5275 .llseek = tracing_lseek,
5276 .release = tracing_snapshot_release,
5277 };
5278
5279 static const struct file_operations snapshot_raw_fops = {
5280 .open = snapshot_raw_open,
5281 .read = tracing_buffers_read,
5282 .release = tracing_buffers_release,
5283 .splice_read = tracing_buffers_splice_read,
5284 .llseek = no_llseek,
5285 };
5286
5287 #endif /* CONFIG_TRACER_SNAPSHOT */
5288
5289 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5290 {
5291 struct trace_array *tr = inode->i_private;
5292 struct ftrace_buffer_info *info;
5293 int ret;
5294
5295 if (tracing_disabled)
5296 return -ENODEV;
5297
5298 if (trace_array_get(tr) < 0)
5299 return -ENODEV;
5300
5301 info = kzalloc(sizeof(*info), GFP_KERNEL);
5302 if (!info) {
5303 trace_array_put(tr);
5304 return -ENOMEM;
5305 }
5306
5307 mutex_lock(&trace_types_lock);
5308
5309 info->iter.tr = tr;
5310 info->iter.cpu_file = tracing_get_cpu(inode);
5311 info->iter.trace = tr->current_trace;
5312 info->iter.trace_buffer = &tr->trace_buffer;
5313 info->spare = NULL;
5314 /* Force reading ring buffer for first read */
5315 info->read = (unsigned int)-1;
5316
5317 filp->private_data = info;
5318
5319 tr->current_trace->ref++;
5320
5321 mutex_unlock(&trace_types_lock);
5322
5323 ret = nonseekable_open(inode, filp);
5324 if (ret < 0)
5325 trace_array_put(tr);
5326
5327 return ret;
5328 }
5329
5330 static unsigned int
5331 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5332 {
5333 struct ftrace_buffer_info *info = filp->private_data;
5334 struct trace_iterator *iter = &info->iter;
5335
5336 return trace_poll(iter, filp, poll_table);
5337 }
5338
5339 static ssize_t
5340 tracing_buffers_read(struct file *filp, char __user *ubuf,
5341 size_t count, loff_t *ppos)
5342 {
5343 struct ftrace_buffer_info *info = filp->private_data;
5344 struct trace_iterator *iter = &info->iter;
5345 ssize_t ret;
5346 ssize_t size;
5347
5348 if (!count)
5349 return 0;
5350
5351 #ifdef CONFIG_TRACER_MAX_TRACE
5352 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5353 return -EBUSY;
5354 #endif
5355
5356 if (!info->spare)
5357 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5358 iter->cpu_file);
5359 if (!info->spare)
5360 return -ENOMEM;
5361
5362 /* Do we have previous read data to read? */
5363 if (info->read < PAGE_SIZE)
5364 goto read;
5365
5366 again:
5367 trace_access_lock(iter->cpu_file);
5368 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5369 &info->spare,
5370 count,
5371 iter->cpu_file, 0);
5372 trace_access_unlock(iter->cpu_file);
5373
5374 if (ret < 0) {
5375 if (trace_empty(iter)) {
5376 if ((filp->f_flags & O_NONBLOCK))
5377 return -EAGAIN;
5378
5379 ret = wait_on_pipe(iter, false);
5380 if (ret)
5381 return ret;
5382
5383 goto again;
5384 }
5385 return 0;
5386 }
5387
5388 info->read = 0;
5389 read:
5390 size = PAGE_SIZE - info->read;
5391 if (size > count)
5392 size = count;
5393
5394 ret = copy_to_user(ubuf, info->spare + info->read, size);
5395 if (ret == size)
5396 return -EFAULT;
5397
5398 size -= ret;
5399
5400 *ppos += size;
5401 info->read += size;
5402
5403 return size;
5404 }
5405
5406 static int tracing_buffers_release(struct inode *inode, struct file *file)
5407 {
5408 struct ftrace_buffer_info *info = file->private_data;
5409 struct trace_iterator *iter = &info->iter;
5410
5411 mutex_lock(&trace_types_lock);
5412
5413 iter->tr->current_trace->ref--;
5414
5415 __trace_array_put(iter->tr);
5416
5417 if (info->spare)
5418 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5419 kfree(info);
5420
5421 mutex_unlock(&trace_types_lock);
5422
5423 return 0;
5424 }
5425
5426 struct buffer_ref {
5427 struct ring_buffer *buffer;
5428 void *page;
5429 int ref;
5430 };
5431
5432 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5433 struct pipe_buffer *buf)
5434 {
5435 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5436
5437 if (--ref->ref)
5438 return;
5439
5440 ring_buffer_free_read_page(ref->buffer, ref->page);
5441 kfree(ref);
5442 buf->private = 0;
5443 }
5444
5445 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5446 struct pipe_buffer *buf)
5447 {
5448 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5449
5450 ref->ref++;
5451 }
5452
5453 /* Pipe buffer operations for a buffer. */
5454 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5455 .can_merge = 0,
5456 .confirm = generic_pipe_buf_confirm,
5457 .release = buffer_pipe_buf_release,
5458 .steal = generic_pipe_buf_steal,
5459 .get = buffer_pipe_buf_get,
5460 };
5461
5462 /*
5463 * Callback from splice_to_pipe(), used to release some pages at the
5464 * end of the spd in case we errored out while filling the pipe.
5465 */
5466 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5467 {
5468 struct buffer_ref *ref =
5469 (struct buffer_ref *)spd->partial[i].private;
5470
5471 if (--ref->ref)
5472 return;
5473
5474 ring_buffer_free_read_page(ref->buffer, ref->page);
5475 kfree(ref);
5476 spd->partial[i].private = 0;
5477 }
5478
5479 static ssize_t
5480 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5481 struct pipe_inode_info *pipe, size_t len,
5482 unsigned int flags)
5483 {
5484 struct ftrace_buffer_info *info = file->private_data;
5485 struct trace_iterator *iter = &info->iter;
5486 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5487 struct page *pages_def[PIPE_DEF_BUFFERS];
5488 struct splice_pipe_desc spd = {
5489 .pages = pages_def,
5490 .partial = partial_def,
5491 .nr_pages_max = PIPE_DEF_BUFFERS,
5492 .flags = flags,
5493 .ops = &buffer_pipe_buf_ops,
5494 .spd_release = buffer_spd_release,
5495 };
5496 struct buffer_ref *ref;
5497 int entries, size, i;
5498 ssize_t ret = 0;
5499
5500 #ifdef CONFIG_TRACER_MAX_TRACE
5501 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5502 return -EBUSY;
5503 #endif
5504
5505 if (splice_grow_spd(pipe, &spd))
5506 return -ENOMEM;
5507
5508 if (*ppos & (PAGE_SIZE - 1))
5509 return -EINVAL;
5510
5511 if (len & (PAGE_SIZE - 1)) {
5512 if (len < PAGE_SIZE)
5513 return -EINVAL;
5514 len &= PAGE_MASK;
5515 }
5516
5517 again:
5518 trace_access_lock(iter->cpu_file);
5519 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5520
5521 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5522 struct page *page;
5523 int r;
5524
5525 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5526 if (!ref) {
5527 ret = -ENOMEM;
5528 break;
5529 }
5530
5531 ref->ref = 1;
5532 ref->buffer = iter->trace_buffer->buffer;
5533 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5534 if (!ref->page) {
5535 ret = -ENOMEM;
5536 kfree(ref);
5537 break;
5538 }
5539
5540 r = ring_buffer_read_page(ref->buffer, &ref->page,
5541 len, iter->cpu_file, 1);
5542 if (r < 0) {
5543 ring_buffer_free_read_page(ref->buffer, ref->page);
5544 kfree(ref);
5545 break;
5546 }
5547
5548 /*
5549 * Zero out any leftover data; this is going to
5550 * user land.
5551 */
5552 size = ring_buffer_page_len(ref->page);
5553 if (size < PAGE_SIZE)
5554 memset(ref->page + size, 0, PAGE_SIZE - size);
5555
5556 page = virt_to_page(ref->page);
5557
5558 spd.pages[i] = page;
5559 spd.partial[i].len = PAGE_SIZE;
5560 spd.partial[i].offset = 0;
5561 spd.partial[i].private = (unsigned long)ref;
5562 spd.nr_pages++;
5563 *ppos += PAGE_SIZE;
5564
5565 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5566 }
5567
5568 trace_access_unlock(iter->cpu_file);
5569 spd.nr_pages = i;
5570
5571 /* did we read anything? */
5572 if (!spd.nr_pages) {
5573 if (ret)
5574 return ret;
5575
5576 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5577 return -EAGAIN;
5578
5579 ret = wait_on_pipe(iter, true);
5580 if (ret)
5581 return ret;
5582
5583 goto again;
5584 }
5585
5586 ret = splice_to_pipe(pipe, &spd);
5587 splice_shrink_spd(&spd);
5588
5589 return ret;
5590 }
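/*
 * Hedged userspace sketch (the file path, the pipe plumbing and the
 * 4 KiB page-size assumption are illustrative): tracing_buffers_splice_read()
 * moves whole ring-buffer pages, so a zero-copy consumer of
 * per_cpu/cpuN/trace_pipe_raw would splice page-sized chunks into a pipe
 * and then drain that pipe:
 *
 *	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	int p[2];
 *
 *	pipe(p);
 *	for (;;) {
 *		ssize_t n = splice(fd, NULL, p[1], NULL, 4096, 0);
 *		if (n <= 0)
 *			break;
 *		splice(p[0], NULL, STDOUT_FILENO, NULL, n, 0);
 *	}
 *
 * Per the checks at the top of the function, *ppos must be page aligned
 * and the requested length must be at least one page (longer lengths are
 * rounded down to a page multiple).
 */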
5591
5592 static const struct file_operations tracing_buffers_fops = {
5593 .open = tracing_buffers_open,
5594 .read = tracing_buffers_read,
5595 .poll = tracing_buffers_poll,
5596 .release = tracing_buffers_release,
5597 .splice_read = tracing_buffers_splice_read,
5598 .llseek = no_llseek,
5599 };
5600
5601 static ssize_t
5602 tracing_stats_read(struct file *filp, char __user *ubuf,
5603 size_t count, loff_t *ppos)
5604 {
5605 struct inode *inode = file_inode(filp);
5606 struct trace_array *tr = inode->i_private;
5607 struct trace_buffer *trace_buf = &tr->trace_buffer;
5608 int cpu = tracing_get_cpu(inode);
5609 struct trace_seq *s;
5610 unsigned long cnt;
5611 unsigned long long t;
5612 unsigned long usec_rem;
5613
5614 s = kmalloc(sizeof(*s), GFP_KERNEL);
5615 if (!s)
5616 return -ENOMEM;
5617
5618 trace_seq_init(s);
5619
5620 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5621 trace_seq_printf(s, "entries: %ld\n", cnt);
5622
5623 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5624 trace_seq_printf(s, "overrun: %ld\n", cnt);
5625
5626 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5627 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5628
5629 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5630 trace_seq_printf(s, "bytes: %ld\n", cnt);
5631
5632 if (trace_clocks[tr->clock_id].in_ns) {
5633 /* local or global for trace_clock */
5634 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5635 usec_rem = do_div(t, USEC_PER_SEC);
5636 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5637 t, usec_rem);
5638
5639 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5640 usec_rem = do_div(t, USEC_PER_SEC);
5641 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5642 } else {
5643 /* counter or tsc mode for trace_clock */
5644 trace_seq_printf(s, "oldest event ts: %llu\n",
5645 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5646
5647 trace_seq_printf(s, "now ts: %llu\n",
5648 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5649 }
5650
5651 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5652 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5653
5654 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5655 trace_seq_printf(s, "read events: %ld\n", cnt);
5656
5657 count = simple_read_from_buffer(ubuf, count, ppos,
5658 s->buffer, trace_seq_used(s));
5659
5660 kfree(s);
5661
5662 return count;
5663 }
5664
5665 static const struct file_operations tracing_stats_fops = {
5666 .open = tracing_open_generic_tr,
5667 .read = tracing_stats_read,
5668 .llseek = generic_file_llseek,
5669 .release = tracing_release_generic_tr,
5670 };
5671
5672 #ifdef CONFIG_DYNAMIC_FTRACE
5673
5674 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5675 {
5676 return 0;
5677 }
5678
5679 static ssize_t
5680 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5681 size_t cnt, loff_t *ppos)
5682 {
5683 static char ftrace_dyn_info_buffer[1024];
5684 static DEFINE_MUTEX(dyn_info_mutex);
5685 unsigned long *p = filp->private_data;
5686 char *buf = ftrace_dyn_info_buffer;
5687 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5688 int r;
5689
5690 mutex_lock(&dyn_info_mutex);
5691 r = sprintf(buf, "%ld ", *p);
5692
5693 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5694 buf[r++] = '\n';
5695
5696 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5697
5698 mutex_unlock(&dyn_info_mutex);
5699
5700 return r;
5701 }
5702
5703 static const struct file_operations tracing_dyn_info_fops = {
5704 .open = tracing_open_generic,
5705 .read = tracing_read_dyn_info,
5706 .llseek = generic_file_llseek,
5707 };
5708 #endif /* CONFIG_DYNAMIC_FTRACE */
5709
5710 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5711 static void
5712 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5713 {
5714 tracing_snapshot();
5715 }
5716
5717 static void
5718 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5719 {
5720 unsigned long *count = (long *)data;
5721
5722 if (!*count)
5723 return;
5724
5725 if (*count != -1)
5726 (*count)--;
5727
5728 tracing_snapshot();
5729 }
5730
5731 static int
5732 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5733 struct ftrace_probe_ops *ops, void *data)
5734 {
5735 long count = (long)data;
5736
5737 seq_printf(m, "%ps:", (void *)ip);
5738
5739 seq_puts(m, "snapshot");
5740
5741 if (count == -1)
5742 seq_puts(m, ":unlimited\n");
5743 else
5744 seq_printf(m, ":count=%ld\n", count);
5745
5746 return 0;
5747 }
5748
5749 static struct ftrace_probe_ops snapshot_probe_ops = {
5750 .func = ftrace_snapshot,
5751 .print = ftrace_snapshot_print,
5752 };
5753
5754 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5755 .func = ftrace_count_snapshot,
5756 .print = ftrace_snapshot_print,
5757 };
5758
5759 static int
5760 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5761 char *glob, char *cmd, char *param, int enable)
5762 {
5763 struct ftrace_probe_ops *ops;
5764 void *count = (void *)-1;
5765 char *number;
5766 int ret;
5767
5768 /* hash funcs only work with set_ftrace_filter */
5769 if (!enable)
5770 return -EINVAL;
5771
5772 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5773
5774 if (glob[0] == '!') {
5775 unregister_ftrace_function_probe_func(glob+1, ops);
5776 return 0;
5777 }
5778
5779 if (!param)
5780 goto out_reg;
5781
5782 number = strsep(&param, ":");
5783
5784 if (!strlen(number))
5785 goto out_reg;
5786
5787 /*
5788 * We use the callback data field (which is a pointer)
5789 * as our counter.
5790 */
5791 ret = kstrtoul(number, 0, (unsigned long *)&count);
5792 if (ret)
5793 return ret;
5794
5795 out_reg:
5796 ret = register_ftrace_function_probe(glob, ops, count);
5797
5798 if (ret >= 0)
5799 alloc_snapshot(&global_trace);
5800
5801 return ret < 0 ? ret : 0;
5802 }
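/*
 * Illustrative example (hedged; do_exit is just an example of a
 * traceable function): this callback implements the "snapshot" function
 * command parsed from set_ftrace_filter, e.g.:
 *
 *	echo 'do_exit:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'do_exit:snapshot:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '!do_exit:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first form snapshots on every hit, the second limits it to 5 hits
 * (the optional :count is parsed via strsep()/kstrtoul() above), and the
 * leading '!' unregisters the probe.
 */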
5803
5804 static struct ftrace_func_command ftrace_snapshot_cmd = {
5805 .name = "snapshot",
5806 .func = ftrace_trace_snapshot_callback,
5807 };
5808
5809 static __init int register_snapshot_cmd(void)
5810 {
5811 return register_ftrace_command(&ftrace_snapshot_cmd);
5812 }
5813 #else
5814 static inline __init int register_snapshot_cmd(void) { return 0; }
5815 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5816
5817 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5818 {
5819 if (tr->dir)
5820 return tr->dir;
5821
5822 if (!debugfs_initialized())
5823 return NULL;
5824
5825 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5826 tr->dir = debugfs_create_dir("tracing", NULL);
5827
5828 if (!tr->dir)
5829 pr_warn_once("Could not create debugfs directory 'tracing'\n");
5830
5831 return tr->dir;
5832 }
5833
5834 struct dentry *tracing_init_dentry(void)
5835 {
5836 return tracing_init_dentry_tr(&global_trace);
5837 }
5838
5839 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5840 {
5841 struct dentry *d_tracer;
5842
5843 if (tr->percpu_dir)
5844 return tr->percpu_dir;
5845
5846 d_tracer = tracing_init_dentry_tr(tr);
5847 if (!d_tracer)
5848 return NULL;
5849
5850 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5851
5852 WARN_ONCE(!tr->percpu_dir,
5853 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5854
5855 return tr->percpu_dir;
5856 }
5857
5858 static struct dentry *
5859 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5860 void *data, long cpu, const struct file_operations *fops)
5861 {
5862 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5863
5864 if (ret) /* See tracing_get_cpu() */
5865 ret->d_inode->i_cdev = (void *)(cpu + 1);
5866 return ret;
5867 }
5868
5869 static void
5870 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5871 {
5872 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5873 struct dentry *d_cpu;
5874 char cpu_dir[30]; /* 30 characters should be more than enough */
5875
5876 if (!d_percpu)
5877 return;
5878
5879 snprintf(cpu_dir, 30, "cpu%ld", cpu);
5880 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5881 if (!d_cpu) {
5882 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5883 return;
5884 }
5885
5886 /* per cpu trace_pipe */
5887 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5888 tr, cpu, &tracing_pipe_fops);
5889
5890 /* per cpu trace */
5891 trace_create_cpu_file("trace", 0644, d_cpu,
5892 tr, cpu, &tracing_fops);
5893
5894 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5895 tr, cpu, &tracing_buffers_fops);
5896
5897 trace_create_cpu_file("stats", 0444, d_cpu,
5898 tr, cpu, &tracing_stats_fops);
5899
5900 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5901 tr, cpu, &tracing_entries_fops);
5902
5903 #ifdef CONFIG_TRACER_SNAPSHOT
5904 trace_create_cpu_file("snapshot", 0644, d_cpu,
5905 tr, cpu, &snapshot_fops);
5906
5907 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5908 tr, cpu, &snapshot_raw_fops);
5909 #endif
5910 }
5911
5912 #ifdef CONFIG_FTRACE_SELFTEST
5913 /* Let selftest have access to static functions in this file */
5914 #include "trace_selftest.c"
5915 #endif
5916
5917 struct trace_option_dentry {
5918 struct tracer_opt *opt;
5919 struct tracer_flags *flags;
5920 struct trace_array *tr;
5921 struct dentry *entry;
5922 };
5923
5924 static ssize_t
5925 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5926 loff_t *ppos)
5927 {
5928 struct trace_option_dentry *topt = filp->private_data;
5929 char *buf;
5930
5931 if (topt->flags->val & topt->opt->bit)
5932 buf = "1\n";
5933 else
5934 buf = "0\n";
5935
5936 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5937 }
5938
5939 static ssize_t
5940 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5941 loff_t *ppos)
5942 {
5943 struct trace_option_dentry *topt = filp->private_data;
5944 unsigned long val;
5945 int ret;
5946
5947 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5948 if (ret)
5949 return ret;
5950
5951 if (val != 0 && val != 1)
5952 return -EINVAL;
5953
5954 if (!!(topt->flags->val & topt->opt->bit) != val) {
5955 mutex_lock(&trace_types_lock);
5956 ret = __set_tracer_option(topt->tr, topt->flags,
5957 topt->opt, !val);
5958 mutex_unlock(&trace_types_lock);
5959 if (ret)
5960 return ret;
5961 }
5962
5963 *ppos += cnt;
5964
5965 return cnt;
5966 }
5967
5968
5969 static const struct file_operations trace_options_fops = {
5970 .open = tracing_open_generic,
5971 .read = trace_options_read,
5972 .write = trace_options_write,
5973 .llseek = generic_file_llseek,
5974 };
5975
5976 static ssize_t
5977 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5978 loff_t *ppos)
5979 {
5980 long index = (long)filp->private_data;
5981 char *buf;
5982
5983 if (trace_flags & (1 << index))
5984 buf = "1\n";
5985 else
5986 buf = "0\n";
5987
5988 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5989 }
5990
5991 static ssize_t
5992 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5993 loff_t *ppos)
5994 {
5995 struct trace_array *tr = &global_trace;
5996 long index = (long)filp->private_data;
5997 unsigned long val;
5998 int ret;
5999
6000 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6001 if (ret)
6002 return ret;
6003
6004 if (val != 0 && val != 1)
6005 return -EINVAL;
6006
6007 mutex_lock(&trace_types_lock);
6008 ret = set_tracer_flag(tr, 1 << index, val);
6009 mutex_unlock(&trace_types_lock);
6010
6011 if (ret < 0)
6012 return ret;
6013
6014 *ppos += cnt;
6015
6016 return cnt;
6017 }
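/*
 * Illustrative example (hedged; <option-name> is a placeholder): each
 * core trace option gets a file under the "options" directory backed by
 * these handlers, accepting only 0 or 1:
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/<option-name>
 *	echo 0 > /sys/kernel/debug/tracing/options/<option-name>
 *
 * The index of the option in trace_options[] is carried through
 * filp->private_data as a long, and set_tracer_flag() applies the bit.
 */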
6018
6019 static const struct file_operations trace_options_core_fops = {
6020 .open = tracing_open_generic,
6021 .read = trace_options_core_read,
6022 .write = trace_options_core_write,
6023 .llseek = generic_file_llseek,
6024 };
6025
6026 struct dentry *trace_create_file(const char *name,
6027 umode_t mode,
6028 struct dentry *parent,
6029 void *data,
6030 const struct file_operations *fops)
6031 {
6032 struct dentry *ret;
6033
6034 ret = debugfs_create_file(name, mode, parent, data, fops);
6035 if (!ret)
6036 pr_warning("Could not create debugfs '%s' entry\n", name);
6037
6038 return ret;
6039 }
6040
6041
6042 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6043 {
6044 struct dentry *d_tracer;
6045
6046 if (tr->options)
6047 return tr->options;
6048
6049 d_tracer = tracing_init_dentry_tr(tr);
6050 if (!d_tracer)
6051 return NULL;
6052
6053 tr->options = debugfs_create_dir("options", d_tracer);
6054 if (!tr->options) {
6055 pr_warning("Could not create debugfs directory 'options'\n");
6056 return NULL;
6057 }
6058
6059 return tr->options;
6060 }
6061
6062 static void
6063 create_trace_option_file(struct trace_array *tr,
6064 struct trace_option_dentry *topt,
6065 struct tracer_flags *flags,
6066 struct tracer_opt *opt)
6067 {
6068 struct dentry *t_options;
6069
6070 t_options = trace_options_init_dentry(tr);
6071 if (!t_options)
6072 return;
6073
6074 topt->flags = flags;
6075 topt->opt = opt;
6076 topt->tr = tr;
6077
6078 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6079 &trace_options_fops);
6080
6081 }
6082
6083 static struct trace_option_dentry *
6084 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6085 {
6086 struct trace_option_dentry *topts;
6087 struct tracer_flags *flags;
6088 struct tracer_opt *opts;
6089 int cnt;
6090
6091 if (!tracer)
6092 return NULL;
6093
6094 flags = tracer->flags;
6095
6096 if (!flags || !flags->opts)
6097 return NULL;
6098
6099 opts = flags->opts;
6100
6101 for (cnt = 0; opts[cnt].name; cnt++)
6102 ;
6103
6104 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6105 if (!topts)
6106 return NULL;
6107
6108 for (cnt = 0; opts[cnt].name; cnt++)
6109 create_trace_option_file(tr, &topts[cnt], flags,
6110 &opts[cnt]);
6111
6112 return topts;
6113 }
6114
6115 static void
6116 destroy_trace_option_files(struct trace_option_dentry *topts)
6117 {
6118 int cnt;
6119
6120 if (!topts)
6121 return;
6122
6123 for (cnt = 0; topts[cnt].opt; cnt++)
6124 debugfs_remove(topts[cnt].entry);
6125
6126 kfree(topts);
6127 }
6128
6129 static struct dentry *
6130 create_trace_option_core_file(struct trace_array *tr,
6131 const char *option, long index)
6132 {
6133 struct dentry *t_options;
6134
6135 t_options = trace_options_init_dentry(tr);
6136 if (!t_options)
6137 return NULL;
6138
6139 return trace_create_file(option, 0644, t_options, (void *)index,
6140 &trace_options_core_fops);
6141 }
6142
6143 static __init void create_trace_options_dir(struct trace_array *tr)
6144 {
6145 struct dentry *t_options;
6146 int i;
6147
6148 t_options = trace_options_init_dentry(tr);
6149 if (!t_options)
6150 return;
6151
6152 for (i = 0; trace_options[i]; i++)
6153 create_trace_option_core_file(tr, trace_options[i], i);
6154 }
6155
6156 static ssize_t
6157 rb_simple_read(struct file *filp, char __user *ubuf,
6158 size_t cnt, loff_t *ppos)
6159 {
6160 struct trace_array *tr = filp->private_data;
6161 char buf[64];
6162 int r;
6163
6164 r = tracer_tracing_is_on(tr);
6165 r = sprintf(buf, "%d\n", r);
6166
6167 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6168 }
6169
6170 static ssize_t
6171 rb_simple_write(struct file *filp, const char __user *ubuf,
6172 size_t cnt, loff_t *ppos)
6173 {
6174 struct trace_array *tr = filp->private_data;
6175 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6176 unsigned long val;
6177 int ret;
6178
6179 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6180 if (ret)
6181 return ret;
6182
6183 if (buffer) {
6184 mutex_lock(&trace_types_lock);
6185 if (val) {
6186 tracer_tracing_on(tr);
6187 if (tr->current_trace->start)
6188 tr->current_trace->start(tr);
6189 } else {
6190 tracer_tracing_off(tr);
6191 if (tr->current_trace->stop)
6192 tr->current_trace->stop(tr);
6193 }
6194 mutex_unlock(&trace_types_lock);
6195 }
6196
6197 (*ppos)++;
6198
6199 return cnt;
6200 }
6201
6202 static const struct file_operations rb_simple_fops = {
6203 .open = tracing_open_generic_tr,
6204 .read = rb_simple_read,
6205 .write = rb_simple_write,
6206 .release = tracing_release_generic_tr,
6207 .llseek = default_llseek,
6208 };
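
/*
 * Illustrative sketch, not part of the original file: rb_simple_fops backs
 * the "tracing_on" file created in init_tracer_debugfs() below, so user
 * space can pause and resume tracing by writing '0' or '1' to it. The
 * debugfs mount point used here is the conventional one and is an
 * assumption of this example, as is the helper's name.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int tracing_switch(int on)
 *	{
 *		int ret;
 *		int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = (write(fd, on ? "1" : "0", 1) == 1) ? 0 : -1;
 *		close(fd);
 *		return ret;
 *	}
 *
 * As rb_simple_write() above shows, the write also invokes the current
 * tracer's ->start() or ->stop() callback under trace_types_lock.
 */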
6209
6210 struct dentry *trace_instance_dir;
6211
6212 static void
6213 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6214
6215 static int
6216 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6217 {
6218 enum ring_buffer_flags rb_flags;
6219
6220 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6221
6222 buf->tr = tr;
6223
6224 buf->buffer = ring_buffer_alloc(size, rb_flags);
6225 if (!buf->buffer)
6226 return -ENOMEM;
6227
6228 buf->data = alloc_percpu(struct trace_array_cpu);
6229 if (!buf->data) {
6230 ring_buffer_free(buf->buffer);
6231 return -ENOMEM;
6232 }
6233
6234 /* Allocate the first page for all buffers */
6235 set_buffer_entries(&tr->trace_buffer,
6236 ring_buffer_size(tr->trace_buffer.buffer, 0));
6237
6238 return 0;
6239 }
6240
6241 static int allocate_trace_buffers(struct trace_array *tr, int size)
6242 {
6243 int ret;
6244
6245 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6246 if (ret)
6247 return ret;
6248
6249 #ifdef CONFIG_TRACER_MAX_TRACE
6250 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6251 allocate_snapshot ? size : 1);
6252 if (WARN_ON(ret)) {
6253 ring_buffer_free(tr->trace_buffer.buffer);
6254 free_percpu(tr->trace_buffer.data);
6255 return -ENOMEM;
6256 }
6257 tr->allocated_snapshot = allocate_snapshot;
6258
6259 /*
6260 * Only the top level trace array gets its snapshot allocated
6261 * from the kernel command line.
6262 */
6263 allocate_snapshot = false;
6264 #endif
6265 return 0;
6266 }
6267
6268 static void free_trace_buffer(struct trace_buffer *buf)
6269 {
6270 if (buf->buffer) {
6271 ring_buffer_free(buf->buffer);
6272 buf->buffer = NULL;
6273 free_percpu(buf->data);
6274 buf->data = NULL;
6275 }
6276 }
6277
6278 static void free_trace_buffers(struct trace_array *tr)
6279 {
6280 if (!tr)
6281 return;
6282
6283 free_trace_buffer(&tr->trace_buffer);
6284
6285 #ifdef CONFIG_TRACER_MAX_TRACE
6286 free_trace_buffer(&tr->max_buffer);
6287 #endif
6288 }
6289
6290 static int new_instance_create(const char *name)
6291 {
6292 struct trace_array *tr;
6293 int ret;
6294
6295 mutex_lock(&trace_types_lock);
6296
6297 ret = -EEXIST;
6298 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6299 if (tr->name && strcmp(tr->name, name) == 0)
6300 goto out_unlock;
6301 }
6302
6303 ret = -ENOMEM;
6304 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6305 if (!tr)
6306 goto out_unlock;
6307
6308 tr->name = kstrdup(name, GFP_KERNEL);
6309 if (!tr->name)
6310 goto out_free_tr;
6311
6312 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6313 goto out_free_tr;
6314
6315 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6316
6317 raw_spin_lock_init(&tr->start_lock);
6318
6319 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6320
6321 tr->current_trace = &nop_trace;
6322
6323 INIT_LIST_HEAD(&tr->systems);
6324 INIT_LIST_HEAD(&tr->events);
6325
6326 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6327 goto out_free_tr;
6328
6329 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6330 if (!tr->dir)
6331 goto out_free_tr;
6332
6333 ret = event_trace_add_tracer(tr->dir, tr);
6334 if (ret) {
6335 debugfs_remove_recursive(tr->dir);
6336 goto out_free_tr;
6337 }
6338
6339 init_tracer_debugfs(tr, tr->dir);
6340
6341 list_add(&tr->list, &ftrace_trace_arrays);
6342
6343 mutex_unlock(&trace_types_lock);
6344
6345 return 0;
6346
6347 out_free_tr:
6348 free_trace_buffers(tr);
6349 free_cpumask_var(tr->tracing_cpumask);
6350 kfree(tr->name);
6351 kfree(tr);
6352
6353 out_unlock:
6354 mutex_unlock(&trace_types_lock);
6355
6356 return ret;
6357
6358 }
6359
6360 static int instance_delete(const char *name)
6361 {
6362 struct trace_array *tr;
6363 int found = 0;
6364 int ret;
6365
6366 mutex_lock(&trace_types_lock);
6367
6368 ret = -ENODEV;
6369 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6370 if (tr->name && strcmp(tr->name, name) == 0) {
6371 found = 1;
6372 break;
6373 }
6374 }
6375 if (!found)
6376 goto out_unlock;
6377
6378 ret = -EBUSY;
6379 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6380 goto out_unlock;
6381
6382 list_del(&tr->list);
6383
6384 tracing_set_nop(tr);
6385 event_trace_del_tracer(tr);
6386 ftrace_destroy_function_files(tr);
6387 debugfs_remove_recursive(tr->dir);
6388 free_trace_buffers(tr);
6389
6390 kfree(tr->name);
6391 kfree(tr);
6392
6393 ret = 0;
6394
6395 out_unlock:
6396 mutex_unlock(&trace_types_lock);
6397
6398 return ret;
6399 }
6400
6401 static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6402 {
6403 struct dentry *parent;
6404 int ret;
6405
6406 /* Paranoid: Make sure the parent is the "instances" directory */
6407 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6408 if (WARN_ON_ONCE(parent != trace_instance_dir))
6409 return -ENOENT;
6410
6411 /*
6412 * The inode mutex is locked, but debugfs_create_dir() will also
6413 * take the mutex. As the instances directory can not be destroyed
6414 * or changed in any other way, it is safe to unlock it, and
6415 * let the dentry try. If two users try to make the same dir at
6416 * the same time, then the new_instance_create() will determine the
6417 * winner.
6418 */
6419 mutex_unlock(&inode->i_mutex);
6420
6421 ret = new_instance_create(dentry->d_iname);
6422
6423 mutex_lock(&inode->i_mutex);
6424
6425 return ret;
6426 }
6427
6428 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6429 {
6430 struct dentry *parent;
6431 int ret;
6432
6433 /* Paranoid: Make sure the parent is the "instances" directory */
6434 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6435 if (WARN_ON_ONCE(parent != trace_instance_dir))
6436 return -ENOENT;
6437
6438 /* The caller did a dget() on dentry */
6439 mutex_unlock(&dentry->d_inode->i_mutex);
6440
6441 /*
6442 * The inode mutex is locked, but debugfs_remove_recursive() will also
6443 * take it. As the instances directory cannot be destroyed
6444 * or changed in any other way, it is safe to unlock it, and
6445 * let the removal proceed. If two users try to remove the same dir at
6446 * the same time, then instance_delete() will determine the
6447 * winner.
6448 */
6449 mutex_unlock(&inode->i_mutex);
6450
6451 ret = instance_delete(dentry->d_iname);
6452
6453 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6454 mutex_lock(&dentry->d_inode->i_mutex);
6455
6456 return ret;
6457 }
6458
6459 static const struct inode_operations instance_dir_inode_operations = {
6460 .lookup = simple_lookup,
6461 .mkdir = instance_mkdir,
6462 .rmdir = instance_rmdir,
6463 };
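
/*
 * Illustrative sketch, not part of the original file: once
 * create_trace_instances() below installs these inode operations on the
 * "instances" directory, an ordinary mkdir()/rmdir() from user space is all
 * it takes to create or destroy a trace_array. The debugfs mount point and
 * the instance name are assumptions of this example.
 *
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *dir = "/sys/kernel/debug/tracing/instances/foo";
 *
 *		if (mkdir(dir, 0755))
 *			return 1;
 *		return rmdir(dir) ? 1 : 0;
 *	}
 *
 * mkdir() ends up in new_instance_create("foo"); rmdir() ends up in
 * instance_delete("foo") and fails with EBUSY while the instance still has
 * references, matching the tr->ref check above.
 */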
6464
6465 static __init void create_trace_instances(struct dentry *d_tracer)
6466 {
6467 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6468 if (WARN_ON(!trace_instance_dir))
6469 return;
6470
6471 /* Hijack the dir inode operations, to allow mkdir */
6472 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6473 }
6474
6475 static void
6476 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6477 {
6478 int cpu;
6479
6480 trace_create_file("available_tracers", 0444, d_tracer,
6481 tr, &show_traces_fops);
6482
6483 trace_create_file("current_tracer", 0644, d_tracer,
6484 tr, &set_tracer_fops);
6485
6486 trace_create_file("tracing_cpumask", 0644, d_tracer,
6487 tr, &tracing_cpumask_fops);
6488
6489 trace_create_file("trace_options", 0644, d_tracer,
6490 tr, &tracing_iter_fops);
6491
6492 trace_create_file("trace", 0644, d_tracer,
6493 tr, &tracing_fops);
6494
6495 trace_create_file("trace_pipe", 0444, d_tracer,
6496 tr, &tracing_pipe_fops);
6497
6498 trace_create_file("buffer_size_kb", 0644, d_tracer,
6499 tr, &tracing_entries_fops);
6500
6501 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6502 tr, &tracing_total_entries_fops);
6503
6504 trace_create_file("free_buffer", 0200, d_tracer,
6505 tr, &tracing_free_buffer_fops);
6506
6507 trace_create_file("trace_marker", 0220, d_tracer,
6508 tr, &tracing_mark_fops);
6509
6510 trace_create_file("trace_clock", 0644, d_tracer, tr,
6511 &trace_clock_fops);
6512
6513 trace_create_file("tracing_on", 0644, d_tracer,
6514 tr, &rb_simple_fops);
6515
6516 #ifdef CONFIG_TRACER_MAX_TRACE
6517 trace_create_file("tracing_max_latency", 0644, d_tracer,
6518 &tr->max_latency, &tracing_max_lat_fops);
6519 #endif
6520
6521 if (ftrace_create_function_files(tr, d_tracer))
6522 WARN(1, "Could not allocate function filter files");
6523
6524 #ifdef CONFIG_TRACER_SNAPSHOT
6525 trace_create_file("snapshot", 0644, d_tracer,
6526 tr, &snapshot_fops);
6527 #endif
6528
6529 for_each_tracing_cpu(cpu)
6530 tracing_init_debugfs_percpu(tr, cpu);
6531
6532 }
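
/*
 * For orientation only (an approximate sketch, not generated output): for a
 * given trace_array this function populates its debugfs directory roughly
 * like so, with the exact set depending on the kernel configuration:
 *
 *	<d_tracer>/
 *		available_tracers  current_tracer  tracing_cpumask
 *		trace  trace_pipe  trace_marker  trace_clock  trace_options
 *		buffer_size_kb  buffer_total_size_kb  free_buffer
 *		tracing_on
 *		tracing_max_latency	(CONFIG_TRACER_MAX_TRACE)
 *		snapshot		(CONFIG_TRACER_SNAPSHOT)
 *		per-CPU entries from tracing_init_debugfs_percpu()
 *
 * plus whatever ftrace_create_function_files() adds for function tracing.
 */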
6533
6534 static __init int tracer_init_debugfs(void)
6535 {
6536 struct dentry *d_tracer;
6537
6538 trace_access_lock_init();
6539
6540 d_tracer = tracing_init_dentry();
6541 if (!d_tracer)
6542 return 0;
6543
6544 init_tracer_debugfs(&global_trace, d_tracer);
6545
6546 trace_create_file("tracing_thresh", 0644, d_tracer,
6547 &global_trace, &tracing_thresh_fops);
6548
6549 trace_create_file("README", 0444, d_tracer,
6550 NULL, &tracing_readme_fops);
6551
6552 trace_create_file("saved_cmdlines", 0444, d_tracer,
6553 NULL, &tracing_saved_cmdlines_fops);
6554
6555 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6556 NULL, &tracing_saved_cmdlines_size_fops);
6557
6558 #ifdef CONFIG_DYNAMIC_FTRACE
6559 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6560 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6561 #endif
6562
6563 create_trace_instances(d_tracer);
6564
6565 create_trace_options_dir(&global_trace);
6566
6567 return 0;
6568 }
6569
6570 static int trace_panic_handler(struct notifier_block *this,
6571 unsigned long event, void *unused)
6572 {
6573 if (ftrace_dump_on_oops)
6574 ftrace_dump(ftrace_dump_on_oops);
6575 return NOTIFY_OK;
6576 }
6577
6578 static struct notifier_block trace_panic_notifier = {
6579 .notifier_call = trace_panic_handler,
6580 .next = NULL,
6581 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6582 };
6583
6584 static int trace_die_handler(struct notifier_block *self,
6585 unsigned long val,
6586 void *data)
6587 {
6588 switch (val) {
6589 case DIE_OOPS:
6590 if (ftrace_dump_on_oops)
6591 ftrace_dump(ftrace_dump_on_oops);
6592 break;
6593 default:
6594 break;
6595 }
6596 return NOTIFY_OK;
6597 }
6598
6599 static struct notifier_block trace_die_notifier = {
6600 .notifier_call = trace_die_handler,
6601 .priority = 200
6602 };
6603
6604 /*
6605 * printk allows up to 1024 characters, but we really don't need it that big.
6606 * Nothing should be printing 1000 characters anyway.
6607 */
6608 #define TRACE_MAX_PRINT 1000
6609
6610 /*
6611 * Define here KERN_TRACE so that we have one place to modify
6612 * it if we decide to change what log level the ftrace dump
6613 * should be at.
6614 */
6615 #define KERN_TRACE KERN_EMERG
6616
6617 void
6618 trace_printk_seq(struct trace_seq *s)
6619 {
6620 /* Probably should print a warning here. */
6621 if (s->seq.len >= TRACE_MAX_PRINT)
6622 s->seq.len = TRACE_MAX_PRINT;
6623
6624 /*
6625 * More paranoid code. Although the buffer size is set to
6626 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6627 * an extra layer of protection.
6628 */
6629 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6630 s->seq.len = s->seq.size - 1;
6631
6632 /* Should be zero-terminated, but we are paranoid. */
6633 s->buffer[s->seq.len] = 0;
6634
6635 printk(KERN_TRACE "%s", s->buffer);
6636
6637 trace_seq_init(s);
6638 }
6639
6640 void trace_init_global_iter(struct trace_iterator *iter)
6641 {
6642 iter->tr = &global_trace;
6643 iter->trace = iter->tr->current_trace;
6644 iter->cpu_file = RING_BUFFER_ALL_CPUS;
6645 iter->trace_buffer = &global_trace.trace_buffer;
6646
6647 if (iter->trace && iter->trace->open)
6648 iter->trace->open(iter);
6649
6650 /* Annotate start of buffers if we had overruns */
6651 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6652 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6653
6654 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6655 if (trace_clocks[iter->tr->clock_id].in_ns)
6656 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6657 }
6658
6659 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6660 {
6661 /* use static because iter can be a bit big for the stack */
6662 static struct trace_iterator iter;
6663 static atomic_t dump_running;
6664 unsigned int old_userobj;
6665 unsigned long flags;
6666 int cnt = 0, cpu;
6667
6668 /* Only allow one dump user at a time. */
6669 if (atomic_inc_return(&dump_running) != 1) {
6670 atomic_dec(&dump_running);
6671 return;
6672 }
6673
6674 /*
6675 * Always turn off tracing when we dump.
6676 * We don't need to show trace output of what happens
6677 * between multiple crashes.
6678 *
6679 * If the user does a sysrq-z, then they can re-enable
6680 * tracing with echo 1 > tracing_on.
6681 */
6682 tracing_off();
6683
6684 local_irq_save(flags);
6685
6686 /* Simulate the iterator */
6687 trace_init_global_iter(&iter);
6688
6689 for_each_tracing_cpu(cpu) {
6690 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6691 }
6692
6693 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6694
6695 /* don't look at user memory in panic mode */
6696 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6697
6698 switch (oops_dump_mode) {
6699 case DUMP_ALL:
6700 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6701 break;
6702 case DUMP_ORIG:
6703 iter.cpu_file = raw_smp_processor_id();
6704 break;
6705 case DUMP_NONE:
6706 goto out_enable;
6707 default:
6708 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6709 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6710 }
6711
6712 printk(KERN_TRACE "Dumping ftrace buffer:\n");
6713
6714 /* Did function tracer already get disabled? */
6715 if (ftrace_is_dead()) {
6716 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6717 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6718 }
6719
6720 /*
6721 * We need to stop all tracing on all CPUs to read
6722 * the next buffer. This is a bit expensive, but is
6723 * not done often. We fill all that we can read,
6724 * and then release the locks again.
6725 */
6726
6727 while (!trace_empty(&iter)) {
6728
6729 if (!cnt)
6730 printk(KERN_TRACE "---------------------------------\n");
6731
6732 cnt++;
6733
6734 /* reset all but tr, trace, and overruns */
6735 memset(&iter.seq, 0,
6736 sizeof(struct trace_iterator) -
6737 offsetof(struct trace_iterator, seq));
6738 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6739 iter.pos = -1;
6740
6741 if (trace_find_next_entry_inc(&iter) != NULL) {
6742 int ret;
6743
6744 ret = print_trace_line(&iter);
6745 if (ret != TRACE_TYPE_NO_CONSUME)
6746 trace_consume(&iter);
6747 }
6748 touch_nmi_watchdog();
6749
6750 trace_printk_seq(&iter.seq);
6751 }
6752
6753 if (!cnt)
6754 printk(KERN_TRACE " (ftrace buffer empty)\n");
6755 else
6756 printk(KERN_TRACE "---------------------------------\n");
6757
6758 out_enable:
6759 trace_flags |= old_userobj;
6760
6761 for_each_tracing_cpu(cpu) {
6762 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6763 }
6764 atomic_dec(&dump_running);
6765 local_irq_restore(flags);
6766 }
6767 EXPORT_SYMBOL_GPL(ftrace_dump);
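
/*
 * Illustrative sketch, not part of the original file: because ftrace_dump()
 * is exported for GPL modules, a debugging aid can spill the trace buffers
 * to the console when it hits an unrecoverable condition. The helper below
 * is hypothetical.
 *
 *	#include <linux/kernel.h>
 *
 *	static void example_fatal_condition(const char *why)
 *	{
 *		pr_emerg("example: %s, dumping ftrace buffer\n", why);
 *		ftrace_dump(DUMP_ALL);
 *	}
 *
 * DUMP_ORIG would limit the output to the CPU that called ftrace_dump().
 * Either way the dump turns tracing off, so it has to be re-enabled through
 * the tracing_on file afterwards.
 */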
6768
6769 __init static int tracer_alloc_buffers(void)
6770 {
6771 int ring_buf_size;
6772 int ret = -ENOMEM;
6773
6774
6775 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6776 goto out;
6777
6778 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6779 goto out_free_buffer_mask;
6780
6781 /* Only allocate trace_printk buffers if a trace_printk exists */
6782 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6783 /* Must be called before global_trace.buffer is allocated */
6784 trace_printk_init_buffers();
6785
6786 /* To save memory, keep the ring buffer size to its minimum */
6787 if (ring_buffer_expanded)
6788 ring_buf_size = trace_buf_size;
6789 else
6790 ring_buf_size = 1;
6791
6792 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6793 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6794
6795 raw_spin_lock_init(&global_trace.start_lock);
6796
6797 /* Used for event triggers */
6798 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6799 if (!temp_buffer)
6800 goto out_free_cpumask;
6801
6802 if (trace_create_savedcmd() < 0)
6803 goto out_free_temp_buffer;
6804
6805 /* TODO: make the number of buffers hot-pluggable with CPUs */
6806 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6807 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6808 WARN_ON(1);
6809 goto out_free_savedcmd;
6810 }
6811
6812 if (global_trace.buffer_disabled)
6813 tracing_off();
6814
6815 if (trace_boot_clock) {
6816 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6817 if (ret < 0)
6818 pr_warning("Trace clock %s not defined, going back to default\n",
6819 trace_boot_clock);
6820 }
6821
6822 /*
6823 * register_tracer() might reference current_trace, so it
6824 * needs to be set before we register anything. This is
6825 * just a bootstrap of current_trace anyway.
6826 */
6827 global_trace.current_trace = &nop_trace;
6828
6829 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6830
6831 ftrace_init_global_array_ops(&global_trace);
6832
6833 register_tracer(&nop_trace);
6834
6835 /* All seems OK, enable tracing */
6836 tracing_disabled = 0;
6837
6838 atomic_notifier_chain_register(&panic_notifier_list,
6839 &trace_panic_notifier);
6840
6841 register_die_notifier(&trace_die_notifier);
6842
6843 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6844
6845 INIT_LIST_HEAD(&global_trace.systems);
6846 INIT_LIST_HEAD(&global_trace.events);
6847 list_add(&global_trace.list, &ftrace_trace_arrays);
6848
6849 while (trace_boot_options) {
6850 char *option;
6851
6852 option = strsep(&trace_boot_options, ",");
6853 trace_set_options(&global_trace, option);
6854 }
6855
6856 register_snapshot_cmd();
6857
6858 return 0;
6859
6860 out_free_savedcmd:
6861 free_saved_cmdlines_buffer(savedcmd);
6862 out_free_temp_buffer:
6863 ring_buffer_free(temp_buffer);
6864 out_free_cpumask:
6865 free_cpumask_var(global_trace.tracing_cpumask);
6866 out_free_buffer_mask:
6867 free_cpumask_var(tracing_buffer_mask);
6868 out:
6869 return ret;
6870 }
6871
6872 void __init trace_init(void)
6873 {
6874 if (tracepoint_printk) {
6875 tracepoint_print_iter =
6876 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6877 if (WARN_ON(!tracepoint_print_iter))
6878 tracepoint_printk = 0;
6879 }
6880 tracer_alloc_buffers();
6881 init_ftrace_syscalls();
6882 trace_event_init();
6883 }
6884
6885 __init static int clear_boot_tracer(void)
6886 {
6887 /*
6888 * The default bootup tracer name is stored in an init-section buffer.
6889 * This function is called from a late initcall. If we did not
6890 * find the boot tracer, then clear it out, to prevent
6891 * later registration from accessing the buffer that is
6892 * about to be freed.
6893 */
6894 if (!default_bootup_tracer)
6895 return 0;
6896
6897 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6898 default_bootup_tracer);
6899 default_bootup_tracer = NULL;
6900
6901 return 0;
6902 }
6903
6904 fs_initcall(tracer_init_debugfs);
6905 late_initcall(clear_boot_tracer);