ftrace, trivial: fix typo "resgister" -> "register"
[deliverable/linux.git] / kernel / trace / trace.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as those from ftrace_printk, could occur at
 * the same time, giving false positive or negative results.
 */
static bool __read_mostly	tracing_selftest_running;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. That is the only place that sets
 * this back to zero.
 */
int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

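/*
 * Sketch of the intended pairing (tracing_reset() below is a real
 * user): bracket any direct ring-buffer manipulation so the function
 * tracer cannot recurse into the buffer while it is being touched:
 *
 *	ftrace_disable_cpu();
 *	ring_buffer_reset_cpu(tr->buffer, cpu);
 *	ftrace_enable_cpu();
 */
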
static cpumask_var_t __read_mostly	tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(char *buf);

static int __init set_ftrace(char *str)
{
	tracing_set_tracer(str);
	return 1;
}
__setup("ftrace", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
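
/*
 * Example (hypothetical boot command line; the tracer name must match
 * one registered with register_tracer()):
 *
 *	linux ... ftrace=function ftrace_dump_on_oops
 *
 * selects the "function" tracer at boot and dumps the ftrace buffers
 * to the console if an oops or panic happens.
 */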

long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

cycle_t ftrace_now(int cpu)
{
	u64 ts = ring_buffer_time_stamp(cpu);
	ring_buffer_normalize_time_stamp(cpu, &ts);
	return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/* function tracing enabled */
int				ftrace_function_enabled;

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify the allocation of
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int			max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries cannot be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

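/*
 * Example (hypothetical boot command line): booting with
 *
 *	trace_buf_size=1048576
 *
 * requests a 1MB trace buffer instead of TRACE_BUF_SIZE_DEFAULT; the
 * value is parsed with strict_strtoul() base 0, so 0x100000 works too.
 */
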
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"ftrace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	NULL
};

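/*
 * These names are what user space writes to the trace_options file to
 * flip the corresponding TRACE_ITER_* bit; a sketch (the path depends
 * on where debugfs is mounted):
 *
 *	echo stacktrace > /debugfs/tracing/trace_options
 *	echo nostacktrace > /debugfs/tracing/trace_options
 *
 * where the "no" prefix clears the bit instead of setting it.
 */
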
/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = task_uid(tsk);
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

static void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret)
		return -EFAULT;

	/* advance readpos only by what was actually copied */
	s->readpos += cnt;
	return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	/*
	 * When this gets called we hold the BKL which means that
	 * preemption is disabled. Various trace selftests however
	 * need to disable and enable preemption for successful tests.
	 * So we drop the BKL here and grab it after the tests again.
	 */
	unlock_kernel();
	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int i;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);
	lock_kernel();

	return ret;
}

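/*
 * A minimal registration sketch (hypothetical tracer, not part of this
 * file): a plugin fills in a struct tracer and hands it to
 * register_tracer(), typically from an initcall in its own file:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 */
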
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

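/*
 * Worked example of the mapping (values illustrative): after
 * trace_save_cmdline() runs for a task "bash" with pid 1234,
 * map_pid_to_cmdline[1234] == idx and saved_cmdlines[idx] == "bash",
 * so trace_find_cmdline(1234) can return "bash" long after the task
 * is gone. Only SAVED_CMDLINES (128) slots exist; old ones are
 * recycled, which is why a stale pid can print as "<...>".
 */
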
static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count)
		goto out;

	if (trace_stop_count < 0) {
		/* Someone screwed up their debugging */
		WARN_ON_ONCE(1);
		trace_stop_count = 0;
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

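/*
 * The two calls nest via trace_stop_count, so a sketch of the intended
 * usage around a critical section is simply:
 *
 *	tracing_stop();
 *	... read or dump the buffers ...
 *	tracing_start();
 *
 * Tracing resumes only once every tracing_stop() has been balanced.
 */
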
void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->tgid		= (tsk) ? tsk->tgid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

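/*
 * For example, an entry recorded from a hardirq handler with interrupts
 * disabled ends up with TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ set,
 * which the latency output renders as 'd' and 'h' in the irqs-off and
 * hardirq/softirq columns (see lat_print_generic() below).
 */
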
void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_FN;
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void __trace_graph_entry(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ent_entry *entry;
	unsigned long irq_flags;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_GRAPH_ENT;
	entry->graph_ent = *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}

static void __trace_graph_return(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *entry;
	unsigned long irq_flags;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_GRAPH_RET;
	entry->ret = *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}
#endif

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void ftrace_trace_stack(struct trace_array *tr,
			       struct trace_array_cpu *data,
			       unsigned long flags,
			       int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}

static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_array_cpu *data,
				   unsigned long flags, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_USER_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_userstack(struct trace_array *tr,
		       struct trace_array_cpu *data,
		       unsigned long flags)
{
	ftrace_trace_userstack(tr, data, flags, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ring_buffer_event *event;
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct special_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, pc);
	entry->ent.type = TRACE_SPECIAL;
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
	ftrace_trace_userstack(tr, data, irq_flags, pc);

	trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_CTX;
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 5, pc);
	ftrace_trace_userstack(tr, data, flags, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_WAKE;
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 6, pc);
	ftrace_trace_userstack(tr, data, flags, pc);

	trace_wake_up();
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

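/*
 * Ad-hoc debugging sketch: a developer can drop a call such as
 *
 *	ftrace_special(__LINE__, smp_processor_id(), 0);
 *
 * into code under investigation; the three values are recorded as a
 * TRACE_SPECIAL entry together with a kernel (and, when the option is
 * enabled, user) stack trace.
 */
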
#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	if (!ftrace_graph_addr(trace->func))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_entry(tr, data, trace, flags, pc);
	}
	/* Only do the atomic if it is not already set */
	if (!test_tsk_trace_graph(current))
		set_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return 1;
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, data, trace, flags, pc);
	}
	if (!trace->depth)
		clear_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	register_ftrace_function(&trace_ops);
	ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
};

static void trace_iterator_increment(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace) {
		mutex_unlock(&trace_types_lock);
		return NULL;
	}

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		for_each_tracing_cpu(cpu) {
			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
		}

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
	mutex_unlock(&trace_types_lock);
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /                      \n");
	seq_puts(m, "#                |||||     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}


static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total;
	unsigned long entries;
	const char *name = "preemption";

	if (type)
		name = type->name;

	entries = ring_buffer_entries(iter->tr->buffer);
	total = entries +
		ring_buffer_overruns(iter->tr->buffer);

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%3d", cpu);
	trace_seq_printf(s, "%c%c",
			 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
			  (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
			 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq) {
		trace_seq_putc(s, 'H');
	} else {
		if (hardirq) {
			trace_seq_putc(s, 'h');
		} else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	cpumask_set_cpu(iter->cpu, iter->started);
	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}

static enum print_line_t
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry;
	struct trace_event *event;
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	u64 next_ts;
	char *comm;
	int ret;

	test_cpu_buff_start(iter);

	next_entry = find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);
	abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(iter->ts),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	event = ftrace_find_event(entry->type);
	if (event && event->latency_trace) {
		ret = event->latency_trace(s, entry, sym_flags);
		if (ret)
			return ret;
		return TRACE_TYPE_HANDLED;
	}

	trace_seq_printf(s, "Unknown type %d\n", entry->type);
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int ret;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(iter->ts);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event && event->trace) {
		ret = event->trace(s, entry, sym_flags);
		if (ret)
			return ret;
		return TRACE_TYPE_HANDLED;
	}
	ret = trace_seq_printf(s, "Unknown type %d\n", entry->type);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;
	int ret;

	entry = iter->ent;

	ret = trace_seq_printf(s, "%d %d %llu ",
			       entry->pid, iter->cpu, iter->ts);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event && event->raw) {
		ret = event->raw(s, entry, 0);
		if (ret)
			return ret;
		return TRACE_TYPE_HANDLED;
	}
	ret = trace_seq_printf(s, "%d ?\n", entry->type);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
	SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
	SEQ_PUT_HEX_FIELD_RET(s, iter->ts);

	event = ftrace_find_event(entry->type);
	if (event && event->hex)
		event->hex(s, entry, 0);

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	SEQ_PUT_FIELD_RET(s, entry->pid);
	SEQ_PUT_FIELD_RET(s, entry->cpu);
	SEQ_PUT_FIELD_RET(s, iter->ts);

	event = ftrace_find_event(entry->type);
	if (event && event->binary)
		event->binary(s, entry, 0);

	return TRACE_TYPE_HANDLED;
}

static int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		return print_lat_fmt(iter, iter->idx, iter->cpu);

	return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->tr->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	for_each_tracing_cpu(cpu) {

		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);

		if (!iter->buffer_iter[cpu])
			goto fail_buffer;
	}

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (*ret)
		goto fail_buffer;

	m = file->private_data;
	m->private = iter;

	/* stop the trace while dumping */
	tracing_stop();

	mutex_unlock(&trace_types_lock);

 out:
	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
	mutex_unlock(&trace_types_lock);
	kfree(iter);

	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

1848int tracing_release(struct inode *inode, struct file *file)
1849{
1850 struct seq_file *m = (struct seq_file *)file->private_data;
1851 struct trace_iterator *iter = m->private;
3928a8a2 1852 int cpu;
bc0c38d1
SR
1853
1854 mutex_lock(&trace_types_lock);
3928a8a2
SR
1855 for_each_tracing_cpu(cpu) {
1856 if (iter->buffer_iter[cpu])
1857 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1858 }
1859
bc0c38d1
SR
1860 if (iter->trace && iter->trace->close)
1861 iter->trace->close(iter);
1862
1863 /* reenable tracing if it was previously enabled */
9036990d 1864 tracing_start();
bc0c38d1
SR
1865 mutex_unlock(&trace_types_lock);
1866
1867 seq_release(inode, file);
1868 kfree(iter);
1869 return 0;
1870}
1871
1872static int tracing_open(struct inode *inode, struct file *file)
1873{
1874 int ret;
1875
1876 __tracing_open(inode, file, &ret);
1877
1878 return ret;
1879}
1880
1881static int tracing_lt_open(struct inode *inode, struct file *file)
1882{
1883 struct trace_iterator *iter;
1884 int ret;
1885
1886 iter = __tracing_open(inode, file, &ret);
1887
1888 if (!ret)
1889 iter->iter_flags |= TRACE_FILE_LAT_FMT;
1890
1891 return ret;
1892}
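/*
 * tracing_open() backs the "trace" debugfs file and tracing_lt_open()
 * the "latency_trace" file (see tracer_init_debugfs() below); the only
 * difference is TRACE_FILE_LAT_FMT, which makes s_show() above use the
 * latency-format header and per-entry layout.
 */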
1893
1894
e309b41d 1895static void *
bc0c38d1
SR
1896t_next(struct seq_file *m, void *v, loff_t *pos)
1897{
1898 struct tracer *t = m->private;
1899
1900 (*pos)++;
1901
1902 if (t)
1903 t = t->next;
1904
1905 m->private = t;
1906
1907 return t;
1908}
1909
1910static void *t_start(struct seq_file *m, loff_t *pos)
1911{
1912 struct tracer *t = m->private;
1913 loff_t l = 0;
1914
1915 mutex_lock(&trace_types_lock);
1916 for (; t && l < *pos; t = t_next(m, t, &l))
1917 ;
1918
1919 return t;
1920}
1921
1922static void t_stop(struct seq_file *m, void *p)
1923{
1924 mutex_unlock(&trace_types_lock);
1925}
1926
1927static int t_show(struct seq_file *m, void *v)
1928{
1929 struct tracer *t = v;
1930
1931 if (!t)
1932 return 0;
1933
1934 seq_printf(m, "%s", t->name);
1935 if (t->next)
1936 seq_putc(m, ' ');
1937 else
1938 seq_putc(m, '\n');
1939
1940 return 0;
1941}
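/*
 * Together with show_traces_seq_ops below, this walker backs the
 * available_tracers file: every registered tracer is printed on one
 * space-separated line, e.g. (the exact list depends on the config):
 *
 *	# cat /debug/tracing/available_tracers
 *	wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none
 */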
1942
1943static struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
1944 .start = t_start,
1945 .next = t_next,
1946 .stop = t_stop,
1947 .show = t_show,
bc0c38d1
SR
1948};
1949
1950static int show_traces_open(struct inode *inode, struct file *file)
1951{
1952 int ret;
1953
60a11774
SR
1954 if (tracing_disabled)
1955 return -ENODEV;
1956
bc0c38d1
SR
1957 ret = seq_open(file, &show_traces_seq_ops);
1958 if (!ret) {
1959 struct seq_file *m = file->private_data;
1960 m->private = trace_types;
1961 }
1962
1963 return ret;
1964}
1965
1966static struct file_operations tracing_fops = {
4bf39a94
IM
1967 .open = tracing_open,
1968 .read = seq_read,
1969 .llseek = seq_lseek,
1970 .release = tracing_release,
bc0c38d1
SR
1971};
1972
1973static struct file_operations tracing_lt_fops = {
4bf39a94
IM
1974 .open = tracing_lt_open,
1975 .read = seq_read,
1976 .llseek = seq_lseek,
1977 .release = tracing_release,
bc0c38d1
SR
1978};
1979
1980static struct file_operations show_traces_fops = {
c7078de1
IM
1981 .open = show_traces_open,
1982 .read = seq_read,
1983 .release = seq_release,
1984};
1985
36dfe925
IM
1986/*
1987 * Only trace on a CPU if the bitmask is set:
1988 */
9e01c1b7 1989static cpumask_var_t tracing_cpumask;
36dfe925
IM
1990
1991/*
1992 * The tracer itself will not take this lock, but still we want
1993 * to provide a consistent cpumask to user-space:
1994 */
1995static DEFINE_MUTEX(tracing_cpumask_update_lock);
1996
1997/*
1998 * Temporary storage for the character representation of the
1999 * CPU bitmask (and one more byte for the newline):
2000 */
2001static char mask_str[NR_CPUS + 1];
2002
c7078de1
IM
2003static ssize_t
2004tracing_cpumask_read(struct file *filp, char __user *ubuf,
2005 size_t count, loff_t *ppos)
2006{
36dfe925 2007 int len;
c7078de1
IM
2008
2009 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 2010
9e01c1b7 2011 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
36dfe925
IM
2012 if (count - len < 2) {
2013 count = -EINVAL;
2014 goto out_err;
2015 }
2016 len += sprintf(mask_str + len, "\n");
2017	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
2018
2019out_err:
c7078de1
IM
2020 mutex_unlock(&tracing_cpumask_update_lock);
2021
2022 return count;
2023}
2024
2025static ssize_t
2026tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2027 size_t count, loff_t *ppos)
2028{
36dfe925 2029 int err, cpu;
9e01c1b7
RR
2030 cpumask_var_t tracing_cpumask_new;
2031
2032 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2033 return -ENOMEM;
c7078de1
IM
2034
2035 mutex_lock(&tracing_cpumask_update_lock);
9e01c1b7 2036 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 2037 if (err)
36dfe925
IM
2038 goto err_unlock;
2039
a5e25883 2040 local_irq_disable();
92205c23 2041 __raw_spin_lock(&ftrace_max_lock);
ab46428c 2042 for_each_tracing_cpu(cpu) {
36dfe925
IM
2043 /*
2044 * Increase/decrease the disabled counter if we are
2045 * about to flip a bit in the cpumask:
2046 */
9e01c1b7
RR
2047 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2048 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
36dfe925
IM
2049 atomic_inc(&global_trace.data[cpu]->disabled);
2050 }
9e01c1b7
RR
2051 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2052 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
36dfe925
IM
2053 atomic_dec(&global_trace.data[cpu]->disabled);
2054 }
2055 }
92205c23 2056 __raw_spin_unlock(&ftrace_max_lock);
a5e25883 2057 local_irq_enable();
36dfe925 2058
9e01c1b7 2059 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
2060
2061 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 2062 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
2063
2064 return count;
36dfe925
IM
2065
2066err_unlock:
2067 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 2068	free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
2069
2070 return err;
c7078de1
IM
2071}
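/*
 * Usage sketch (paths as in the mini-HOWTO below): restrict tracing to
 * CPUs 0-1 by writing a hex mask, then read it back:
 *
 *	# echo 3 > /debug/tracing/tracing_cpumask
 *	# cat /debug/tracing/tracing_cpumask
 *	3
 *
 * Clearing a bit bumps that CPU's disabled counter in the loop above,
 * so its events are dropped instead of recorded.
 */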
2072
2073static struct file_operations tracing_cpumask_fops = {
2074 .open = tracing_open_generic,
2075 .read = tracing_cpumask_read,
2076 .write = tracing_cpumask_write,
bc0c38d1
SR
2077};
2078
2079static ssize_t
ee6bce52 2080tracing_trace_options_read(struct file *filp, char __user *ubuf,
bc0c38d1
SR
2081 size_t cnt, loff_t *ppos)
2082{
adf9f195 2083 int i;
bc0c38d1
SR
2084 char *buf;
2085 int r = 0;
2086 int len = 0;
adf9f195
FW
2087 u32 tracer_flags = current_trace->flags->val;
2088 struct tracer_opt *trace_opts = current_trace->flags->opts;
2089
bc0c38d1
SR
2090
2091	/* calculate max size */
2092 for (i = 0; trace_options[i]; i++) {
2093 len += strlen(trace_options[i]);
2094 len += 3; /* "no" and space */
2095 }
2096
adf9f195
FW
2097 /*
2098	 * Increase the size with the names of options specific
2099	 * to the current tracer.
2100 */
2101 for (i = 0; trace_opts[i].name; i++) {
2102 len += strlen(trace_opts[i].name);
2103 len += 3; /* "no" and space */
2104 }
2105
bc0c38d1
SR
2106 /* +2 for \n and \0 */
2107 buf = kmalloc(len + 2, GFP_KERNEL);
2108 if (!buf)
2109 return -ENOMEM;
2110
2111 for (i = 0; trace_options[i]; i++) {
2112 if (trace_flags & (1 << i))
2113 r += sprintf(buf + r, "%s ", trace_options[i]);
2114 else
2115 r += sprintf(buf + r, "no%s ", trace_options[i]);
2116 }
2117
adf9f195
FW
2118 for (i = 0; trace_opts[i].name; i++) {
2119 if (tracer_flags & trace_opts[i].bit)
2120 r += sprintf(buf + r, "%s ",
2121 trace_opts[i].name);
2122 else
2123 r += sprintf(buf + r, "no%s ",
2124 trace_opts[i].name);
2125 }
2126
bc0c38d1
SR
2127 r += sprintf(buf + r, "\n");
2128 WARN_ON(r >= len + 2);
2129
36dfe925 2130 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2131
2132 kfree(buf);
2133
2134 return r;
2135}
2136
adf9f195
FW
2137/* Try to assign a tracer specific option */
2138static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2139{
2140 struct tracer_flags *trace_flags = trace->flags;
2141 struct tracer_opt *opts = NULL;
2142 int ret = 0, i = 0;
2143 int len;
2144
2145 for (i = 0; trace_flags->opts[i].name; i++) {
2146 opts = &trace_flags->opts[i];
2147 len = strlen(opts->name);
2148
2149 if (strncmp(cmp, opts->name, len) == 0) {
2150 ret = trace->set_flag(trace_flags->val,
2151 opts->bit, !neg);
2152 break;
2153 }
2154 }
2155 /* Not found */
2156 if (!trace_flags->opts[i].name)
2157 return -EINVAL;
2158
2159 /* Refused to handle */
2160 if (ret)
2161 return ret;
2162
2163 if (neg)
2164 trace_flags->val &= ~opts->bit;
2165 else
2166 trace_flags->val |= opts->bit;
2167
2168 return 0;
2169}
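/*
 * Sketch of the tables set_tracer_option() walks (hypothetical names,
 * not defined in this file); writing "verbose" or "noverbose" to
 * trace_options would flip the bit via trace->set_flag():
 *
 *	#define MYTRACER_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt mytracer_opts[] = {
 *		{ .name = "verbose", .bit = MYTRACER_OPT_VERBOSE },
 *		{ }	/+ terminator: the lookup stops at a NULL name +/
 *	};
 *
 *	static struct tracer_flags mytracer_flags = {
 *		.val	= 0,
 *		.opts	= mytracer_opts,
 *	};
 */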
2170
bc0c38d1 2171static ssize_t
ee6bce52 2172tracing_trace_options_write(struct file *filp, const char __user *ubuf,
bc0c38d1
SR
2173 size_t cnt, loff_t *ppos)
2174{
2175 char buf[64];
2176 char *cmp = buf;
2177 int neg = 0;
adf9f195 2178 int ret;
bc0c38d1
SR
2179 int i;
2180
cffae437
SR
2181 if (cnt >= sizeof(buf))
2182 return -EINVAL;
bc0c38d1
SR
2183
2184 if (copy_from_user(&buf, ubuf, cnt))
2185 return -EFAULT;
2186
2187 buf[cnt] = 0;
2188
2189 if (strncmp(buf, "no", 2) == 0) {
2190 neg = 1;
2191 cmp += 2;
2192 }
2193
2194 for (i = 0; trace_options[i]; i++) {
2195 int len = strlen(trace_options[i]);
2196
2197 if (strncmp(cmp, trace_options[i], len) == 0) {
2198 if (neg)
2199 trace_flags &= ~(1 << i);
2200 else
2201 trace_flags |= (1 << i);
2202 break;
2203 }
2204 }
adf9f195
FW
2205
2206 /* If no option could be set, test the specific tracer options */
2207 if (!trace_options[i]) {
2208 ret = set_tracer_option(current_trace, cmp, neg);
2209 if (ret)
2210 return ret;
2211 }
bc0c38d1
SR
2212
2213 filp->f_pos += cnt;
2214
2215 return cnt;
2216}
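/*
 * Usage sketch: the "no" prefix parsed above clears a flag; a name not
 * found in trace_options[] falls through to set_tracer_option():
 *
 *	# echo print-parent > /debug/tracing/trace_options
 *	# echo nosym-addr > /debug/tracing/trace_options
 */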
2217
2218static struct file_operations tracing_iter_fops = {
c7078de1 2219 .open = tracing_open_generic,
ee6bce52
SR
2220 .read = tracing_trace_options_read,
2221 .write = tracing_trace_options_write,
bc0c38d1
SR
2222};
2223
7bd2f24c
IM
2224static const char readme_msg[] =
2225 "tracing mini-HOWTO:\n\n"
2226 "# mkdir /debug\n"
2227 "# mount -t debugfs nodev /debug\n\n"
2228 "# cat /debug/tracing/available_tracers\n"
2229 "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2230 "# cat /debug/tracing/current_tracer\n"
2231 "none\n"
2232 "# echo sched_switch > /debug/tracing/current_tracer\n"
2233 "# cat /debug/tracing/current_tracer\n"
2234 "sched_switch\n"
ee6bce52 2235 "# cat /debug/tracing/trace_options\n"
7bd2f24c 2236 "noprint-parent nosym-offset nosym-addr noverbose\n"
ee6bce52 2237 "# echo print-parent > /debug/tracing/trace_options\n"
7bd2f24c
IM
2238 "# echo 1 > /debug/tracing/tracing_enabled\n"
2239 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2240 "echo 0 > /debug/tracing/tracing_enabled\n"
2241;
2242
2243static ssize_t
2244tracing_readme_read(struct file *filp, char __user *ubuf,
2245 size_t cnt, loff_t *ppos)
2246{
2247 return simple_read_from_buffer(ubuf, cnt, ppos,
2248 readme_msg, strlen(readme_msg));
2249}
2250
2251static struct file_operations tracing_readme_fops = {
c7078de1
IM
2252 .open = tracing_open_generic,
2253 .read = tracing_readme_read,
7bd2f24c
IM
2254};
2255
bc0c38d1
SR
2256static ssize_t
2257tracing_ctrl_read(struct file *filp, char __user *ubuf,
2258 size_t cnt, loff_t *ppos)
2259{
bc0c38d1
SR
2260 char buf[64];
2261 int r;
2262
9036990d 2263 r = sprintf(buf, "%u\n", tracer_enabled);
4e3c3333 2264 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2265}
2266
2267static ssize_t
2268tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2269 size_t cnt, loff_t *ppos)
2270{
2271 struct trace_array *tr = filp->private_data;
bc0c38d1 2272 char buf[64];
c6caeeb1
SR
2273 long val;
2274 int ret;
bc0c38d1 2275
cffae437
SR
2276 if (cnt >= sizeof(buf))
2277 return -EINVAL;
bc0c38d1
SR
2278
2279 if (copy_from_user(&buf, ubuf, cnt))
2280 return -EFAULT;
2281
2282 buf[cnt] = 0;
2283
c6caeeb1
SR
2284 ret = strict_strtoul(buf, 10, &val);
2285 if (ret < 0)
2286 return ret;
bc0c38d1
SR
2287
2288 val = !!val;
2289
2290 mutex_lock(&trace_types_lock);
9036990d
SR
2291 if (tracer_enabled ^ val) {
2292 if (val) {
bc0c38d1 2293 tracer_enabled = 1;
9036990d
SR
2294 if (current_trace->start)
2295 current_trace->start(tr);
2296 tracing_start();
2297 } else {
bc0c38d1 2298 tracer_enabled = 0;
9036990d
SR
2299 tracing_stop();
2300 if (current_trace->stop)
2301 current_trace->stop(tr);
2302 }
bc0c38d1
SR
2303 }
2304 mutex_unlock(&trace_types_lock);
2305
2306 filp->f_pos += cnt;
2307
2308 return cnt;
2309}
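/*
 * Usage sketch: tracing_enabled takes a number; the "val = !!val"
 * above normalizes anything non-zero to 1:
 *
 *	# echo 1 > /debug/tracing/tracing_enabled
 *	# echo 0 > /debug/tracing/tracing_enabled
 */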
2310
2311static ssize_t
2312tracing_set_trace_read(struct file *filp, char __user *ubuf,
2313 size_t cnt, loff_t *ppos)
2314{
2315 char buf[max_tracer_type_len+2];
2316 int r;
2317
2318 mutex_lock(&trace_types_lock);
2319 if (current_trace)
2320 r = sprintf(buf, "%s\n", current_trace->name);
2321 else
2322 r = sprintf(buf, "\n");
2323 mutex_unlock(&trace_types_lock);
2324
4bf39a94 2325 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2326}
2327
d9e54076 2328static int tracing_set_tracer(char *buf)
bc0c38d1
SR
2329{
2330 struct trace_array *tr = &global_trace;
2331 struct tracer *t;
d9e54076 2332 int ret = 0;
bc0c38d1
SR
2333
2334 mutex_lock(&trace_types_lock);
2335 for (t = trace_types; t; t = t->next) {
2336 if (strcmp(t->name, buf) == 0)
2337 break;
2338 }
c2931e05
FW
2339 if (!t) {
2340 ret = -EINVAL;
2341 goto out;
2342 }
2343 if (t == current_trace)
bc0c38d1
SR
2344 goto out;
2345
9f029e83 2346 trace_branch_disable();
bc0c38d1
SR
2347 if (current_trace && current_trace->reset)
2348 current_trace->reset(tr);
2349
2350 current_trace = t;
1c80025a
FW
2351 if (t->init) {
2352 ret = t->init(tr);
2353 if (ret)
2354 goto out;
2355 }
bc0c38d1 2356
9f029e83 2357 trace_branch_enable(tr);
bc0c38d1
SR
2358 out:
2359 mutex_unlock(&trace_types_lock);
2360
d9e54076
PZ
2361 return ret;
2362}
2363
2364static ssize_t
2365tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2366 size_t cnt, loff_t *ppos)
2367{
2368 char buf[max_tracer_type_len+1];
2369 int i;
2370 size_t ret;
e6e7a65a
FW
2371 int err;
2372
2373 ret = cnt;
d9e54076
PZ
2374
2375 if (cnt > max_tracer_type_len)
2376 cnt = max_tracer_type_len;
2377
2378 if (copy_from_user(&buf, ubuf, cnt))
2379 return -EFAULT;
2380
2381 buf[cnt] = 0;
2382
2383 /* strip ending whitespace. */
2384 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2385 buf[i] = 0;
2386
e6e7a65a
FW
2387 err = tracing_set_tracer(buf);
2388 if (err)
2389 return err;
d9e54076 2390
e6e7a65a 2391 filp->f_pos += ret;
bc0c38d1 2392
c2931e05 2393 return ret;
bc0c38d1
SR
2394}
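/*
 * Usage sketch: select a tracer by name; trailing whitespace from echo
 * is stripped by the loop above before the lookup:
 *
 *	# echo sched_switch > /debug/tracing/current_tracer
 *	# cat /debug/tracing/current_tracer
 *	sched_switch
 */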
2395
2396static ssize_t
2397tracing_max_lat_read(struct file *filp, char __user *ubuf,
2398 size_t cnt, loff_t *ppos)
2399{
2400 unsigned long *ptr = filp->private_data;
2401 char buf[64];
2402 int r;
2403
cffae437 2404 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 2405 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
2406 if (r > sizeof(buf))
2407 r = sizeof(buf);
4bf39a94 2408 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2409}
2410
2411static ssize_t
2412tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2413 size_t cnt, loff_t *ppos)
2414{
2415 long *ptr = filp->private_data;
bc0c38d1 2416 char buf[64];
c6caeeb1
SR
2417 long val;
2418 int ret;
bc0c38d1 2419
cffae437
SR
2420 if (cnt >= sizeof(buf))
2421 return -EINVAL;
bc0c38d1
SR
2422
2423 if (copy_from_user(&buf, ubuf, cnt))
2424 return -EFAULT;
2425
2426 buf[cnt] = 0;
2427
c6caeeb1
SR
2428 ret = strict_strtoul(buf, 10, &val);
2429 if (ret < 0)
2430 return ret;
bc0c38d1
SR
2431
2432 *ptr = val * 1000;
2433
2434 return cnt;
2435}
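/*
 * Usage sketch: this file is read and written in microseconds but
 * stored in nanoseconds (hence "*ptr = val * 1000" above); writing 0
 * is the usual way to start measuring afresh:
 *
 *	# echo 0 > /debug/tracing/tracing_max_latency
 */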
2436
b3806b43
SR
2437static atomic_t tracing_reader;
2438
2439static int tracing_open_pipe(struct inode *inode, struct file *filp)
2440{
2441 struct trace_iterator *iter;
2442
2443 if (tracing_disabled)
2444 return -ENODEV;
2445
2446	/* We only allow one reader of the pipe */
2447 if (atomic_inc_return(&tracing_reader) != 1) {
2448 atomic_dec(&tracing_reader);
2449 return -EBUSY;
2450 }
2451
2452 /* create a buffer to store the information to pass to userspace */
2453 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2454 if (!iter)
2455 return -ENOMEM;
2456
4462344e
RR
2457 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
2458 kfree(iter);
2459 return -ENOMEM;
2460 }
2461
107bad8b 2462 mutex_lock(&trace_types_lock);
a309720c
SR
2463
2464 /* trace pipe does not show start of buffer */
4462344e 2465 cpumask_setall(iter->started);
a309720c 2466
b3806b43 2467 iter->tr = &global_trace;
72829bc3 2468 iter->trace = current_trace;
b3806b43
SR
2469 filp->private_data = iter;
2470
107bad8b
SR
2471 if (iter->trace->pipe_open)
2472 iter->trace->pipe_open(iter);
2473 mutex_unlock(&trace_types_lock);
2474
b3806b43
SR
2475 return 0;
2476}
2477
2478static int tracing_release_pipe(struct inode *inode, struct file *file)
2479{
2480 struct trace_iterator *iter = file->private_data;
2481
4462344e 2482 free_cpumask_var(iter->started);
b3806b43
SR
2483 kfree(iter);
2484 atomic_dec(&tracing_reader);
2485
2486 return 0;
2487}
2488
2a2cc8f7
SSP
2489static unsigned int
2490tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2491{
2492 struct trace_iterator *iter = filp->private_data;
2493
2494 if (trace_flags & TRACE_ITER_BLOCK) {
2495 /*
2496 * Always select as readable when in blocking mode
2497 */
2498 return POLLIN | POLLRDNORM;
afc2abc0 2499 } else {
2a2cc8f7
SSP
2500 if (!trace_empty(iter))
2501 return POLLIN | POLLRDNORM;
2502 poll_wait(filp, &trace_wait, poll_table);
2503 if (!trace_empty(iter))
2504 return POLLIN | POLLRDNORM;
2505
2506 return 0;
2507 }
2508}
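/*
 * Userspace sketch (hypothetical fd): trace_pipe supports poll();
 * unless TRACE_ITER_BLOCK is set, POLLIN is reported only when the
 * buffer actually holds entries:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	/+ fd is an open trace_pipe descriptor +/
 */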
2509
b3806b43
SR
2510/*
2511 * Consumer reader.
2512 */
2513static ssize_t
2514tracing_read_pipe(struct file *filp, char __user *ubuf,
2515 size_t cnt, loff_t *ppos)
2516{
2517 struct trace_iterator *iter = filp->private_data;
6c6c2796 2518 ssize_t sret;
b3806b43
SR
2519
2520 /* return any leftover data */
6c6c2796
PP
2521 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2522 if (sret != -EBUSY)
2523 return sret;
b3806b43 2524
6c6c2796 2525 trace_seq_reset(&iter->seq);
b3806b43 2526
107bad8b
SR
2527 mutex_lock(&trace_types_lock);
2528 if (iter->trace->read) {
6c6c2796
PP
2529 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2530 if (sret)
107bad8b 2531 goto out;
107bad8b
SR
2532 }
2533
9ff4b974
PP
2534waitagain:
2535 sret = 0;
b3806b43 2536 while (trace_empty(iter)) {
2dc8f095 2537
107bad8b 2538 if ((filp->f_flags & O_NONBLOCK)) {
6c6c2796 2539 sret = -EAGAIN;
107bad8b
SR
2540 goto out;
2541 }
2dc8f095 2542
b3806b43
SR
2543 /*
2544	 * This is a makeshift waitqueue. The reason we don't use
2545	 * an actual wait queue is that:
2546	 * 1) we only ever have one waiter
2547	 * 2) tracing traces all functions, and we don't want
2548	 * the overhead of calling wake_up and friends
2549	 * (and of tracing them too)
2550	 * Anyway, this is a really primitive wakeup.
2551 */
2552 set_current_state(TASK_INTERRUPTIBLE);
2553 iter->tr->waiter = current;
2554
107bad8b
SR
2555 mutex_unlock(&trace_types_lock);
2556
9fe068e9
IM
2557 /* sleep for 100 msecs, and try again. */
2558 schedule_timeout(HZ/10);
b3806b43 2559
107bad8b
SR
2560 mutex_lock(&trace_types_lock);
2561
b3806b43
SR
2562 iter->tr->waiter = NULL;
2563
107bad8b 2564 if (signal_pending(current)) {
6c6c2796 2565 sret = -EINTR;
107bad8b
SR
2566 goto out;
2567 }
b3806b43 2568
84527997 2569 if (iter->trace != current_trace)
107bad8b 2570 goto out;
84527997 2571
b3806b43
SR
2572 /*
2573 * We block until we read something and tracing is disabled.
2574 * We still block if tracing is disabled, but we have never
2575 * read anything. This allows a user to cat this file, and
2576 * then enable tracing. But after we have read something,
2577 * we give an EOF when tracing is again disabled.
2578 *
2579 * iter->pos will be 0 if we haven't read anything.
2580 */
2581 if (!tracer_enabled && iter->pos)
2582 break;
2583
2584 continue;
2585 }
2586
2587 /* stop when tracing is finished */
2588 if (trace_empty(iter))
107bad8b 2589 goto out;
b3806b43
SR
2590
2591 if (cnt >= PAGE_SIZE)
2592 cnt = PAGE_SIZE - 1;
2593
53d0aa77 2594 /* reset all but tr, trace, and overruns */
53d0aa77
SR
2595 memset(&iter->seq, 0,
2596 sizeof(struct trace_iterator) -
2597 offsetof(struct trace_iterator, seq));
4823ed7e 2598 iter->pos = -1;
b3806b43 2599
088b1e42 2600 while (find_next_entry_inc(iter) != NULL) {
2c4f035f 2601 enum print_line_t ret;
088b1e42
SR
2602 int len = iter->seq.len;
2603
f9896bf3 2604 ret = print_trace_line(iter);
2c4f035f 2605 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42
SR
2606 /* don't print partial lines */
2607 iter->seq.len = len;
b3806b43 2608 break;
088b1e42 2609 }
b3806b43
SR
2610
2611 trace_consume(iter);
2612
2613 if (iter->seq.len >= cnt)
2614 break;
b3806b43
SR
2615 }
2616
b3806b43 2617 /* Now copy what we have to the user */
6c6c2796
PP
2618 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2619 if (iter->seq.readpos >= iter->seq.len)
b3806b43 2620 trace_seq_reset(&iter->seq);
9ff4b974
PP
2621
2622 /*
2623	 * If there was nothing to send to the user, in spite of consuming trace
2624 * entries, go back to wait for more entries.
2625 */
6c6c2796 2626 if (sret == -EBUSY)
9ff4b974 2627 goto waitagain;
b3806b43 2628
107bad8b
SR
2629out:
2630 mutex_unlock(&trace_types_lock);
2631
6c6c2796 2632 return sret;
b3806b43
SR
2633}
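/*
 * Usage sketch: unlike "trace", trace_pipe is a consuming reader:
 * trace_consume() above removes each printed entry from the ring
 * buffer, and reads block (without O_NONBLOCK) while it is empty:
 *
 *	# cat /debug/tracing/trace_pipe > /tmp/trace.live &
 */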
2634
a98a3c3f
SR
2635static ssize_t
2636tracing_entries_read(struct file *filp, char __user *ubuf,
2637 size_t cnt, loff_t *ppos)
2638{
2639 struct trace_array *tr = filp->private_data;
2640 char buf[64];
2641 int r;
2642
1696b2b0 2643 r = sprintf(buf, "%lu\n", tr->entries >> 10);
a98a3c3f
SR
2644 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2645}
2646
2647static ssize_t
2648tracing_entries_write(struct file *filp, const char __user *ubuf,
2649 size_t cnt, loff_t *ppos)
2650{
2651 unsigned long val;
2652 char buf[64];
bf5e6519 2653 int ret, cpu;
a98a3c3f 2654
cffae437
SR
2655 if (cnt >= sizeof(buf))
2656 return -EINVAL;
a98a3c3f
SR
2657
2658 if (copy_from_user(&buf, ubuf, cnt))
2659 return -EFAULT;
2660
2661 buf[cnt] = 0;
2662
c6caeeb1
SR
2663 ret = strict_strtoul(buf, 10, &val);
2664 if (ret < 0)
2665 return ret;
a98a3c3f
SR
2666
2667 /* must have at least 1 entry */
2668 if (!val)
2669 return -EINVAL;
2670
2671 mutex_lock(&trace_types_lock);
2672
c76f0694 2673 tracing_stop();
a98a3c3f 2674
bf5e6519
SR
2675 /* disable all cpu buffers */
2676 for_each_tracing_cpu(cpu) {
2677 if (global_trace.data[cpu])
2678 atomic_inc(&global_trace.data[cpu]->disabled);
2679 if (max_tr.data[cpu])
2680 atomic_inc(&max_tr.data[cpu]->disabled);
2681 }
2682
1696b2b0
SR
2683 /* value is in KB */
2684 val <<= 10;
2685
3928a8a2
SR
2686 if (val != global_trace.entries) {
2687 ret = ring_buffer_resize(global_trace.buffer, val);
2688 if (ret < 0) {
2689 cnt = ret;
3eefae99
SR
2690 goto out;
2691 }
2692
3928a8a2
SR
2693 ret = ring_buffer_resize(max_tr.buffer, val);
2694 if (ret < 0) {
2695 int r;
2696 cnt = ret;
2697 r = ring_buffer_resize(global_trace.buffer,
2698 global_trace.entries);
2699 if (r < 0) {
2700	/* AARGH! We are left with a
2701	 * different-sized max buffer!!!! */
2702 WARN_ON(1);
2703 tracing_disabled = 1;
a98a3c3f 2704 }
3928a8a2 2705 goto out;
a98a3c3f 2706 }
3eefae99 2707
3928a8a2 2708 global_trace.entries = val;
a98a3c3f
SR
2709 }
2710
2711 filp->f_pos += cnt;
2712
19384c03
SR
2713 /* If check pages failed, return ENOMEM */
2714 if (tracing_disabled)
2715 cnt = -ENOMEM;
a98a3c3f 2716 out:
bf5e6519
SR
2717 for_each_tracing_cpu(cpu) {
2718 if (global_trace.data[cpu])
2719 atomic_dec(&global_trace.data[cpu]->disabled);
2720 if (max_tr.data[cpu])
2721 atomic_dec(&max_tr.data[cpu]->disabled);
2722 }
2723
c76f0694 2724 tracing_start();
a98a3c3f
SR
2725 max_tr.entries = global_trace.entries;
2726 mutex_unlock(&trace_types_lock);
2727
2728 return cnt;
2729}
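/*
 * Usage sketch: buffer_size_kb is expressed in kilobytes (the
 * "val <<= 10" above); tracing is stopped and all cpu buffers are
 * disabled for the duration of the resize, and the readback may be
 * rounded by the ring buffer:
 *
 *	# echo 1024 > /debug/tracing/buffer_size_kb
 *	# cat /debug/tracing/buffer_size_kb
 *	1024
 */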
2730
5bf9a1ee
PP
2731static int mark_printk(const char *fmt, ...)
2732{
2733 int ret;
2734 va_list args;
2735 va_start(args, fmt);
1fd8f2a3 2736 ret = trace_vprintk(0, -1, fmt, args);
5bf9a1ee
PP
2737 va_end(args);
2738 return ret;
2739}
2740
2741static ssize_t
2742tracing_mark_write(struct file *filp, const char __user *ubuf,
2743 size_t cnt, loff_t *fpos)
2744{
2745 char *buf;
2746 char *end;
5bf9a1ee 2747
c76f0694 2748 if (tracing_disabled)
5bf9a1ee
PP
2749 return -EINVAL;
2750
2751 if (cnt > TRACE_BUF_SIZE)
2752 cnt = TRACE_BUF_SIZE;
2753
2754 buf = kmalloc(cnt + 1, GFP_KERNEL);
2755 if (buf == NULL)
2756 return -ENOMEM;
2757
2758 if (copy_from_user(buf, ubuf, cnt)) {
2759 kfree(buf);
2760 return -EFAULT;
2761 }
2762
2763	/* Cut off at the first NUL or newline. */
2764 buf[cnt] = '\0';
2765 end = strchr(buf, '\n');
2766 if (end)
2767 *end = '\0';
2768
2769 cnt = mark_printk("%s\n", buf);
2770 kfree(buf);
2771 *fpos += cnt;
2772
2773 return cnt;
2774}
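/*
 * Usage sketch: trace_marker injects userspace annotations into the
 * trace stream; input is cut at the first newline above:
 *
 *	# echo "hello from userspace" > /debug/tracing/trace_marker
 */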
2775
bc0c38d1 2776static struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
2777 .open = tracing_open_generic,
2778 .read = tracing_max_lat_read,
2779 .write = tracing_max_lat_write,
bc0c38d1
SR
2780};
2781
2782static struct file_operations tracing_ctrl_fops = {
4bf39a94
IM
2783 .open = tracing_open_generic,
2784 .read = tracing_ctrl_read,
2785 .write = tracing_ctrl_write,
bc0c38d1
SR
2786};
2787
2788static struct file_operations set_tracer_fops = {
4bf39a94
IM
2789 .open = tracing_open_generic,
2790 .read = tracing_set_trace_read,
2791 .write = tracing_set_trace_write,
bc0c38d1
SR
2792};
2793
b3806b43 2794static struct file_operations tracing_pipe_fops = {
4bf39a94 2795 .open = tracing_open_pipe,
2a2cc8f7 2796 .poll = tracing_poll_pipe,
4bf39a94
IM
2797 .read = tracing_read_pipe,
2798 .release = tracing_release_pipe,
b3806b43
SR
2799};
2800
a98a3c3f
SR
2801static struct file_operations tracing_entries_fops = {
2802 .open = tracing_open_generic,
2803 .read = tracing_entries_read,
2804 .write = tracing_entries_write,
2805};
2806
5bf9a1ee 2807static struct file_operations tracing_mark_fops = {
43a15386 2808 .open = tracing_open_generic,
5bf9a1ee
PP
2809 .write = tracing_mark_write,
2810};
2811
bc0c38d1
SR
2812#ifdef CONFIG_DYNAMIC_FTRACE
2813
b807c3d0
SR
2814int __weak ftrace_arch_read_dyn_info(char *buf, int size)
2815{
2816 return 0;
2817}
2818
bc0c38d1 2819static ssize_t
b807c3d0 2820tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
2821 size_t cnt, loff_t *ppos)
2822{
a26a2a27
SR
2823 static char ftrace_dyn_info_buffer[1024];
2824 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 2825 unsigned long *p = filp->private_data;
b807c3d0 2826 char *buf = ftrace_dyn_info_buffer;
a26a2a27 2827 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
2828 int r;
2829
b807c3d0
SR
2830 mutex_lock(&dyn_info_mutex);
2831 r = sprintf(buf, "%ld ", *p);
4bf39a94 2832
a26a2a27 2833 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
2834 buf[r++] = '\n';
2835
2836 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2837
2838 mutex_unlock(&dyn_info_mutex);
2839
2840 return r;
bc0c38d1
SR
2841}
2842
b807c3d0 2843static struct file_operations tracing_dyn_info_fops = {
4bf39a94 2844 .open = tracing_open_generic,
b807c3d0 2845 .read = tracing_read_dyn_info,
bc0c38d1
SR
2846};
2847#endif
2848
2849static struct dentry *d_tracer;
2850
2851struct dentry *tracing_init_dentry(void)
2852{
2853 static int once;
2854
2855 if (d_tracer)
2856 return d_tracer;
2857
2858 d_tracer = debugfs_create_dir("tracing", NULL);
2859
2860 if (!d_tracer && !once) {
2861 once = 1;
2862 pr_warning("Could not create debugfs directory 'tracing'\n");
2863 return NULL;
2864 }
2865
2866 return d_tracer;
2867}
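/*
 * Usage sketch (hypothetical file name): tracer plugins call this to
 * hang their own entries off the shared directory, the same pattern
 * tracer_init_debugfs() uses below:
 *
 *	struct dentry *d = tracing_init_dentry();
 *	debugfs_create_file("my_stat", 0444, d, NULL, &my_fops);
 */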
2868
60a11774
SR
2869#ifdef CONFIG_FTRACE_SELFTEST
2870/* Let selftest have access to static functions in this file */
2871#include "trace_selftest.c"
2872#endif
2873
b5ad384e 2874static __init int tracer_init_debugfs(void)
bc0c38d1
SR
2875{
2876 struct dentry *d_tracer;
2877 struct dentry *entry;
2878
2879 d_tracer = tracing_init_dentry();
2880
2881 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2882 &global_trace, &tracing_ctrl_fops);
2883 if (!entry)
2884 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2885
ee6bce52 2886 entry = debugfs_create_file("trace_options", 0644, d_tracer,
bc0c38d1
SR
2887 NULL, &tracing_iter_fops);
2888 if (!entry)
ee6bce52 2889 pr_warning("Could not create debugfs 'trace_options' entry\n");
bc0c38d1 2890
c7078de1
IM
2891 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2892 NULL, &tracing_cpumask_fops);
2893 if (!entry)
2894 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
2895
bc0c38d1
SR
2896 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2897 &global_trace, &tracing_lt_fops);
2898 if (!entry)
2899 pr_warning("Could not create debugfs 'latency_trace' entry\n");
2900
2901 entry = debugfs_create_file("trace", 0444, d_tracer,
2902 &global_trace, &tracing_fops);
2903 if (!entry)
2904 pr_warning("Could not create debugfs 'trace' entry\n");
2905
2906 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2907 &global_trace, &show_traces_fops);
2908 if (!entry)
98a983aa 2909 pr_warning("Could not create debugfs 'available_tracers' entry\n");
bc0c38d1
SR
2910
2911 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2912 &global_trace, &set_tracer_fops);
2913 if (!entry)
98a983aa 2914 pr_warning("Could not create debugfs 'current_tracer' entry\n");
bc0c38d1
SR
2915
2916 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2917 &tracing_max_latency,
2918 &tracing_max_lat_fops);
2919 if (!entry)
2920 pr_warning("Could not create debugfs "
2921 "'tracing_max_latency' entry\n");
2922
2923 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2924 &tracing_thresh, &tracing_max_lat_fops);
2925 if (!entry)
2926 pr_warning("Could not create debugfs "
98a983aa 2927 "'tracing_thresh' entry\n");
7bd2f24c
IM
2928 entry = debugfs_create_file("README", 0644, d_tracer,
2929 NULL, &tracing_readme_fops);
2930 if (!entry)
2931 pr_warning("Could not create debugfs 'README' entry\n");
2932
b3806b43
SR
2933 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2934 NULL, &tracing_pipe_fops);
2935 if (!entry)
2936 pr_warning("Could not create debugfs "
98a983aa 2937 "'trace_pipe' entry\n");
bc0c38d1 2938
a94c80e7 2939 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
a98a3c3f
SR
2940 &global_trace, &tracing_entries_fops);
2941 if (!entry)
2942 pr_warning("Could not create debugfs "
a94c80e7 2943 "'buffer_size_kb' entry\n");
a98a3c3f 2944
5bf9a1ee
PP
2945 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2946 NULL, &tracing_mark_fops);
2947 if (!entry)
2948 pr_warning("Could not create debugfs "
2949 "'trace_marker' entry\n");
2950
bc0c38d1
SR
2951#ifdef CONFIG_DYNAMIC_FTRACE
2952 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2953 &ftrace_update_tot_cnt,
b807c3d0 2954 &tracing_dyn_info_fops);
bc0c38d1
SR
2955 if (!entry)
2956 pr_warning("Could not create debugfs "
2957 "'dyn_ftrace_total_info' entry\n");
2958#endif
d618b3e6
IM
2959#ifdef CONFIG_SYSPROF_TRACER
2960 init_tracer_sysprof_debugfs(d_tracer);
2961#endif
b5ad384e 2962 return 0;
bc0c38d1
SR
2963}
2964
1fd8f2a3 2965int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
dd0e545f 2966{
dd0e545f
SR
2967 static DEFINE_SPINLOCK(trace_buf_lock);
2968 static char trace_buf[TRACE_BUF_SIZE];
f09ce573 2969
3928a8a2 2970 struct ring_buffer_event *event;
f09ce573 2971 struct trace_array *tr = &global_trace;
dd0e545f 2972 struct trace_array_cpu *data;
38697053 2973 int cpu, len = 0, size, pc;
e726f5f9
IM
2974 struct print_entry *entry;
2975 unsigned long irq_flags;
dd0e545f 2976
8e1b82e0 2977 if (tracing_disabled || tracing_selftest_running)
dd0e545f
SR
2978 return 0;
2979
38697053
SR
2980 pc = preempt_count();
2981 preempt_disable_notrace();
dd0e545f
SR
2982 cpu = raw_smp_processor_id();
2983 data = tr->data[cpu];
dd0e545f 2984
3ea2e6d7 2985 if (unlikely(atomic_read(&data->disabled)))
dd0e545f
SR
2986 goto out;
2987
380c4b14
FW
2988 pause_graph_tracing();
2989 spin_lock_irqsave(&trace_buf_lock, irq_flags);
801fe400 2990 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
dd0e545f
SR
2991
2992 len = min(len, TRACE_BUF_SIZE-1);
2993 trace_buf[len] = 0;
2994
777e208d
SR
2995 size = sizeof(*entry) + len + 1;
2996 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
3928a8a2
SR
2997 if (!event)
2998 goto out_unlock;
777e208d 2999 entry = ring_buffer_event_data(event);
e726f5f9 3000 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
777e208d
SR
3001 entry->ent.type = TRACE_PRINT;
3002 entry->ip = ip;
1fd8f2a3 3003 entry->depth = depth;
dd0e545f 3004
777e208d
SR
3005 memcpy(&entry->buf, trace_buf, len);
3006 entry->buf[len] = 0;
3928a8a2 3007 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
dd0e545f 3008
3928a8a2 3009 out_unlock:
380c4b14
FW
3010 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3011 unpause_graph_tracing();
dd0e545f 3012 out:
38697053 3013 preempt_enable_notrace();
dd0e545f
SR
3014
3015 return len;
3016}
801fe400
PP
3017EXPORT_SYMBOL_GPL(trace_vprintk);
3018
3019int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3020{
3021 int ret;
3022 va_list ap;
3023
3024 if (!(trace_flags & TRACE_ITER_PRINTK))
3025 return 0;
3026
3027 va_start(ap, fmt);
21a8c466 3028 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
801fe400
PP
3029 va_end(ap);
3030 return ret;
3031}
dd0e545f
SR
3032EXPORT_SYMBOL_GPL(__ftrace_printk);
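/*
 * Usage sketch (kernel side): the ftrace_printk() wrapper expands to
 * __ftrace_printk() with the caller's instruction pointer, e.g.:
 *
 *	ftrace_printk("reached %s with state %d\n", name, state);
 *
 * When the "printk" trace option is off, the call returns without
 * recording anything (the TRACE_ITER_PRINTK check above).
 */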
3033
3f5a54e3
SR
3034static int trace_panic_handler(struct notifier_block *this,
3035 unsigned long event, void *unused)
3036{
944ac425
SR
3037 if (ftrace_dump_on_oops)
3038 ftrace_dump();
3f5a54e3
SR
3039 return NOTIFY_OK;
3040}
3041
3042static struct notifier_block trace_panic_notifier = {
3043 .notifier_call = trace_panic_handler,
3044 .next = NULL,
3045 .priority = 150 /* priority: INT_MAX >= x >= 0 */
3046};
3047
3048static int trace_die_handler(struct notifier_block *self,
3049 unsigned long val,
3050 void *data)
3051{
3052 switch (val) {
3053 case DIE_OOPS:
944ac425
SR
3054 if (ftrace_dump_on_oops)
3055 ftrace_dump();
3f5a54e3
SR
3056 break;
3057 default:
3058 break;
3059 }
3060 return NOTIFY_OK;
3061}
3062
3063static struct notifier_block trace_die_notifier = {
3064 .notifier_call = trace_die_handler,
3065 .priority = 200
3066};
3067
3068/*
3069	 * printk is set to a max of 1024; we really don't need it that big.
3070 * Nothing should be printing 1000 characters anyway.
3071 */
3072#define TRACE_MAX_PRINT 1000
3073
3074/*
3075 * Define here KERN_TRACE so that we have one place to modify
3076 * it if we decide to change what log level the ftrace dump
3077 * should be at.
3078 */
3079#define KERN_TRACE KERN_INFO
3080
3081static void
3082trace_printk_seq(struct trace_seq *s)
3083{
3084 /* Probably should print a warning here. */
3085 if (s->len >= 1000)
3086 s->len = 1000;
3087
3088	/* should be zero-terminated, but we are paranoid. */
3089 s->buffer[s->len] = 0;
3090
3091 printk(KERN_TRACE "%s", s->buffer);
3092
3093 trace_seq_reset(s);
3094}
3095
3f5a54e3
SR
3096void ftrace_dump(void)
3097{
3098 static DEFINE_SPINLOCK(ftrace_dump_lock);
3099 /* use static because iter can be a bit big for the stack */
3100 static struct trace_iterator iter;
3f5a54e3 3101 static int dump_ran;
d769041f
SR
3102 unsigned long flags;
3103 int cnt = 0, cpu;
3f5a54e3
SR
3104
3105 /* only one dump */
3106 spin_lock_irqsave(&ftrace_dump_lock, flags);
3107 if (dump_ran)
3108 goto out;
3109
3110 dump_ran = 1;
3111
3112 /* No turning back! */
81adbdc0 3113 ftrace_kill();
3f5a54e3 3114
d769041f
SR
3115 for_each_tracing_cpu(cpu) {
3116 atomic_inc(&global_trace.data[cpu]->disabled);
3117 }
3118
b54d3de9
TE
3119 /* don't look at user memory in panic mode */
3120 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
3121
3f5a54e3
SR
3122 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3123
3124 iter.tr = &global_trace;
3125 iter.trace = current_trace;
3126
3127 /*
3128	 * We need to stop all tracing on all CPUs to read
3129	 * the next buffer. This is a bit expensive, but it is
3130	 * not done often. We fill in all that we can read,
3131 * and then release the locks again.
3132 */
3133
3f5a54e3
SR
3134 while (!trace_empty(&iter)) {
3135
3136 if (!cnt)
3137 printk(KERN_TRACE "---------------------------------\n");
3138
3139 cnt++;
3140
3141 /* reset all but tr, trace, and overruns */
3142 memset(&iter.seq, 0,
3143 sizeof(struct trace_iterator) -
3144 offsetof(struct trace_iterator, seq));
3145 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3146 iter.pos = -1;
3147
3148 if (find_next_entry_inc(&iter) != NULL) {
3149 print_trace_line(&iter);
3150 trace_consume(&iter);
3151 }
3152
3153 trace_printk_seq(&iter.seq);
3154 }
3155
3156 if (!cnt)
3157 printk(KERN_TRACE " (ftrace buffer empty)\n");
3158 else
3159 printk(KERN_TRACE "---------------------------------\n");
3160
3f5a54e3
SR
3161 out:
3162 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3163}
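/*
 * Usage sketch (assuming the boot parameter of the same name): with
 *
 *	ftrace_dump_on_oops
 *
 * on the kernel command line, the panic/die notifiers above funnel an
 * oops into ftrace_dump(), spilling the ring buffer to the console.
 */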
3164
3928a8a2 3165__init static int tracer_alloc_buffers(void)
bc0c38d1 3166{
4c11d7ae 3167 struct trace_array_cpu *data;
4c11d7ae 3168 int i;
9e01c1b7 3169 int ret = -ENOMEM;
4c11d7ae 3170
9e01c1b7
RR
3171 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
3172 goto out;
3173
3174 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
3175 goto out_free_buffer_mask;
4c11d7ae 3176
9e01c1b7
RR
3177 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
3178 cpumask_copy(tracing_cpumask, cpu_all_mask);
3179
3180 /* TODO: make the number of buffers hot pluggable with CPUS */
3928a8a2
SR
3181 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3182 TRACE_BUFFER_FLAGS);
3183 if (!global_trace.buffer) {
3184 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3185 WARN_ON(1);
9e01c1b7 3186 goto out_free_cpumask;
4c11d7ae 3187 }
3928a8a2 3188 global_trace.entries = ring_buffer_size(global_trace.buffer);
4c11d7ae 3189
9e01c1b7 3190
4c11d7ae 3191#ifdef CONFIG_TRACER_MAX_TRACE
3928a8a2
SR
3192 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3193 TRACE_BUFFER_FLAGS);
3194 if (!max_tr.buffer) {
3195 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3196 WARN_ON(1);
3197 ring_buffer_free(global_trace.buffer);
9e01c1b7 3198 goto out_free_cpumask;
4c11d7ae 3199 }
3928a8a2
SR
3200 max_tr.entries = ring_buffer_size(max_tr.buffer);
3201 WARN_ON(max_tr.entries != global_trace.entries);
a98a3c3f 3202#endif
ab46428c 3203
4c11d7ae 3204 /* Allocate the first page for all buffers */
ab46428c 3205 for_each_tracing_cpu(i) {
4c11d7ae 3206 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
bc0c38d1 3207 max_tr.data[i] = &per_cpu(max_data, i);
4c11d7ae 3208 }
bc0c38d1 3209
bc0c38d1
SR
3210 trace_init_cmdlines();
3211
43a15386 3212 register_tracer(&nop_trace);
b5ad384e
FW
3213#ifdef CONFIG_BOOT_TRACER
3214 register_tracer(&boot_tracer);
3215 current_trace = &boot_tracer;
3216 current_trace->init(&global_trace);
3217#else
43a15386 3218 current_trace = &nop_trace;
b5ad384e 3219#endif
60a11774
SR
3220 /* All seems OK, enable tracing */
3221 tracing_disabled = 0;
3928a8a2 3222
3f5a54e3
SR
3223 atomic_notifier_chain_register(&panic_notifier_list,
3224 &trace_panic_notifier);
3225
3226 register_die_notifier(&trace_die_notifier);
9e01c1b7 3227 ret = 0;
3f5a54e3 3228
9e01c1b7
RR
3229out_free_cpumask:
3230 free_cpumask_var(tracing_cpumask);
3231out_free_buffer_mask:
3232 free_cpumask_var(tracing_buffer_mask);
3233out:
3234 return ret;
bc0c38d1 3235}
b5ad384e
FW
3236early_initcall(tracer_alloc_buffers);
3237fs_initcall(tracer_init_debugfs);