Merge branches 'tracing/kmemtrace2' and 'tracing/ftrace' into tracing/urgent
[deliverable/linux.git] / kernel / trace / trace_functions_graph.c
1 /*
2 *
3 * Function graph tracer.
4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/fs.h>
13
14 #include "trace.h"
15 #include "trace_output.h"
16
/* Spaces of indentation added per call-graph depth level */
#define TRACE_GRAPH_INDENT	2

/* Flag options (bits OR-ed into tracer_flags.val below) */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
24
/* User-togglable options exposed for this tracer (trace_options file) */
static struct tracer_opt trace_opts[] = {
	/* Display overruns ? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	{ } /* Empty entry */
};
36
/* Current option mask; consulted by every print helper below */
static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.opts = trace_opts
};
42
/*
 * pid on the last trace processed, per cpu; -1 means "none seen yet".
 * NOTE(review): plain array with no locking — presumably trace output
 * is consumed from a single reader context; confirm against callers.
 */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
45
46 static int graph_trace_init(struct trace_array *tr)
47 {
48 int cpu, ret;
49
50 for_each_online_cpu(cpu)
51 tracing_reset(tr, cpu);
52
53 ret = register_ftrace_graph(&trace_graph_return,
54 &trace_graph_entry);
55 if (ret)
56 return ret;
57 tracing_start_cmdline_record();
58
59 return 0;
60 }
61
/* Tracer teardown: stop cmdline recording, unhook the graph callbacks */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
67
/* Number of decimal digits needed to print @nb (cpu ids fit in 3) */
static inline int log10_cpu(int nb)
{
	if (nb / 100)
		return 3;

	return (nb / 10) ? 2 : 1;
}
76
77 static enum print_line_t
78 print_graph_cpu(struct trace_seq *s, int cpu)
79 {
80 int i;
81 int ret;
82 int log10_this = log10_cpu(cpu);
83 int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
84
85
86 /*
87 * Start with a space character - to make it stand out
88 * to the right a bit when trace output is pasted into
89 * email:
90 */
91 ret = trace_seq_printf(s, " ");
92
93 /*
94 * Tricky - we space the CPU field according to the max
95 * number of online CPUs. On a 2-cpu system it would take
96 * a maximum of 1 digit - on a 128 cpu system it would
97 * take up to 3 digits:
98 */
99 for (i = 0; i < log10_all - log10_this; i++) {
100 ret = trace_seq_printf(s, " ");
101 if (!ret)
102 return TRACE_TYPE_PARTIAL_LINE;
103 }
104 ret = trace_seq_printf(s, "%d) ", cpu);
105 if (!ret)
106 return TRACE_TYPE_PARTIAL_LINE;
107
108 return TRACE_TYPE_HANDLED;
109 }
110
111 #define TRACE_GRAPH_PROCINFO_LENGTH 14
112
113 static enum print_line_t
114 print_graph_proc(struct trace_seq *s, pid_t pid)
115 {
116 int i;
117 int ret;
118 int len;
119 char comm[8];
120 int spaces = 0;
121 /* sign + log10(MAX_INT) + '\0' */
122 char pid_str[11];
123
124 strncpy(comm, trace_find_cmdline(pid), 7);
125 comm[7] = '\0';
126 sprintf(pid_str, "%d", pid);
127
128 /* 1 stands for the "-" character */
129 len = strlen(comm) + strlen(pid_str) + 1;
130
131 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
132 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
133
134 /* First spaces to align center */
135 for (i = 0; i < spaces / 2; i++) {
136 ret = trace_seq_printf(s, " ");
137 if (!ret)
138 return TRACE_TYPE_PARTIAL_LINE;
139 }
140
141 ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
142 if (!ret)
143 return TRACE_TYPE_PARTIAL_LINE;
144
145 /* Last spaces to align center */
146 for (i = 0; i < spaces - (spaces / 2); i++) {
147 ret = trace_seq_printf(s, " ");
148 if (!ret)
149 return TRACE_TYPE_PARTIAL_LINE;
150 }
151 return TRACE_TYPE_HANDLED;
152 }
153
154
155 /* If the pid changed since the last trace, output this event */
156 static enum print_line_t
157 verif_pid(struct trace_seq *s, pid_t pid, int cpu)
158 {
159 pid_t prev_pid;
160 int ret;
161
162 if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
163 return TRACE_TYPE_HANDLED;
164
165 prev_pid = last_pid[cpu];
166 last_pid[cpu] = pid;
167
168 /*
169 * Context-switch trace line:
170
171 ------------------------------------------
172 | 1) migration/0--1 => sshd-1755
173 ------------------------------------------
174
175 */
176 ret = trace_seq_printf(s,
177 " ------------------------------------------\n");
178 if (!ret)
179 TRACE_TYPE_PARTIAL_LINE;
180
181 ret = print_graph_cpu(s, cpu);
182 if (ret == TRACE_TYPE_PARTIAL_LINE)
183 TRACE_TYPE_PARTIAL_LINE;
184
185 ret = print_graph_proc(s, prev_pid);
186 if (ret == TRACE_TYPE_PARTIAL_LINE)
187 TRACE_TYPE_PARTIAL_LINE;
188
189 ret = trace_seq_printf(s, " => ");
190 if (!ret)
191 TRACE_TYPE_PARTIAL_LINE;
192
193 ret = print_graph_proc(s, pid);
194 if (ret == TRACE_TYPE_PARTIAL_LINE)
195 TRACE_TYPE_PARTIAL_LINE;
196
197 ret = trace_seq_printf(s,
198 "\n ------------------------------------------\n\n");
199 if (!ret)
200 TRACE_TYPE_PARTIAL_LINE;
201
202 return ret;
203 }
204
205 static bool
206 trace_branch_is_leaf(struct trace_iterator *iter,
207 struct ftrace_graph_ent_entry *curr)
208 {
209 struct ring_buffer_iter *ring_iter;
210 struct ring_buffer_event *event;
211 struct ftrace_graph_ret_entry *next;
212
213 ring_iter = iter->buffer_iter[iter->cpu];
214
215 if (!ring_iter)
216 return false;
217
218 event = ring_buffer_iter_peek(ring_iter, NULL);
219
220 if (!event)
221 return false;
222
223 next = ring_buffer_event_data(event);
224
225 if (next->ent.type != TRACE_GRAPH_RET)
226 return false;
227
228 if (curr->ent.pid != next->ent.pid ||
229 curr->graph_ent.func != next->ret.func)
230 return false;
231
232 return true;
233 }
234
/*
 * Print an irq entry ("==========>") or exit ("<==========") marker when
 * @addr lies inside the irq-entry text section. Returns
 * TRACE_TYPE_UNHANDLED when @addr is not an irq entry point so the
 * caller prints its normal line instead.
 */
static enum print_line_t
print_graph_irq(struct trace_seq *s, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;

	/* Not in the irq-entry section: let the caller handle this event */
	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (type == TRACE_GRAPH_ENT) {
		/* Entering an interrupt handler */
		ret = trace_seq_printf(s, "==========> | ");
	} else {
		/*
		 * Leaving the handler: re-emit the optional columns so
		 * the exit marker lines up with regular trace lines.
		 */
		/* Cpu */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* Proc */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;

			ret = trace_seq_printf(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* No overhead marker for the exit line */
		if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		ret = trace_seq_printf(s, "<========== |\n");
	}
	/* Covers the last printf of whichever branch ran */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
278
/*
 * Print @duration (nanoseconds) as "<units>.<frac> us" padded to a
 * 7-character column and followed by "| ".
 */
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	/* do_div() divides @duration by 1000 IN PLACE; yields the remainder */
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		/* 8 - len caps the copy so total digits stay within 7 */
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "| ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;

}
324
/*
 * Signal an execution-time overhead marker in the output:
 * "! " above 100 msecs, "+ " above 10 msecs, blank otherwise.
 * Returns the trace_seq_printf() result (0 when the buffer is full).
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	const char *mark;

	if (duration > 100000ULL)
		mark = "! ";
	else if (duration > 10000ULL)
		mark = "+ ";
	else
		mark = " ";

	return trace_seq_printf(s, "%s", mark);
}
339
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	/*
	 * CONSUME the matching return event (which
	 * trace_branch_is_leaf() only peeked at) so it isn't printed
	 * again as a separate closing-brace line.
	 */
	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function name, indented by call depth */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Leaf calls render as a single "func();" line */
	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
388
389 static enum print_line_t
390 print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
391 struct trace_seq *s, pid_t pid, int cpu)
392 {
393 int i;
394 int ret;
395 struct ftrace_graph_ent *call = &entry->graph_ent;
396
397 /* No overhead */
398 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
399 ret = trace_seq_printf(s, " ");
400 if (!ret)
401 return TRACE_TYPE_PARTIAL_LINE;
402 }
403
404 /* Interrupt */
405 ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
406 if (ret == TRACE_TYPE_UNHANDLED) {
407 /* No time */
408 ret = trace_seq_printf(s, " | ");
409 if (!ret)
410 return TRACE_TYPE_PARTIAL_LINE;
411 } else {
412 if (ret == TRACE_TYPE_PARTIAL_LINE)
413 return TRACE_TYPE_PARTIAL_LINE;
414 }
415
416
417 /* Function */
418 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
419 ret = trace_seq_printf(s, " ");
420 if (!ret)
421 return TRACE_TYPE_PARTIAL_LINE;
422 }
423
424 ret = seq_print_ip_sym(s, call->func, 0);
425 if (!ret)
426 return TRACE_TYPE_PARTIAL_LINE;
427
428 ret = trace_seq_printf(s, "() {\n");
429 if (!ret)
430 return TRACE_TYPE_PARTIAL_LINE;
431
432 return TRACE_TYPE_HANDLED;
433 }
434
435 static enum print_line_t
436 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
437 struct trace_iterator *iter, int cpu)
438 {
439 int ret;
440 struct trace_entry *ent = iter->ent;
441
442 /* Pid */
443 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
444 return TRACE_TYPE_PARTIAL_LINE;
445
446 /* Cpu */
447 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
448 ret = print_graph_cpu(s, cpu);
449 if (ret == TRACE_TYPE_PARTIAL_LINE)
450 return TRACE_TYPE_PARTIAL_LINE;
451 }
452
453 /* Proc */
454 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
455 ret = print_graph_proc(s, ent->pid);
456 if (ret == TRACE_TYPE_PARTIAL_LINE)
457 return TRACE_TYPE_PARTIAL_LINE;
458
459 ret = trace_seq_printf(s, " | ");
460 if (!ret)
461 return TRACE_TYPE_PARTIAL_LINE;
462 }
463
464 if (trace_branch_is_leaf(iter, field))
465 return print_graph_entry_leaf(iter, field, s);
466 else
467 return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
468
469 }
470
/*
 * Print a function-return event: the closing brace of a nested entry,
 * with optional cpu/proc/overhead/duration columns, overrun count and
 * irq-exit marker.
 */
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;
	unsigned long long duration = trace->rettime - trace->calltime;

	/* Pid-change banner */
	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overhead marker ("! " / "+ " / blank) */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace, indented by call depth */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun count, if the option is enabled */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Irq-exit marker, when returning from an irq entry point */
	ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
538
/*
 * Print a TRACE_PRINT event as a C-style comment ("/ * msg * /"),
 * indented one level deeper than the current call depth.
 */
static enum print_line_t
print_graph_comment(struct print_entry *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	int i;
	int ret;

	/* Pid-change banner */
	if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, iter->cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead marker on a comment line */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No time column either */
	ret = trace_seq_printf(s, " | ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Indentation: one level deeper than the enclosing function */
	if (trace->depth > 0)
		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* %s", trace->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Strip ending newline by editing the seq buffer in place */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
605
606
607 enum print_line_t
608 print_graph_function(struct trace_iterator *iter)
609 {
610 struct trace_seq *s = &iter->seq;
611 struct trace_entry *entry = iter->ent;
612
613 switch (entry->type) {
614 case TRACE_GRAPH_ENT: {
615 struct ftrace_graph_ent_entry *field;
616 trace_assign_type(field, entry);
617 return print_graph_entry(field, s, iter,
618 iter->cpu);
619 }
620 case TRACE_GRAPH_RET: {
621 struct ftrace_graph_ret_entry *field;
622 trace_assign_type(field, entry);
623 return print_graph_return(&field->ret, s, entry, iter->cpu);
624 }
625 case TRACE_PRINT: {
626 struct print_entry *field;
627 trace_assign_type(field, entry);
628 return print_graph_comment(field, s, entry, iter);
629 }
630 default:
631 return TRACE_TYPE_UNHANDLED;
632 }
633 }
634
/*
 * Emit the two header lines, with columns matching whichever options
 * (cpu/proc/overhead) are currently enabled.
 */
static void print_graph_headers(struct seq_file *s)
{
	/* 1st line: column titles */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "CPU ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "TASK/PID ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
		seq_printf(s, "OVERHEAD/");
	seq_printf(s, "DURATION FUNCTION CALLS\n");

	/* 2nd line: column separators */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "| ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "| | ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		seq_printf(s, "| ");
		seq_printf(s, "| | | | |\n");
	} else
		seq_printf(s, " | | | | |\n");
}
/* Tracer registration record for the "function_graph" tracer */
static struct tracer graph_trace __read_mostly = {
	.name	     = "function_graph",
	.init	     = graph_trace_init,
	.reset	     = graph_trace_reset,
	.print_line = print_graph_function,
	.print_header = print_graph_headers,
	.flags		= &tracer_flags,
};
667
/* Register the tracer at boot (device initcall level) */
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
This page took 0.045662 seconds and 5 git commands to generate.