/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

static DEFINE_MUTEX(trace_event_mutex);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
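
/*
 * Illustrative usage (not part of this file): an output handler
 * typically builds a line piece by piece and treats a zero return
 * as "the line did not fit", e.g.
 *
 *	if (!trace_seq_printf(s, "ip=%lx\n", ip))
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 * The caller can then flush the sequence buffer and retry the entry.
 */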

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data = mem;
	int i, j;

	/*
	 * Two hex digits are emitted per byte plus a trailing space,
	 * so callers must keep 2 * len + 1 within HEX_CHARS.
	 */
#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len - 1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
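
/*
 * Note on byte order (illustrative): on a little-endian machine the
 * loop above walks memory from the last byte down, so dumping a u32
 * holding 0x12345678 prints "12345678 " -- the value reads naturally
 * regardless of host endianness.
 */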

int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
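
/*
 * Illustrative output (hypothetical addresses): with a backing file
 * and TRACE_ITER_SYM_ADDR set, the emitted fragment looks like
 *
 *	/lib/libc-2.9.so[+0x4a123] <00007f3c21d4a123>
 *
 * i.e. the mapped object's path, the offset into the mapping, and
 * the raw address formatted with IP_FMT.
 */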

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (i && ret)
			ret = trace_seq_puts(s, " <- ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
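
/*
 * Example (illustrative): TASK_RUNNING (state == 0) maps to bit 0 and
 * prints 'R'; TASK_INTERRUPTIBLE (state == 1) maps to bit 1 and prints
 * 'S'. Any state beyond the characters in TASK_STATE_TO_CHAR_STR falls
 * back to '?'.
 */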

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
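
/*
 * Because EVENT_HASHSIZE is a power of 2, the mask above is a cheap
 * modulo: e.g. type 300 hashes to 300 & 127 == 44 (illustrative).
 */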

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	mutex_lock(&trace_event_mutex);

	if (!event->type)
		event->type = next_event_type++;
	else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
	}

	if (ftrace_find_event(event->type))
		goto out;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head_rcu(&event->node, &event_hash[key]);

	ret = event->type;
out:
	mutex_unlock(&trace_event_mutex);

	return ret;
}
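
/*
 * Minimal registration sketch (illustrative; my_event_print is a
 * hypothetical handler, not part of this file). Leaving .type at 0
 * asks the core to assign a dynamic type number:
 *
 *	static struct trace_event my_event = {
 *		.trace	= my_event_print,
 *	};
 *
 *	if (!register_ftrace_event(&my_event))
 *		printk(KERN_WARNING "my_event failed to register\n");
 */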

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	mutex_lock(&trace_event_mutex);
	hlist_del(&event->node);
	mutex_unlock(&trace_event_mutex);

	return 0;
}

/*
 * Standard events
 */

int
trace_nop_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return 0;
}

/* TRACE_FN */
static int
trace_fn_latency(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;
	if (!trace_seq_puts(s, " ("))
		goto partial;
	if (!seq_print_ip_sym(s, field->parent_ip, flags))
		goto partial;
	if (!trace_seq_puts(s, ")\n"))
		goto partial;

	return 0;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int
trace_fn_trace(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return 0;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int
trace_fn_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	if (!trace_seq_printf(s, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_fn_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return 0;
}

static int
trace_fn_bin(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return 0;
}

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.latency_trace	= trace_fn_latency,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

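/*
 * Each handler above serves a different output mode of the tracer:
 * .trace for the default human-readable format, .latency_trace for
 * the latency format, and .raw/.hex/.binary for the raw, hex and bin
 * trace options. The same pattern repeats for the events below.
 */
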
/* TRACE_CTX and TRACE_WAKE */
static int
trace_ctxwake_print(struct trace_seq *s, struct trace_entry *entry, int flags,
		    char *delim)
{
	struct ctx_switch_entry *field;
	char *comm;
	int S, T;

	trace_assign_type(field, entry);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	comm = trace_find_cmdline(field->next_pid);
	if (!trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_ctx_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_print(s, entry, flags, "==>");
}

static int
trace_wake_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_print(s, entry, flags, "  +");
}

static int
trace_ctxwake_raw(struct trace_seq *s, struct trace_entry *entry, int flags,
		  char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, entry);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_ctx_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_raw(s, entry, flags, 0);
}

static int
trace_wake_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_raw(s, entry, flags, '+');
}

static int
trace_ctxwake_hex(struct trace_seq *s, struct trace_entry *entry, int flags,
		  char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, entry);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return 0;
}

static int
trace_ctx_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_hex(s, entry, flags, 0);
}

static int
trace_wake_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_hex(s, entry, flags, '+');
}

static int
trace_ctxwake_bin(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ctx_switch_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return 0;
}

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.latency_trace	= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.latency_trace	= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

/* TRACE_SPECIAL */
static int
trace_special_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct special_entry *field;

	trace_assign_type(field, entry);

	if (!trace_seq_printf(s, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_special_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct special_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return 0;
}

static int
trace_special_bin(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct special_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return 0;
}

static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.latency_trace	= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_STACK */

static int
trace_stack_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct stack_entry *field;
	int i;

	trace_assign_type(field, entry);

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		if (i) {
			if (!trace_seq_puts(s, " <= "))
				goto partial;
		}
		if (!seq_print_ip_sym(s, field->caller[i], flags))
			goto partial;
	}
	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return 0;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.latency_trace	= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_USER_STACK */
static int
trace_user_stack_print(struct trace_seq *s, struct trace_entry *entry,
		       int flags)
{
	struct userstack_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	if (!trace_seq_putc(s, '\n'))
		goto partial;

	return 0;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.latency_trace	= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_PRINT */
static int
trace_print_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct print_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return 0;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int
trace_print_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct print_entry *field;

	trace_assign_type(field, entry);

	/* the raw format embeds the ip directly; no symbol lookup here */
	if (!trace_seq_printf(s, "# %lx %s", field->ip, field->buf))
		goto partial;

	return 0;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.trace		= trace_print_print,
	.latency_trace	= trace_print_print,
	.raw		= trace_print_raw,
	.hex		= trace_nop_print,
	.binary		= trace_nop_print,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);