ee8d74840b88316c5d41aba0ccd0d931ebf2644d
[deliverable/linux.git] / kernel / trace / trace_output.c
1 /*
2 * trace_output.c
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
6 */
7
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11
12 #include "trace_output.h"
13
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE 128
16
17 DECLARE_RWSEM(trace_event_sem);
18
19 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
20
21 static int next_event_type = __TRACE_LAST_TYPE + 1;
22
23 #define EVENT_STORAGE_SIZE 128
24 static DEFINE_MUTEX(event_storage_mutex);
25 static char event_storage[EVENT_STORAGE_SIZE];
26
/*
 * trace_print_seq - move the contents of a trace_seq into a seq_file
 * @m: the seq_file to write into
 * @s: the trace_seq holding buffered trace data
 *
 * Hands at most one page (less the terminating byte) to seq_write().
 * The trace_seq is reset only when seq_write() accepted the data, so
 * an overflowing seq_file can retry later with the same contents.
 * Returns the seq_write() result (0 on success).
 */
int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	/* trace_seq buffers are one page; never hand over more than that */
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}
43
44 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
45 {
46 struct trace_seq *s = &iter->seq;
47 struct trace_entry *entry = iter->ent;
48 struct bputs_entry *field;
49 int ret;
50
51 trace_assign_type(field, entry);
52
53 ret = trace_seq_puts(s, field->str);
54 if (!ret)
55 return TRACE_TYPE_PARTIAL_LINE;
56
57 return TRACE_TYPE_HANDLED;
58 }
59
60 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
61 {
62 struct trace_seq *s = &iter->seq;
63 struct trace_entry *entry = iter->ent;
64 struct bprint_entry *field;
65 int ret;
66
67 trace_assign_type(field, entry);
68
69 ret = trace_seq_bprintf(s, field->fmt, field->buf);
70 if (!ret)
71 return TRACE_TYPE_PARTIAL_LINE;
72
73 return TRACE_TYPE_HANDLED;
74 }
75
76 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
77 {
78 struct trace_seq *s = &iter->seq;
79 struct trace_entry *entry = iter->ent;
80 struct print_entry *field;
81 int ret;
82
83 trace_assign_type(field, entry);
84
85 ret = trace_seq_puts(s, field->buf);
86 if (!ret)
87 return TRACE_TYPE_PARTIAL_LINE;
88
89 return TRACE_TYPE_HANDLED;
90 }
91
/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * It returns 0 if the trace oversizes the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	/* one byte is reserved for the terminating NUL */
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		/* overflow is sticky: all further writes fail until a reset */
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
131
/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 * @args: va_list of arguments for @fmt
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 *
 * Returns 0 when the output does not fit (and marks @s full);
 * otherwise returns the free space that was available before
 * writing. NOTE(review): this differs from trace_seq_printf(),
 * which returns 1 on success — callers appear to only test for
 * non-zero, but confirm before relying on the exact value.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	/* one byte is reserved for the terminating NUL */
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);
165
/*
 * trace_seq_bprintf - format a binary trace record (see vbin_printf)
 * @s: trace sequence descriptor
 * @fmt: printf format string
 * @binary: the pre-encoded argument block to expand via bstr_printf()
 *
 * Returns 0 on overflow (and marks @s full), otherwise the free space
 * that was available before writing — non-zero means success, matching
 * trace_seq_vprintf() rather than trace_seq_printf().
 */
int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	/* one byte is reserved for the terminating NUL */
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
186
187 /**
188 * trace_seq_puts - trace sequence printing of simple string
189 * @s: trace sequence descriptor
190 * @str: simple string to record
191 *
192 * The tracer may use either the sequence operations or its own
193 * copy to user routines. This function records a simple string
194 * into a special buffer (@s) for later retrieval by a sequencer
195 * or other mechanism.
196 */
197 int trace_seq_puts(struct trace_seq *s, const char *str)
198 {
199 int len = strlen(str);
200
201 if (s->full)
202 return 0;
203
204 if (len > ((PAGE_SIZE - 1) - s->len)) {
205 s->full = 1;
206 return 0;
207 }
208
209 memcpy(s->buffer + s->len, str, len);
210 s->len += len;
211
212 return len;
213 }
214
215 int trace_seq_putc(struct trace_seq *s, unsigned char c)
216 {
217 if (s->full)
218 return 0;
219
220 if (s->len >= (PAGE_SIZE - 1)) {
221 s->full = 1;
222 return 0;
223 }
224
225 s->buffer[s->len++] = c;
226
227 return 1;
228 }
229 EXPORT_SYMBOL(trace_seq_putc);
230
231 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
232 {
233 if (s->full)
234 return 0;
235
236 if (len > ((PAGE_SIZE - 1) - s->len)) {
237 s->full = 1;
238 return 0;
239 }
240
241 memcpy(s->buffer + s->len, mem, len);
242 s->len += len;
243
244 return len;
245 }
246
247 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
248 {
249 unsigned char hex[HEX_CHARS];
250 const unsigned char *data = mem;
251 int i, j;
252
253 if (s->full)
254 return 0;
255
256 #ifdef __BIG_ENDIAN
257 for (i = 0, j = 0; i < len; i++) {
258 #else
259 for (i = len-1, j = 0; i >= 0; i--) {
260 #endif
261 hex[j++] = hex_asc_hi(data[i]);
262 hex[j++] = hex_asc_lo(data[i]);
263 }
264 hex[j++] = ' ';
265
266 return trace_seq_putmem(s, hex, j);
267 }
268
269 void *trace_seq_reserve(struct trace_seq *s, size_t len)
270 {
271 void *ret;
272
273 if (s->full)
274 return NULL;
275
276 if (len > ((PAGE_SIZE - 1) - s->len)) {
277 s->full = 1;
278 return NULL;
279 }
280
281 ret = s->buffer + s->len;
282 s->len += len;
283
284 return ret;
285 }
286
/*
 * trace_seq_path - record a pathname into the trace_seq buffer
 * @s: trace sequence descriptor
 * @path: the path to resolve
 *
 * Returns 1 on success, 0 when the buffer is (or becomes) full.
 * A path d_path() cannot resolve is recorded as a single '?'.
 */
int trace_seq_path(struct trace_seq *s, const struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	/* d_path() builds the name at the END of the free space ... */
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		/* ... and mangle_path() shifts it down to s->len, escaping '\n' */
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	/* mangle_path() failed: treat as a full buffer */
	s->full = 1;
	return 0;
}
314
315 const char *
316 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
317 unsigned long flags,
318 const struct trace_print_flags *flag_array)
319 {
320 unsigned long mask;
321 const char *str;
322 const char *ret = p->buffer + p->len;
323 int i, first = 1;
324
325 for (i = 0; flag_array[i].name && flags; i++) {
326
327 mask = flag_array[i].mask;
328 if ((flags & mask) != mask)
329 continue;
330
331 str = flag_array[i].name;
332 flags &= ~mask;
333 if (!first && delim)
334 trace_seq_puts(p, delim);
335 else
336 first = 0;
337 trace_seq_puts(p, str);
338 }
339
340 /* check for left over flags */
341 if (flags) {
342 if (!first && delim)
343 trace_seq_puts(p, delim);
344 trace_seq_printf(p, "0x%lx", flags);
345 }
346
347 trace_seq_putc(p, 0);
348
349 return ret;
350 }
351 EXPORT_SYMBOL(ftrace_print_flags_seq);
352
353 const char *
354 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
355 const struct trace_print_flags *symbol_array)
356 {
357 int i;
358 const char *ret = p->buffer + p->len;
359
360 for (i = 0; symbol_array[i].name; i++) {
361
362 if (val != symbol_array[i].mask)
363 continue;
364
365 trace_seq_puts(p, symbol_array[i].name);
366 break;
367 }
368
369 if (ret == (const char *)(p->buffer + p->len))
370 trace_seq_printf(p, "0x%lx", val);
371
372 trace_seq_putc(p, 0);
373
374 return ret;
375 }
376 EXPORT_SYMBOL(ftrace_print_symbols_seq);
377
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of ftrace_print_symbols_seq(), only needed on 32-bit
 * where unsigned long cannot hold a u64 mask. Same contract: returns
 * a pointer to the NUL-terminated name (or hex fallback) inside @p.
 */
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			 const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	/* nothing was written: no symbol matched, print the raw value */
	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif
404
405 const char *
406 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
407 {
408 int i;
409 const char *ret = p->buffer + p->len;
410
411 for (i = 0; i < buf_len; i++)
412 trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
413
414 trace_seq_putc(p, 0);
415
416 return ret;
417 }
418 EXPORT_SYMBOL(ftrace_print_hex_seq);
419
/*
 * ftrace_raw_output_prep - common prologue for raw event output
 * @iter: the trace iterator
 * @trace_event: the registered output callbacks, embedded in an
 *               ftrace_event_call
 *
 * Verifies the entry type matches the event, resets the scratch
 * trace_seq and prints the "name: " prefix.
 *
 * NOTE(review): returns 0 on success but TRACE_TYPE_UNHANDLED /
 * TRACE_TYPE_PARTIAL_LINE on failure — callers seem to treat the
 * result as an enum print_line_t; confirm the 0 == success mix-in
 * is intended at the call sites.
 */
int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *trace_event)
{
	struct ftrace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;
	int ret;

	/* recover the event call this trace_event is embedded in */
	event = container_of(trace_event, struct ftrace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	/* start the scratch seq fresh for the per-event callbacks */
	trace_seq_init(p);
	ret = trace_seq_printf(s, "%s: ", event->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}
EXPORT_SYMBOL(ftrace_raw_output_prep);
445
446 static int ftrace_output_raw(struct trace_iterator *iter, char *name,
447 char *fmt, va_list ap)
448 {
449 struct trace_seq *s = &iter->seq;
450 int ret;
451
452 ret = trace_seq_printf(s, "%s: ", name);
453 if (!ret)
454 return TRACE_TYPE_PARTIAL_LINE;
455
456 ret = trace_seq_vprintf(s, fmt, ap);
457
458 if (!ret)
459 return TRACE_TYPE_PARTIAL_LINE;
460
461 return TRACE_TYPE_HANDLED;
462 }
463
/*
 * ftrace_output_call - varargs front end for ftrace_output_raw()
 * @iter: the trace iterator
 * @name: event name printed as the "name: " prefix
 * @fmt: printf format for the message body
 *
 * Returns TRACE_TYPE_HANDLED or TRACE_TYPE_PARTIAL_LINE (see
 * ftrace_output_raw()).
 */
int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = ftrace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_output_call);
476
/*
 * ftrace_event_define_field - define an array-typed event field
 * @call: the event the field belongs to
 * @type: element type name, formatted as "type[len]"
 * @len: array length
 * @item: field name
 * @offset: byte offset of the field in the record
 * @field_size: total size of the field in bytes
 * @sign: whether the element type is signed
 * @filter: filter type passed through to trace_define_field()
 *
 * Builds the "type[len]" string in the shared event_storage buffer;
 * event_storage_mutex serialises concurrent callers since the buffer
 * is a single global.
 */
int ftrace_event_define_field(struct ftrace_event_call *call,
			      char *type, int len, char *item, int offset,
			      int field_size, int sign, int filter)
{
	int ret;

	mutex_lock(&event_storage_mutex);
	snprintf(event_storage, sizeof(event_storage),
		 "%s[%d]", type, len);
	ret = trace_define_field(call, event_storage, item, offset,
				 field_size, sign, filter);
	mutex_unlock(&event_storage_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_event_define_field);
493
#ifdef CONFIG_KRETPROBES
/*
 * A kretprobed function's return address points at the kretprobe
 * trampoline, so symbol lookup yields "kretprobe_trampoline" instead
 * of anything meaningful — report that case explicitly.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
/* No kretprobes, no trampoline: pass the name straight through. */
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
510
/*
 * Print just the symbol name for @address through @fmt.
 * Without CONFIG_KALLSYMS nothing can be resolved, so report success
 * and print nothing (the trailing return 1 is only reachable then).
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	/* substitute a marker if this is really the kretprobe trampoline */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
526
/*
 * Like seq_print_sym_short() but uses sprint_symbol(), which also
 * includes the offset/size information in the resolved string.
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	/* only reachable without CONFIG_KALLSYMS */
	return 1;
}
542
543 #ifndef CONFIG_64BIT
544 # define IP_FMT "%08lx"
545 #else
546 # define IP_FMT "%016lx"
547 #endif
548
/*
 * seq_print_user_ip - print a user-space address with its mapping
 * @s: trace sequence descriptor
 * @mm: mm of the traced task, or NULL when unavailable
 * @ip: user-space instruction pointer
 * @sym_flags: TRACE_ITER_* flags controlling the output
 *
 * When @mm maps @ip to a file, prints "path[+offset]". The raw
 * address is appended when TRACE_ITER_SYM_ADDR is set or no backing
 * file was found. Returns 0 on a full buffer, non-zero otherwise.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		/* the vma tree must not change while we inspect it */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
580
/*
 * seq_print_userip_objs - print a recorded user stack trace
 * @entry: the userstack entry holding up to FTRACE_STACK_ENTRIES ips
 * @s: trace sequence descriptor
 * @sym_flags: TRACE_ITER_* flags; SYM_USEROBJ enables file/offset lookup
 *
 * Each caller address is printed as " => <addr/object>\n"; a zero ip
 * is printed as "??" and ULONG_MAX terminates the list early.
 * Returns 0 once the buffer fills up, non-zero otherwise.
 */
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX marks the end of the recorded stack */
		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_putc(s, '\n');
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);
	return ret;
}
627
628 int
629 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
630 {
631 int ret;
632
633 if (!ip)
634 return trace_seq_putc(s, '0');
635
636 if (sym_flags & TRACE_ITER_SYM_OFFSET)
637 ret = seq_print_sym_offset(s, "%s", ip);
638 else
639 ret = seq_print_sym_short(s, "%s", ip);
640
641 if (!ret)
642 return 0;
643
644 if (sym_flags & TRACE_ITER_SYM_ADDR)
645 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
646 return ret;
647 }
648
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	/* 'd' = irqs disabled, 'X' = arch can't report irq state */
	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	/* 'N' = both, 'n' = need-resched, 'p' = preempt-resched only */
	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	/* 'H' = hardirq nested in softirq, 'h' = hardirq, 's' = softirq */
	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	/* preempt depth in hex, '.' when preemption is enabled */
	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}
707
/*
 * Print the latency-format line prefix: "comm-pid cpu" followed by
 * the irq/preempt state characters from trace_print_lat_fmt().
 * Returns 0 on buffer overflow, non-zero otherwise.
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}
721
/* Relative delays above this (in usecs) get flagged with '!' */
static unsigned long preempt_mark_thresh_us = 100;

/*
 * lat_print_timestamp - print timestamps for the latency format
 * @iter: the trace iterator
 * @next_ts: timestamp of the next entry, to compute the relative delay
 *
 * Prints the absolute time since buffer start and the delta to the
 * next entry. Four layouts, chosen by TRACE_ITER_VERBOSE and whether
 * the clock counts in nanoseconds (TRACE_FILE_TIME_IN_NS).
 * Returns the trace_seq_printf() result (0 on overflow).
 */
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		/* split usecs into msec.usec for the verbose layout */
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		return trace_seq_printf(
				s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
				ns2usecs(iter->ts),
				abs_msec, abs_usec,
				rel_msec, rel_usec);
	} else if (verbose && !in_ns) {
		return trace_seq_printf(
				s, "[%016llx] %lld (+%lld): ",
				iter->ts, abs_ts, rel_ts);
	} else if (!verbose && in_ns) {
		/* '!' = delay above threshold, '+' = more than 1us */
		return trace_seq_printf(
				s, " %4lldus%c: ",
				abs_ts,
				rel_ts > preempt_mark_thresh_us ? '!' :
				  rel_ts > 1 ? '+' : ' ');
	} else { /* !verbose && !in_ns */
		return trace_seq_printf(s, " %4lld: ", abs_ts);
	}
}
763
/*
 * trace_print_context - print the standard per-entry line prefix
 * @iter: the trace iterator
 *
 * Emits "comm-pid [cpu]", the optional irq/preempt state (when
 * TRACE_ITER_IRQ_INFO is set) and the timestamp: "secs.usecs:" for
 * nanosecond clocks, the raw counter otherwise.
 * Returns 0 on buffer overflow, non-zero otherwise.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
			       comm, entry->pid, iter->cpu);
	if (!ret)
		return 0;

	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		/* do_div() returns the remainder (the usec part) */
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		return trace_seq_printf(s, " %12llu: ", iter->ts);
}
794
/*
 * trace_print_lat_context - print the latency-format line prefix
 * @iter: the trace iterator
 *
 * Peeks at the next entry (to compute the relative delay) and then
 * prints either the verbose per-entry header or the generic
 * comm/pid/cpu prefix, followed by the timestamps.
 * Returns 0 on buffer overflow, non-zero otherwise.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	/* no next entry: a zero delta is printed */
	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(
				s, "%16s %5d %3d %d %08x %08lx ",
				comm, entry->pid, iter->cpu, entry->flags,
				entry->preempt_count, iter->idx);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
	}

	if (ret)
		ret = lat_print_timestamp(iter, next_ts);

	return ret;
}
831
/* One-letter codes for the task state bits (see TASK_STATE_TO_CHAR_STR) */
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * Map a task state bitmask to its one-letter code; index 0 is running,
 * otherwise the lowest set bit selects the letter. '?' for out-of-range.
 */
static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	/* -1: don't index the terminating NUL */
	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
840
841 /**
842 * ftrace_find_event - find a registered event
843 * @type: the type of event to look for
844 *
845 * Returns an event of type @type otherwise NULL
846 * Called with trace_event_read_lock() held.
847 */
848 struct trace_event *ftrace_find_event(int type)
849 {
850 struct trace_event *event;
851 unsigned key;
852
853 key = type & (EVENT_HASHSIZE - 1);
854
855 hlist_for_each_entry(event, &event_hash[key], node) {
856 if (event->type == type)
857 return event;
858 }
859
860 return NULL;
861 }
862
/* All dynamically allocated events, kept sorted by type number */
static LIST_HEAD(ftrace_event_list);

/*
 * Find a reusable event type number once the simple counter has been
 * exhausted: walk the sorted list looking for the first gap left by a
 * freed event. On success *list is set to the node to insert after and
 * the free type number is returned; 0 means every number is taken.
 */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we used up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}
892
/* Reader-side lock for the event hash; pairs with trace_event_read_unlock() */
void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}
902
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	/* writer lock: protects the hash, the list and next_event_type */
	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			/* counter exhausted: look for a freed number */
			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		/* self-assigned types must come from the static enum */
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* fill in missing callbacks so output never dereferences NULL */
	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
983
/*
 * Used by module code with the trace_event_sem held for write.
 * Removes @event from both the hash and the sorted type list.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}
993
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Locked wrapper around __unregister_ftrace_event(). Always returns 0.
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_ftrace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
1007
1008 /*
1009 * Standard events
1010 */
1011
1012 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
1013 struct trace_event *event)
1014 {
1015 if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
1016 return TRACE_TYPE_PARTIAL_LINE;
1017
1018 return TRACE_TYPE_HANDLED;
1019 }
1020
/* TRACE_FN */
/*
 * Human-readable output for function events: "ip <-parent_ip\n",
 * with symbol resolution governed by @flags; the parent is printed
 * only when TRACE_ITER_PRINT_PARENT is set and a parent was recorded.
 */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_puts(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_putc(s, '\n'))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
1049
1050 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
1051 struct trace_event *event)
1052 {
1053 struct ftrace_entry *field;
1054
1055 trace_assign_type(field, iter->ent);
1056
1057 if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
1058 field->ip,
1059 field->parent_ip))
1060 return TRACE_TYPE_PARTIAL_LINE;
1061
1062 return TRACE_TYPE_HANDLED;
1063 }
1064
/*
 * Hex output for function events. SEQ_PUT_HEX_FIELD_RET emits the
 * field and, judging by the _RET suffix, returns from this function
 * on a full buffer — see trace_output.h for the macro definition.
 */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
1078
/*
 * Binary output for function events: the two addresses are copied as
 * raw fields (SEQ_PUT_FIELD_RET bails out on a full buffer).
 */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
1092
/* Output callbacks for TRACE_FN entries, one per output mode */
static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

/* Static registration record for the TRACE_FN type */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};
1104
/* TRACE_CTX an TRACE_WAKE */
/*
 * Shared human-readable output for sched switch/wakeup events:
 * "prev_pid:prio:state <delim> [cpu] next_pid:prio:state comm".
 * @delim distinguishes a switch ("==>") from a wakeup (" +").
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	/* only the next task's comm is resolved and printed */
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
1132
/* Context switch: "prev ==> next" */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

/* Wakeup: "waker  + wakee" */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}
1144
/*
 * Shared raw output for switch/wakeup events. @S overrides the
 * prev-task state character (wakeups pass '+'); 0 means derive it
 * from the recorded prev_state.
 */
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
1167
/* Raw context switch: state char taken from the entry */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

/* Raw wakeup: state char forced to '+' */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}
1179
1180
/*
 * Shared hex output for switch/wakeup events; @S as in
 * trace_ctxwake_raw(). Each macro bails out on a full buffer.
 */
static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}
1203
/* Hex context switch: state char taken from the entry */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

/* Hex wakeup: state char forced to '+' */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}
1215
/*
 * Binary output shared by switch and wakeup events: the six fields
 * are copied raw (note: next_cpu is not emitted in this mode).
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}
1233
/* Output callbacks for TRACE_CTX (sched switch) entries */
static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

/* Output callbacks for TRACE_WAKE (wakeup) entries; binary is shared */
static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};
1257
/* TRACE_STACK */

/*
 * Print a kernel stack trace entry as "<stack trace>" followed by one
 * " => symbol" line per recorded frame. The walk stops at ULONG_MAX
 * (end marker) or at the end of this entry's variable-size payload.
 */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	/* the caller array runs to the end of the recorded entry */
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, *p, flags))
			goto partial;
		if (!trace_seq_putc(s, '\n'))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};
1298
1299 /* TRACE_USER_STACK */
1300 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1301 int flags, struct trace_event *event)
1302 {
1303 struct userstack_entry *field;
1304 struct trace_seq *s = &iter->seq;
1305
1306 trace_assign_type(field, iter->ent);
1307
1308 if (!trace_seq_puts(s, "<user stack trace>\n"))
1309 goto partial;
1310
1311 if (!seq_print_userip_objs(field, s, flags))
1312 goto partial;
1313
1314 return TRACE_TYPE_HANDLED;
1315
1316 partial:
1317 return TRACE_TYPE_PARTIAL_LINE;
1318 }
1319
/* TRACE_USER_STACK only defines the default .trace output mode. */
static struct trace_event_functions trace_user_stack_funcs = {
	.trace = trace_user_stack_print,
};

/* Registered in init_events() to bind TRACE_USER_STACK to its printer. */
static struct trace_event trace_user_stack_event = {
	.type = TRACE_USER_STACK,
	.funcs = &trace_user_stack_funcs,
};
1328
1329 /* TRACE_BPUTS */
1330 static enum print_line_t
1331 trace_bputs_print(struct trace_iterator *iter, int flags,
1332 struct trace_event *event)
1333 {
1334 struct trace_entry *entry = iter->ent;
1335 struct trace_seq *s = &iter->seq;
1336 struct bputs_entry *field;
1337
1338 trace_assign_type(field, entry);
1339
1340 if (!seq_print_ip_sym(s, field->ip, flags))
1341 goto partial;
1342
1343 if (!trace_seq_puts(s, ": "))
1344 goto partial;
1345
1346 if (!trace_seq_puts(s, field->str))
1347 goto partial;
1348
1349 return TRACE_TYPE_HANDLED;
1350
1351 partial:
1352 return TRACE_TYPE_PARTIAL_LINE;
1353 }
1354
1355
1356 static enum print_line_t
1357 trace_bputs_raw(struct trace_iterator *iter, int flags,
1358 struct trace_event *event)
1359 {
1360 struct bputs_entry *field;
1361 struct trace_seq *s = &iter->seq;
1362
1363 trace_assign_type(field, iter->ent);
1364
1365 if (!trace_seq_printf(s, ": %lx : ", field->ip))
1366 goto partial;
1367
1368 if (!trace_seq_puts(s, field->str))
1369 goto partial;
1370
1371 return TRACE_TYPE_HANDLED;
1372
1373 partial:
1374 return TRACE_TYPE_PARTIAL_LINE;
1375 }
1376
/* TRACE_BPUTS supports the default and raw output modes only. */
static struct trace_event_functions trace_bputs_funcs = {
	.trace = trace_bputs_print,
	.raw = trace_bputs_raw,
};

/* Registered in init_events() to bind TRACE_BPUTS to the callbacks above. */
static struct trace_event trace_bputs_event = {
	.type = TRACE_BPUTS,
	.funcs = &trace_bputs_funcs,
};
1386
1387 /* TRACE_BPRINT */
1388 static enum print_line_t
1389 trace_bprint_print(struct trace_iterator *iter, int flags,
1390 struct trace_event *event)
1391 {
1392 struct trace_entry *entry = iter->ent;
1393 struct trace_seq *s = &iter->seq;
1394 struct bprint_entry *field;
1395
1396 trace_assign_type(field, entry);
1397
1398 if (!seq_print_ip_sym(s, field->ip, flags))
1399 goto partial;
1400
1401 if (!trace_seq_puts(s, ": "))
1402 goto partial;
1403
1404 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1405 goto partial;
1406
1407 return TRACE_TYPE_HANDLED;
1408
1409 partial:
1410 return TRACE_TYPE_PARTIAL_LINE;
1411 }
1412
1413
1414 static enum print_line_t
1415 trace_bprint_raw(struct trace_iterator *iter, int flags,
1416 struct trace_event *event)
1417 {
1418 struct bprint_entry *field;
1419 struct trace_seq *s = &iter->seq;
1420
1421 trace_assign_type(field, iter->ent);
1422
1423 if (!trace_seq_printf(s, ": %lx : ", field->ip))
1424 goto partial;
1425
1426 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1427 goto partial;
1428
1429 return TRACE_TYPE_HANDLED;
1430
1431 partial:
1432 return TRACE_TYPE_PARTIAL_LINE;
1433 }
1434
/* TRACE_BPRINT supports the default and raw output modes only. */
static struct trace_event_functions trace_bprint_funcs = {
	.trace = trace_bprint_print,
	.raw = trace_bprint_raw,
};

/* Registered in init_events() to bind TRACE_BPRINT to the callbacks above. */
static struct trace_event trace_bprint_event = {
	.type = TRACE_BPRINT,
	.funcs = &trace_bprint_funcs,
};
1444
1445 /* TRACE_PRINT */
1446 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1447 int flags, struct trace_event *event)
1448 {
1449 struct print_entry *field;
1450 struct trace_seq *s = &iter->seq;
1451
1452 trace_assign_type(field, iter->ent);
1453
1454 if (!seq_print_ip_sym(s, field->ip, flags))
1455 goto partial;
1456
1457 if (!trace_seq_printf(s, ": %s", field->buf))
1458 goto partial;
1459
1460 return TRACE_TYPE_HANDLED;
1461
1462 partial:
1463 return TRACE_TYPE_PARTIAL_LINE;
1464 }
1465
1466 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1467 struct trace_event *event)
1468 {
1469 struct print_entry *field;
1470
1471 trace_assign_type(field, iter->ent);
1472
1473 if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1474 goto partial;
1475
1476 return TRACE_TYPE_HANDLED;
1477
1478 partial:
1479 return TRACE_TYPE_PARTIAL_LINE;
1480 }
1481
/* TRACE_PRINT supports the default and raw output modes only. */
static struct trace_event_functions trace_print_funcs = {
	.trace = trace_print_print,
	.raw = trace_print_raw,
};

/* Registered in init_events() to bind TRACE_PRINT to the callbacks above. */
static struct trace_event trace_print_event = {
	.type = TRACE_PRINT,
	.funcs = &trace_print_funcs,
};
1491
1492
/*
 * Built-in trace event types whose output callbacks are registered at boot
 * by init_events().  NULL-terminated; __initdata since the array is only
 * walked once during early init.
 */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};
1504
1505 __init static int init_events(void)
1506 {
1507 struct trace_event *event;
1508 int i, ret;
1509
1510 for (i = 0; events[i]; i++) {
1511 event = events[i];
1512
1513 ret = register_ftrace_event(event);
1514 if (!ret) {
1515 printk(KERN_WARNING "event %d failed to register\n",
1516 event->type);
1517 WARN_ON_ONCE(1);
1518 }
1519 }
1520
1521 return 0;
1522 }
1523 early_initcall(init_events);
This page took 0.062506 seconds and 4 git commands to generate.