/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 */
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
12 #include "trace_output.h"
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE 128
17 DECLARE_RWSEM(trace_event_sem
);
19 static struct hlist_head event_hash
[EVENT_HASHSIZE
] __read_mostly
;
21 static int next_event_type
= __TRACE_LAST_TYPE
+ 1;
23 enum print_line_t
trace_print_bputs_msg_only(struct trace_iterator
*iter
)
25 struct trace_seq
*s
= &iter
->seq
;
26 struct trace_entry
*entry
= iter
->ent
;
27 struct bputs_entry
*field
;
30 trace_assign_type(field
, entry
);
32 ret
= trace_seq_puts(s
, field
->str
);
34 return TRACE_TYPE_PARTIAL_LINE
;
36 return TRACE_TYPE_HANDLED
;
39 enum print_line_t
trace_print_bprintk_msg_only(struct trace_iterator
*iter
)
41 struct trace_seq
*s
= &iter
->seq
;
42 struct trace_entry
*entry
= iter
->ent
;
43 struct bprint_entry
*field
;
46 trace_assign_type(field
, entry
);
48 ret
= trace_seq_bprintf(s
, field
->fmt
, field
->buf
);
50 return TRACE_TYPE_PARTIAL_LINE
;
52 return TRACE_TYPE_HANDLED
;
55 enum print_line_t
trace_print_printk_msg_only(struct trace_iterator
*iter
)
57 struct trace_seq
*s
= &iter
->seq
;
58 struct trace_entry
*entry
= iter
->ent
;
59 struct print_entry
*field
;
62 trace_assign_type(field
, entry
);
64 ret
= trace_seq_puts(s
, field
->buf
);
66 return TRACE_TYPE_PARTIAL_LINE
;
68 return TRACE_TYPE_HANDLED
;
72 ftrace_print_flags_seq(struct trace_seq
*p
, const char *delim
,
74 const struct trace_print_flags
*flag_array
)
78 const char *ret
= trace_seq_buffer_ptr(p
);
81 for (i
= 0; flag_array
[i
].name
&& flags
; i
++) {
83 mask
= flag_array
[i
].mask
;
84 if ((flags
& mask
) != mask
)
87 str
= flag_array
[i
].name
;
90 trace_seq_puts(p
, delim
);
93 trace_seq_puts(p
, str
);
96 /* check for left over flags */
99 trace_seq_puts(p
, delim
);
100 trace_seq_printf(p
, "0x%lx", flags
);
103 trace_seq_putc(p
, 0);
107 EXPORT_SYMBOL(ftrace_print_flags_seq
);
110 ftrace_print_symbols_seq(struct trace_seq
*p
, unsigned long val
,
111 const struct trace_print_flags
*symbol_array
)
114 const char *ret
= trace_seq_buffer_ptr(p
);
116 for (i
= 0; symbol_array
[i
].name
; i
++) {
118 if (val
!= symbol_array
[i
].mask
)
121 trace_seq_puts(p
, symbol_array
[i
].name
);
125 if (ret
== (const char *)(trace_seq_buffer_ptr(p
)))
126 trace_seq_printf(p
, "0x%lx", val
);
128 trace_seq_putc(p
, 0);
132 EXPORT_SYMBOL(ftrace_print_symbols_seq
);
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of ftrace_print_symbols_seq() for 32-bit kernels,
 * where unsigned long cannot hold a u64 mask.
 */
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	/* nothing was written: fall back to a hex dump of the value */
	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif
/*
 * Render a bitmask of @bitmask_size bytes into @p using the kernel's
 * bitmap formatting. Returns a pointer to the rendered string inside @p.
 */
const char *
ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			 unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	/* trace_seq_bitmask() takes the number of bits, not bytes */
	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);
/*
 * Render @buf_len bytes of @buf as space-separated two-digit hex values.
 * Returns a pointer to the rendered string inside @p.
 */
const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);
189 int ftrace_raw_output_prep(struct trace_iterator
*iter
,
190 struct trace_event
*trace_event
)
192 struct ftrace_event_call
*event
;
193 struct trace_seq
*s
= &iter
->seq
;
194 struct trace_seq
*p
= &iter
->tmp_seq
;
195 struct trace_entry
*entry
;
198 event
= container_of(trace_event
, struct ftrace_event_call
, event
);
201 if (entry
->type
!= event
->event
.type
) {
203 return TRACE_TYPE_UNHANDLED
;
207 ret
= trace_seq_printf(s
, "%s: ", ftrace_event_name(event
));
209 return TRACE_TYPE_PARTIAL_LINE
;
213 EXPORT_SYMBOL(ftrace_raw_output_prep
);
215 static int ftrace_output_raw(struct trace_iterator
*iter
, char *name
,
216 char *fmt
, va_list ap
)
218 struct trace_seq
*s
= &iter
->seq
;
221 ret
= trace_seq_printf(s
, "%s: ", name
);
223 return TRACE_TYPE_PARTIAL_LINE
;
225 ret
= trace_seq_vprintf(s
, fmt
, ap
);
228 return TRACE_TYPE_PARTIAL_LINE
;
230 return TRACE_TYPE_HANDLED
;
233 int ftrace_output_call(struct trace_iterator
*iter
, char *name
, char *fmt
, ...)
239 ret
= ftrace_output_raw(iter
, name
, fmt
, ap
);
244 EXPORT_SYMBOL_GPL(ftrace_output_call
);
#ifdef CONFIG_KRETPROBES
/*
 * Symbols resolved inside a kretprobe trampoline are meaningless to the
 * reader; replace them with a placeholder. Otherwise return @name as-is.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
/*
 * Print the symbol name for @address (without offset) using @fmt.
 * Without CONFIG_KALLSYMS there is nothing to resolve; report success.
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
/*
 * Print the symbol name plus offset/size for @address using @fmt.
 * Without CONFIG_KALLSYMS there is nothing to resolve; report success.
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
296 # define IP_FMT "%08lx"
298 # define IP_FMT "%016lx"
301 int seq_print_user_ip(struct trace_seq
*s
, struct mm_struct
*mm
,
302 unsigned long ip
, unsigned long sym_flags
)
304 struct file
*file
= NULL
;
305 unsigned long vmstart
= 0;
312 const struct vm_area_struct
*vma
;
314 down_read(&mm
->mmap_sem
);
315 vma
= find_vma(mm
, ip
);
318 vmstart
= vma
->vm_start
;
321 ret
= trace_seq_path(s
, &file
->f_path
);
323 ret
= trace_seq_printf(s
, "[+0x%lx]",
326 up_read(&mm
->mmap_sem
);
328 if (ret
&& ((sym_flags
& TRACE_ITER_SYM_ADDR
) || !file
))
329 ret
= trace_seq_printf(s
, " <" IP_FMT
">", ip
);
334 seq_print_userip_objs(const struct userstack_entry
*entry
, struct trace_seq
*s
,
335 unsigned long sym_flags
)
337 struct mm_struct
*mm
= NULL
;
341 if (trace_flags
& TRACE_ITER_SYM_USEROBJ
) {
342 struct task_struct
*task
;
344 * we do the lookup on the thread group leader,
345 * since individual threads might have already quit!
348 task
= find_task_by_vpid(entry
->tgid
);
350 mm
= get_task_mm(task
);
354 for (i
= 0; i
< FTRACE_STACK_ENTRIES
; i
++) {
355 unsigned long ip
= entry
->caller
[i
];
357 if (ip
== ULONG_MAX
|| !ret
)
360 ret
= trace_seq_puts(s
, " => ");
363 ret
= trace_seq_puts(s
, "??");
365 ret
= trace_seq_putc(s
, '\n');
371 ret
= seq_print_user_ip(s
, mm
, ip
, sym_flags
);
372 ret
= trace_seq_putc(s
, '\n');
381 seq_print_ip_sym(struct trace_seq
*s
, unsigned long ip
, unsigned long sym_flags
)
386 return trace_seq_putc(s
, '0');
388 if (sym_flags
& TRACE_ITER_SYM_OFFSET
)
389 ret
= seq_print_sym_offset(s
, "%s", ip
);
391 ret
= seq_print_sym_short(s
, "%s", ip
);
396 if (sym_flags
& TRACE_ITER_SYM_ADDR
)
397 ret
= trace_seq_printf(s
, " <" IP_FMT
">", ip
);
402 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
403 * @s: trace seq struct to write to
404 * @entry: The trace entry field from the ring buffer
406 * Prints the generic fields of irqs off, in hard or softirq, preempt
409 int trace_print_lat_fmt(struct trace_seq
*s
, struct trace_entry
*entry
)
418 hardirq
= entry
->flags
& TRACE_FLAG_HARDIRQ
;
419 softirq
= entry
->flags
& TRACE_FLAG_SOFTIRQ
;
422 (entry
->flags
& TRACE_FLAG_IRQS_OFF
) ? 'd' :
423 (entry
->flags
& TRACE_FLAG_IRQS_NOSUPPORT
) ? 'X' :
426 switch (entry
->flags
& (TRACE_FLAG_NEED_RESCHED
|
427 TRACE_FLAG_PREEMPT_RESCHED
)) {
428 case TRACE_FLAG_NEED_RESCHED
| TRACE_FLAG_PREEMPT_RESCHED
:
431 case TRACE_FLAG_NEED_RESCHED
:
434 case TRACE_FLAG_PREEMPT_RESCHED
:
443 (hardirq
&& softirq
) ? 'H' :
448 if (!trace_seq_printf(s
, "%c%c%c",
449 irqs_off
, need_resched
, hardsoft_irq
))
452 if (entry
->preempt_count
)
453 ret
= trace_seq_printf(s
, "%x", entry
->preempt_count
);
455 ret
= trace_seq_putc(s
, '.');
461 lat_print_generic(struct trace_seq
*s
, struct trace_entry
*entry
, int cpu
)
463 char comm
[TASK_COMM_LEN
];
465 trace_find_cmdline(entry
->pid
, comm
);
467 if (!trace_seq_printf(s
, "%8.8s-%-5d %3d",
468 comm
, entry
->pid
, cpu
))
471 return trace_print_lat_fmt(s
, entry
);
474 static unsigned long preempt_mark_thresh_us
= 100;
477 lat_print_timestamp(struct trace_iterator
*iter
, u64 next_ts
)
479 unsigned long verbose
= trace_flags
& TRACE_ITER_VERBOSE
;
480 unsigned long in_ns
= iter
->iter_flags
& TRACE_FILE_TIME_IN_NS
;
481 unsigned long long abs_ts
= iter
->ts
- iter
->trace_buffer
->time_start
;
482 unsigned long long rel_ts
= next_ts
- iter
->ts
;
483 struct trace_seq
*s
= &iter
->seq
;
486 abs_ts
= ns2usecs(abs_ts
);
487 rel_ts
= ns2usecs(rel_ts
);
490 if (verbose
&& in_ns
) {
491 unsigned long abs_usec
= do_div(abs_ts
, USEC_PER_MSEC
);
492 unsigned long abs_msec
= (unsigned long)abs_ts
;
493 unsigned long rel_usec
= do_div(rel_ts
, USEC_PER_MSEC
);
494 unsigned long rel_msec
= (unsigned long)rel_ts
;
496 return trace_seq_printf(
497 s
, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
501 } else if (verbose
&& !in_ns
) {
502 return trace_seq_printf(
503 s
, "[%016llx] %lld (+%lld): ",
504 iter
->ts
, abs_ts
, rel_ts
);
505 } else if (!verbose
&& in_ns
) {
506 return trace_seq_printf(
509 rel_ts
> preempt_mark_thresh_us
? '!' :
510 rel_ts
> 1 ? '+' : ' ');
511 } else { /* !verbose && !in_ns */
512 return trace_seq_printf(s
, " %4lld: ", abs_ts
);
516 int trace_print_context(struct trace_iterator
*iter
)
518 struct trace_seq
*s
= &iter
->seq
;
519 struct trace_entry
*entry
= iter
->ent
;
520 unsigned long long t
;
521 unsigned long secs
, usec_rem
;
522 char comm
[TASK_COMM_LEN
];
525 trace_find_cmdline(entry
->pid
, comm
);
527 ret
= trace_seq_printf(s
, "%16s-%-5d [%03d] ",
528 comm
, entry
->pid
, iter
->cpu
);
532 if (trace_flags
& TRACE_ITER_IRQ_INFO
) {
533 ret
= trace_print_lat_fmt(s
, entry
);
538 if (iter
->iter_flags
& TRACE_FILE_TIME_IN_NS
) {
539 t
= ns2usecs(iter
->ts
);
540 usec_rem
= do_div(t
, USEC_PER_SEC
);
541 secs
= (unsigned long)t
;
542 return trace_seq_printf(s
, " %5lu.%06lu: ", secs
, usec_rem
);
544 return trace_seq_printf(s
, " %12llu: ", iter
->ts
);
547 int trace_print_lat_context(struct trace_iterator
*iter
)
551 /* trace_find_next_entry will reset ent_size */
552 int ent_size
= iter
->ent_size
;
553 struct trace_seq
*s
= &iter
->seq
;
554 struct trace_entry
*entry
= iter
->ent
,
555 *next_entry
= trace_find_next_entry(iter
, NULL
,
557 unsigned long verbose
= (trace_flags
& TRACE_ITER_VERBOSE
);
559 /* Restore the original ent_size */
560 iter
->ent_size
= ent_size
;
566 char comm
[TASK_COMM_LEN
];
568 trace_find_cmdline(entry
->pid
, comm
);
570 ret
= trace_seq_printf(
571 s
, "%16s %5d %3d %d %08x %08lx ",
572 comm
, entry
->pid
, iter
->cpu
, entry
->flags
,
573 entry
->preempt_count
, iter
->idx
);
575 ret
= lat_print_generic(s
, entry
, iter
->cpu
);
579 ret
= lat_print_timestamp(iter
, next_ts
);
584 static const char state_to_char
[] = TASK_STATE_TO_CHAR_STR
;
586 static int task_state_char(unsigned long state
)
588 int bit
= state
? __ffs(state
) + 1 : 0;
590 return bit
< sizeof(state_to_char
) - 1 ? state_to_char
[bit
] : '?';
594 * ftrace_find_event - find a registered event
595 * @type: the type of event to look for
597 * Returns an event of type @type otherwise NULL
598 * Called with trace_event_read_lock() held.
600 struct trace_event
*ftrace_find_event(int type
)
602 struct trace_event
*event
;
605 key
= type
& (EVENT_HASHSIZE
- 1);
607 hlist_for_each_entry(event
, &event_hash
[key
], node
) {
608 if (event
->type
== type
)
615 static LIST_HEAD(ftrace_event_list
);
617 static int trace_search_list(struct list_head
**list
)
619 struct trace_event
*e
;
620 int last
= __TRACE_LAST_TYPE
;
622 if (list_empty(&ftrace_event_list
)) {
623 *list
= &ftrace_event_list
;
628 * We used up all possible max events,
629 * lets see if somebody freed one.
631 list_for_each_entry(e
, &ftrace_event_list
, list
) {
632 if (e
->type
!= last
+ 1)
637 /* Did we used up all 65 thousand events??? */
638 if ((last
+ 1) > FTRACE_MAX_EVENT
)
645 void trace_event_read_lock(void)
647 down_read(&trace_event_sem
);
650 void trace_event_read_unlock(void)
652 up_read(&trace_event_sem
);
656 * register_ftrace_event - register output for an event type
657 * @event: the event type to register
659 * Event types are stored in a hash and this hash is used to
660 * find a way to print an event. If the @event->type is set
661 * then it will use that type, otherwise it will assign a
664 * If you assign your own type, please make sure it is added
665 * to the trace_type enum in trace.h, to avoid collisions
666 * with the dynamic types.
668 * Returns the event type number or zero on error.
670 int register_ftrace_event(struct trace_event
*event
)
675 down_write(&trace_event_sem
);
680 if (WARN_ON(!event
->funcs
))
683 INIT_LIST_HEAD(&event
->list
);
686 struct list_head
*list
= NULL
;
688 if (next_event_type
> FTRACE_MAX_EVENT
) {
690 event
->type
= trace_search_list(&list
);
696 event
->type
= next_event_type
++;
697 list
= &ftrace_event_list
;
700 if (WARN_ON(ftrace_find_event(event
->type
)))
703 list_add_tail(&event
->list
, list
);
705 } else if (event
->type
> __TRACE_LAST_TYPE
) {
706 printk(KERN_WARNING
"Need to add type to trace.h\n");
710 /* Is this event already used */
711 if (ftrace_find_event(event
->type
))
715 if (event
->funcs
->trace
== NULL
)
716 event
->funcs
->trace
= trace_nop_print
;
717 if (event
->funcs
->raw
== NULL
)
718 event
->funcs
->raw
= trace_nop_print
;
719 if (event
->funcs
->hex
== NULL
)
720 event
->funcs
->hex
= trace_nop_print
;
721 if (event
->funcs
->binary
== NULL
)
722 event
->funcs
->binary
= trace_nop_print
;
724 key
= event
->type
& (EVENT_HASHSIZE
- 1);
726 hlist_add_head(&event
->node
, &event_hash
[key
]);
730 up_write(&trace_event_sem
);
734 EXPORT_SYMBOL_GPL(register_ftrace_event
);
737 * Used by module code with the trace_event_sem held for write.
739 int __unregister_ftrace_event(struct trace_event
*event
)
741 hlist_del(&event
->node
);
742 list_del(&event
->list
);
747 * unregister_ftrace_event - remove a no longer used event
748 * @event: the event to remove
750 int unregister_ftrace_event(struct trace_event
*event
)
752 down_write(&trace_event_sem
);
753 __unregister_ftrace_event(event
);
754 up_write(&trace_event_sem
);
758 EXPORT_SYMBOL_GPL(unregister_ftrace_event
);
764 enum print_line_t
trace_nop_print(struct trace_iterator
*iter
, int flags
,
765 struct trace_event
*event
)
767 if (!trace_seq_printf(&iter
->seq
, "type: %d\n", iter
->ent
->type
))
768 return TRACE_TYPE_PARTIAL_LINE
;
770 return TRACE_TYPE_HANDLED
;
774 static enum print_line_t
trace_fn_trace(struct trace_iterator
*iter
, int flags
,
775 struct trace_event
*event
)
777 struct ftrace_entry
*field
;
778 struct trace_seq
*s
= &iter
->seq
;
780 trace_assign_type(field
, iter
->ent
);
782 if (!seq_print_ip_sym(s
, field
->ip
, flags
))
785 if ((flags
& TRACE_ITER_PRINT_PARENT
) && field
->parent_ip
) {
786 if (!trace_seq_puts(s
, " <-"))
788 if (!seq_print_ip_sym(s
,
793 if (!trace_seq_putc(s
, '\n'))
796 return TRACE_TYPE_HANDLED
;
799 return TRACE_TYPE_PARTIAL_LINE
;
802 static enum print_line_t
trace_fn_raw(struct trace_iterator
*iter
, int flags
,
803 struct trace_event
*event
)
805 struct ftrace_entry
*field
;
807 trace_assign_type(field
, iter
->ent
);
809 if (!trace_seq_printf(&iter
->seq
, "%lx %lx\n",
812 return TRACE_TYPE_PARTIAL_LINE
;
814 return TRACE_TYPE_HANDLED
;
817 static enum print_line_t
trace_fn_hex(struct trace_iterator
*iter
, int flags
,
818 struct trace_event
*event
)
820 struct ftrace_entry
*field
;
821 struct trace_seq
*s
= &iter
->seq
;
823 trace_assign_type(field
, iter
->ent
);
825 SEQ_PUT_HEX_FIELD_RET(s
, field
->ip
);
826 SEQ_PUT_HEX_FIELD_RET(s
, field
->parent_ip
);
828 return TRACE_TYPE_HANDLED
;
831 static enum print_line_t
trace_fn_bin(struct trace_iterator
*iter
, int flags
,
832 struct trace_event
*event
)
834 struct ftrace_entry
*field
;
835 struct trace_seq
*s
= &iter
->seq
;
837 trace_assign_type(field
, iter
->ent
);
839 SEQ_PUT_FIELD_RET(s
, field
->ip
);
840 SEQ_PUT_FIELD_RET(s
, field
->parent_ip
);
842 return TRACE_TYPE_HANDLED
;
845 static struct trace_event_functions trace_fn_funcs
= {
846 .trace
= trace_fn_trace
,
849 .binary
= trace_fn_bin
,
852 static struct trace_event trace_fn_event
= {
854 .funcs
= &trace_fn_funcs
,
857 /* TRACE_CTX an TRACE_WAKE */
858 static enum print_line_t
trace_ctxwake_print(struct trace_iterator
*iter
,
861 struct ctx_switch_entry
*field
;
862 char comm
[TASK_COMM_LEN
];
866 trace_assign_type(field
, iter
->ent
);
868 T
= task_state_char(field
->next_state
);
869 S
= task_state_char(field
->prev_state
);
870 trace_find_cmdline(field
->next_pid
, comm
);
871 if (!trace_seq_printf(&iter
->seq
,
872 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
880 return TRACE_TYPE_PARTIAL_LINE
;
882 return TRACE_TYPE_HANDLED
;
885 static enum print_line_t
trace_ctx_print(struct trace_iterator
*iter
, int flags
,
886 struct trace_event
*event
)
888 return trace_ctxwake_print(iter
, "==>");
891 static enum print_line_t
trace_wake_print(struct trace_iterator
*iter
,
892 int flags
, struct trace_event
*event
)
894 return trace_ctxwake_print(iter
, " +");
897 static int trace_ctxwake_raw(struct trace_iterator
*iter
, char S
)
899 struct ctx_switch_entry
*field
;
902 trace_assign_type(field
, iter
->ent
);
905 S
= task_state_char(field
->prev_state
);
906 T
= task_state_char(field
->next_state
);
907 if (!trace_seq_printf(&iter
->seq
, "%d %d %c %d %d %d %c\n",
915 return TRACE_TYPE_PARTIAL_LINE
;
917 return TRACE_TYPE_HANDLED
;
920 static enum print_line_t
trace_ctx_raw(struct trace_iterator
*iter
, int flags
,
921 struct trace_event
*event
)
923 return trace_ctxwake_raw(iter
, 0);
926 static enum print_line_t
trace_wake_raw(struct trace_iterator
*iter
, int flags
,
927 struct trace_event
*event
)
929 return trace_ctxwake_raw(iter
, '+');
933 static int trace_ctxwake_hex(struct trace_iterator
*iter
, char S
)
935 struct ctx_switch_entry
*field
;
936 struct trace_seq
*s
= &iter
->seq
;
939 trace_assign_type(field
, iter
->ent
);
942 S
= task_state_char(field
->prev_state
);
943 T
= task_state_char(field
->next_state
);
945 SEQ_PUT_HEX_FIELD_RET(s
, field
->prev_pid
);
946 SEQ_PUT_HEX_FIELD_RET(s
, field
->prev_prio
);
947 SEQ_PUT_HEX_FIELD_RET(s
, S
);
948 SEQ_PUT_HEX_FIELD_RET(s
, field
->next_cpu
);
949 SEQ_PUT_HEX_FIELD_RET(s
, field
->next_pid
);
950 SEQ_PUT_HEX_FIELD_RET(s
, field
->next_prio
);
951 SEQ_PUT_HEX_FIELD_RET(s
, T
);
953 return TRACE_TYPE_HANDLED
;
956 static enum print_line_t
trace_ctx_hex(struct trace_iterator
*iter
, int flags
,
957 struct trace_event
*event
)
959 return trace_ctxwake_hex(iter
, 0);
962 static enum print_line_t
trace_wake_hex(struct trace_iterator
*iter
, int flags
,
963 struct trace_event
*event
)
965 return trace_ctxwake_hex(iter
, '+');
968 static enum print_line_t
trace_ctxwake_bin(struct trace_iterator
*iter
,
969 int flags
, struct trace_event
*event
)
971 struct ctx_switch_entry
*field
;
972 struct trace_seq
*s
= &iter
->seq
;
974 trace_assign_type(field
, iter
->ent
);
976 SEQ_PUT_FIELD_RET(s
, field
->prev_pid
);
977 SEQ_PUT_FIELD_RET(s
, field
->prev_prio
);
978 SEQ_PUT_FIELD_RET(s
, field
->prev_state
);
979 SEQ_PUT_FIELD_RET(s
, field
->next_pid
);
980 SEQ_PUT_FIELD_RET(s
, field
->next_prio
);
981 SEQ_PUT_FIELD_RET(s
, field
->next_state
);
983 return TRACE_TYPE_HANDLED
;
986 static struct trace_event_functions trace_ctx_funcs
= {
987 .trace
= trace_ctx_print
,
988 .raw
= trace_ctx_raw
,
989 .hex
= trace_ctx_hex
,
990 .binary
= trace_ctxwake_bin
,
993 static struct trace_event trace_ctx_event
= {
995 .funcs
= &trace_ctx_funcs
,
998 static struct trace_event_functions trace_wake_funcs
= {
999 .trace
= trace_wake_print
,
1000 .raw
= trace_wake_raw
,
1001 .hex
= trace_wake_hex
,
1002 .binary
= trace_ctxwake_bin
,
1005 static struct trace_event trace_wake_event
= {
1007 .funcs
= &trace_wake_funcs
,
1012 static enum print_line_t
trace_stack_print(struct trace_iterator
*iter
,
1013 int flags
, struct trace_event
*event
)
1015 struct stack_entry
*field
;
1016 struct trace_seq
*s
= &iter
->seq
;
1020 trace_assign_type(field
, iter
->ent
);
1021 end
= (unsigned long *)((long)iter
->ent
+ iter
->ent_size
);
1023 if (!trace_seq_puts(s
, "<stack trace>\n"))
1026 for (p
= field
->caller
; p
&& *p
!= ULONG_MAX
&& p
< end
; p
++) {
1027 if (!trace_seq_puts(s
, " => "))
1030 if (!seq_print_ip_sym(s
, *p
, flags
))
1032 if (!trace_seq_putc(s
, '\n'))
1036 return TRACE_TYPE_HANDLED
;
1039 return TRACE_TYPE_PARTIAL_LINE
;
1042 static struct trace_event_functions trace_stack_funcs
= {
1043 .trace
= trace_stack_print
,
1046 static struct trace_event trace_stack_event
= {
1047 .type
= TRACE_STACK
,
1048 .funcs
= &trace_stack_funcs
,
1051 /* TRACE_USER_STACK */
1052 static enum print_line_t
trace_user_stack_print(struct trace_iterator
*iter
,
1053 int flags
, struct trace_event
*event
)
1055 struct userstack_entry
*field
;
1056 struct trace_seq
*s
= &iter
->seq
;
1058 trace_assign_type(field
, iter
->ent
);
1060 if (!trace_seq_puts(s
, "<user stack trace>\n"))
1063 if (!seq_print_userip_objs(field
, s
, flags
))
1066 return TRACE_TYPE_HANDLED
;
1069 return TRACE_TYPE_PARTIAL_LINE
;
1072 static struct trace_event_functions trace_user_stack_funcs
= {
1073 .trace
= trace_user_stack_print
,
1076 static struct trace_event trace_user_stack_event
= {
1077 .type
= TRACE_USER_STACK
,
1078 .funcs
= &trace_user_stack_funcs
,
1082 static enum print_line_t
1083 trace_bputs_print(struct trace_iterator
*iter
, int flags
,
1084 struct trace_event
*event
)
1086 struct trace_entry
*entry
= iter
->ent
;
1087 struct trace_seq
*s
= &iter
->seq
;
1088 struct bputs_entry
*field
;
1090 trace_assign_type(field
, entry
);
1092 if (!seq_print_ip_sym(s
, field
->ip
, flags
))
1095 if (!trace_seq_puts(s
, ": "))
1098 if (!trace_seq_puts(s
, field
->str
))
1101 return TRACE_TYPE_HANDLED
;
1104 return TRACE_TYPE_PARTIAL_LINE
;
1108 static enum print_line_t
1109 trace_bputs_raw(struct trace_iterator
*iter
, int flags
,
1110 struct trace_event
*event
)
1112 struct bputs_entry
*field
;
1113 struct trace_seq
*s
= &iter
->seq
;
1115 trace_assign_type(field
, iter
->ent
);
1117 if (!trace_seq_printf(s
, ": %lx : ", field
->ip
))
1120 if (!trace_seq_puts(s
, field
->str
))
1123 return TRACE_TYPE_HANDLED
;
1126 return TRACE_TYPE_PARTIAL_LINE
;
1129 static struct trace_event_functions trace_bputs_funcs
= {
1130 .trace
= trace_bputs_print
,
1131 .raw
= trace_bputs_raw
,
1134 static struct trace_event trace_bputs_event
= {
1135 .type
= TRACE_BPUTS
,
1136 .funcs
= &trace_bputs_funcs
,
1140 static enum print_line_t
1141 trace_bprint_print(struct trace_iterator
*iter
, int flags
,
1142 struct trace_event
*event
)
1144 struct trace_entry
*entry
= iter
->ent
;
1145 struct trace_seq
*s
= &iter
->seq
;
1146 struct bprint_entry
*field
;
1148 trace_assign_type(field
, entry
);
1150 if (!seq_print_ip_sym(s
, field
->ip
, flags
))
1153 if (!trace_seq_puts(s
, ": "))
1156 if (!trace_seq_bprintf(s
, field
->fmt
, field
->buf
))
1159 return TRACE_TYPE_HANDLED
;
1162 return TRACE_TYPE_PARTIAL_LINE
;
1166 static enum print_line_t
1167 trace_bprint_raw(struct trace_iterator
*iter
, int flags
,
1168 struct trace_event
*event
)
1170 struct bprint_entry
*field
;
1171 struct trace_seq
*s
= &iter
->seq
;
1173 trace_assign_type(field
, iter
->ent
);
1175 if (!trace_seq_printf(s
, ": %lx : ", field
->ip
))
1178 if (!trace_seq_bprintf(s
, field
->fmt
, field
->buf
))
1181 return TRACE_TYPE_HANDLED
;
1184 return TRACE_TYPE_PARTIAL_LINE
;
1187 static struct trace_event_functions trace_bprint_funcs
= {
1188 .trace
= trace_bprint_print
,
1189 .raw
= trace_bprint_raw
,
1192 static struct trace_event trace_bprint_event
= {
1193 .type
= TRACE_BPRINT
,
1194 .funcs
= &trace_bprint_funcs
,
1198 static enum print_line_t
trace_print_print(struct trace_iterator
*iter
,
1199 int flags
, struct trace_event
*event
)
1201 struct print_entry
*field
;
1202 struct trace_seq
*s
= &iter
->seq
;
1204 trace_assign_type(field
, iter
->ent
);
1206 if (!seq_print_ip_sym(s
, field
->ip
, flags
))
1209 if (!trace_seq_printf(s
, ": %s", field
->buf
))
1212 return TRACE_TYPE_HANDLED
;
1215 return TRACE_TYPE_PARTIAL_LINE
;
1218 static enum print_line_t
trace_print_raw(struct trace_iterator
*iter
, int flags
,
1219 struct trace_event
*event
)
1221 struct print_entry
*field
;
1223 trace_assign_type(field
, iter
->ent
);
1225 if (!trace_seq_printf(&iter
->seq
, "# %lx %s", field
->ip
, field
->buf
))
1228 return TRACE_TYPE_HANDLED
;
1231 return TRACE_TYPE_PARTIAL_LINE
;
1234 static struct trace_event_functions trace_print_funcs
= {
1235 .trace
= trace_print_print
,
1236 .raw
= trace_print_raw
,
1239 static struct trace_event trace_print_event
= {
1240 .type
= TRACE_PRINT
,
1241 .funcs
= &trace_print_funcs
,
1245 static struct trace_event
*events
[] __initdata
= {
1250 &trace_user_stack_event
,
1252 &trace_bprint_event
,
1257 __init
static int init_events(void)
1259 struct trace_event
*event
;
1262 for (i
= 0; events
[i
]; i
++) {
1265 ret
= register_ftrace_event(event
);
1267 printk(KERN_WARNING
"event %d failed to register\n",
1275 early_initcall(init_events
);