#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>
#include <asm/asm-offsets.h>

#include "trace_output.h"
#include "trace.h"

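/*
 * Glue between the raw syscall tracepoints (sys_enter/sys_exit) and the
 * trace event infrastructure: map syscall numbers to their metadata,
 * format enter/exit records for ftrace, and feed the same records to
 * perf when CONFIG_PERF_EVENTS is enabled.
 */
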
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

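/*
 * The refcounts above track how many syscall events are currently
 * enabled: the sys_enter/sys_exit tracepoint probes are registered only
 * while the corresponding count is non-zero, and the bitmaps select
 * which individual syscall numbers actually get recorded.
 */
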
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
				  enum trace_reg type);

static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with "SyS" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

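/*
 * Resolve a syscall's entry address to its build-time metadata: look up
 * the symbol name with kallsyms and match it against the entries
 * collected between __start_syscalls_metadata and
 * __stop_syscalls_metadata.
 */
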
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}

	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

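/*
 * Render one syscall enter event.  The output looks like (argument
 * names and values below are only illustrative):
 *
 *	sys_read(fd: 3, buf: 7fffffffe000, count: 400)
 *
 * With the "verbose" trace option set, each value is additionally
 * prefixed by its declared type.
 */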
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

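/*
 * SYSCALL_FIELD expands to the (type, name, offset, size, signedness)
 * argument list that trace_define_field() expects.  __bad_type_size()
 * is declared but intentionally never defined, so a mismatch between
 * the declared field type and the struct member size fails at build
 * time instead of silently registering a bogus field.
 */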
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

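/*
 * Build the print_fmt string for an enter event.  For a two-argument
 * syscall with arguments "fd" and "buf" (hypothetical names, 64-bit
 * build) the result would look like:
 *
 *	"fd: 0x%08lx, buf: 0x%08lx", ((unsigned long)(REC->fd)), ((unsigned long)(REC->buf))
 */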
static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

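/*
 * Probes attached to the sys_enter/sys_exit tracepoints: look up the
 * metadata for the current syscall, reserve a ring buffer entry and
 * record the syscall number plus its raw arguments (or return value).
 */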
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

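/*
 * raw_init callback for every syscall event: reject syscalls whose
 * metadata was never mapped, build the print_fmt string and register
 * the trace event itself.
 */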
int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);
	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

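/*
 * Boot-time setup: walk the syscall table once and build the
 * syscall-number -> metadata lookup array used by syscall_nr_to_meta().
 */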
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

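/*
 * perf support: the same sys_enter/sys_exit tracepoints also feed
 * per-event perf buffers.  The enable/disable paths mirror the ftrace
 * ones above, with their own refcounts and bitmaps.
 */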
#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

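/*
 * The record size below is rounded so that the entry plus perf's u32
 * size field stays u64 aligned, and is checked against
 * PERF_MAX_TRACE_SIZE before the buffer is reserved.
 */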
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

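/*
 * ->reg() callbacks for the two event classes: dispatch the requested
 * trace_reg operation to the ftrace or perf enable/disable helpers
 * above.
 */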
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
#endif
	}
	return 0;
}