tracing: Remove per event trace registering
kernel/trace/trace_syscalls.c
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type);

struct ftrace_event_class event_class_syscall_enter = {
	.system	= "syscalls",
	.reg	= syscall_enter_register
};

struct ftrace_event_class event_class_syscall_exit = {
	.system	= "syscalls",
	.reg	= syscall_exit_register
};

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

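/*
 * Scan the __syscalls_metadata section for the record whose name matches
 * the symbol at @syscall, comparing only past the "sys"/"SyS" prefix
 * (see the comment in the loop body).
 */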
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscall symbol aliases
		 * prefixed with "SyS" instead of "sys", leading to an
		 * unwanted mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

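/*
 * Render a syscall entry record as "name(arg: value, ...)". With the
 * verbose trace flag set, each argument is also prefixed with its type.
 */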
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

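/*
 * SYSCALL_FIELD expands to the argument list trace_define_field() expects.
 * If the declared type does not match the field in the trace entry struct,
 * the ternary emits a call to the undefined __bad_type_size(), turning the
 * mismatch into a link error.
 */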
#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

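/*
 * Build the print_fmt string for a syscall entry event. Called twice:
 * first with a zero length to size the buffer, then again to fill it.
 */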
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

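/*
 * Describe the fields of a syscall entry record to the event filter code:
 * the syscall number followed by one unsigned long per argument.
 */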
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

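/*
 * Tracepoint probe shared by every syscall entry event. The per-syscall
 * enabled bitmap decides whether the current syscall gets recorded into
 * the ring buffer.
 */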
void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->id, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->id, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

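/*
 * The sys_enter tracepoint probe is registered once, when the first
 * syscall entry event is enabled; a refcount keeps it registered while
 * individual syscalls come and go in the enabled bitmap.
 */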
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

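/*
 * Build the syscall number -> metadata table at boot by resolving each
 * sys_call_table entry back to its metadata record.
 */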
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

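/*
 * Tracepoint probe feeding syscall entry events to perf. Like the ftrace
 * probes above, it is shared by all syscalls and gated by a bitmap.
 */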
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->id, &rctx, &flags);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
}

int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->id, &rctx, &flags);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
}

int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

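/*
 * Single registration callback per event class: the trace_reg type selects
 * between ftrace and perf registration, replacing the per-event
 * registration functions that this change removes.
 */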
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
#endif
	}
	return 0;
}