#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

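/*
 * Resolve the metadata entry for a syscall from the address of its
 * handler: look the address up in kallsyms, then scan the metadata
 * section delimited by __start/__stop_syscalls_metadata for a
 * matching name.
 */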
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscall symbol aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

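/* Map a syscall number to its metadata entry, or NULL if out of range. */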
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

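/*
 * Reverse lookup: linear scan of the metadata table for a syscall
 * name, returning its number, or -1 if it is not found.
 */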
int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++) {
		if (syscalls_metadata[i]) {
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
		}
	}
	return -1;
}

void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

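/*
 * Pretty-print a syscall entry event as "name(arg: value, ...)",
 * prepending the argument types when the verbose trace flag is set.
 */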
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

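/* Pretty-print a syscall exit event as "name -> 0x<return value>". */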
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

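/*
 * Expand to the printf arguments describing one field of the trace
 * record. The sizeof comparison forces a link error through the
 * never-defined __bad_type_size() if the declared type does not
 * match the size of the actual struct member.
 */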
#define SYSCALL_FIELD(type, name) \
	sizeof(type) != sizeof(trace.name) ? \
		__bad_type_size() : \
		#type, #name, offsetof(typeof(trace), name), \
		sizeof(trace.name), is_signed_type(type)

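/*
 * Emit the format description of a syscall entry event, as exposed
 * in the event's "format" file: the field list followed by the
 * "print fmt" line.
 */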
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int nr;
	int ret;
	struct syscall_metadata *entry;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	nr = syscall_name_to_nr(call->data);
	entry = syscall_nr_to_meta(nr);

	if (!entry)
		return 0;

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

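/*
 * Emit the format description of a syscall exit event: just the
 * syscall number and the return value.
 */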
int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

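/*
 * Register the fields of a syscall entry event with the event filter
 * framework, one unsigned-long slot per syscall argument.
 */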
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta;
	int ret;
	int nr;
	int i;
	int offset = offsetof(typeof(trace), args);

	nr = syscall_name_to_nr(call->data);
	meta = syscall_nr_to_meta(nr);

	if (!meta)
		return 0;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

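/* Same as above for the exit event: only the return value field. */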
int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

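/*
 * Tracepoint probe for syscall entry: if tracing is enabled for this
 * syscall, reserve a ring buffer event sized for its arguments, fill
 * it in and commit it, unless the event filter discards it.
 */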
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
						  size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

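/*
 * Tracepoint probe for syscall exit: same as the entry probe, but
 * the record is fixed-size and carries only the return value.
 */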
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
						  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

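/*
 * Enable tracing of one syscall's entry event. The sys_enter
 * tracepoint probe is shared by all syscalls, so it is registered
 * only on the 0 -> 1 refcount transition; per-syscall enablement is
 * tracked in the enabled_enter_syscalls bitmap.
 */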
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;
	char *name;

	name = (char *)call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	char *name;

	name = call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;
	char *name;

	name = call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
	.trace		= print_syscall_enter,
};

struct trace_event event_syscall_exit = {
	.trace		= print_syscall_exit,
};

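/*
 * Build the syscall number -> metadata table at boot by walking
 * every syscall table slot and matching its handler address against
 * the compiled-in metadata section.
 */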
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
				    NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

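/*
 * Perf (profile) probe for syscall entry: build the record in the
 * per-cpu profile buffer and hand it to perf_tp_event(), instead of
 * going through the ftrace ring buffer.
 */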
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *raw_data;
	int syscall_nr;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end:
	local_irq_restore(flags);
}

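/*
 * Registration for the perf probes mirrors the ftrace path above:
 * shared tracepoint probe, per-syscall bitmap, global refcount.
 */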
int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

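/*
 * Perf (profile) probe for syscall exit: fixed-size record carrying
 * the return value, delivered through the per-cpu profile buffer.
 */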
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *raw_data;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);
	cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end:
	local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif