tracing: Use the perf recursion protection from trace event
kernel/trace/trace_syscalls.c
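Note: the file below is the tree state of kernel/trace/trace_syscalls.c at this commit. As the title suggests, the perf (profile) syscall handlers, prof_syscall_enter() and prof_syscall_exit(), now rely on the generic perf software-event recursion protection, perf_swevent_get_recursion_context()/perf_swevent_put_recursion_context(), to guard their per-cpu trace buffers.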
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata *start;
        struct syscall_metadata *stop;
        char str[KSYM_SYMBOL_LEN];

        start = (struct syscall_metadata *)__start_syscalls_metadata;
        stop = (struct syscall_metadata *)__stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        for ( ; start < stop; start++) {
                /*
                 * Only compare after the "sys" prefix. Archs that use
                 * syscall wrappers may have their syscall symbols aliased
                 * with a "SyS" prefix instead of "sys", leading to an
                 * unwanted mismatch.
                 */
                if (start->name && !strcmp(start->name + 3, str + 3))
                        return start;
        }
        return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}

int syscall_name_to_nr(char *name)
{
        int i;

        if (!syscalls_metadata)
                return -1;

        for (i = 0; i < NR_syscalls; i++) {
                if (syscalls_metadata[i]) {
                        if (!strcmp(syscalls_metadata[i]->name, name))
                                return i;
                }
        }
        return -1;
}

void set_syscall_enter_id(int num, int id)
{
        syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
        syscalls_metadata[num]->exit_id = id;
}

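/*
 * Text output callbacks for the syscall events.  An enter event is printed
 * as "name(arg: value, ...)" (with the argument types prepended in verbose
 * mode) and an exit event as "name -> 0x<return value>".
 */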
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_id != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_putc(s, ')');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

end:
        ret = trace_seq_putc(s, '\n');
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        if (entry->exit_id != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

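/*
 * SYSCALL_FIELD() expands to the type name, field name, offset, size and
 * signedness arguments used by the format and field-definition helpers
 * below.  The sizeof() comparison against the never-defined
 * __bad_type_size() turns a mismatch between the declared field type and
 * the struct member into a link-time error.
 */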
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)                                       \
        sizeof(type) != sizeof(trace.name) ?                            \
                __bad_type_size() :                                     \
                #type, #name, offsetof(typeof(trace), name),            \
                sizeof(trace.name), is_signed_type(type)

int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int i;
        int nr;
        int ret;
        struct syscall_metadata *entry;
        struct syscall_trace_enter trace;
        int offset = offsetof(struct syscall_trace_enter, args);

        nr = syscall_name_to_nr(call->data);
        entry = syscall_nr_to_meta(nr);

        if (!entry)
                return 0;

        ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr));
        if (!ret)
                return 0;

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
                                       entry->args[i]);
                if (!ret)
                        return 0;
                ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
                                       "\tsigned:%u;\n", offset,
                                       sizeof(unsigned long),
                                       is_signed_type(unsigned long));
                if (!ret)
                        return 0;
                offset += sizeof(unsigned long);
        }

        trace_seq_puts(s, "\nprint fmt: \"");
        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
                                       sizeof(unsigned long),
                                       i == entry->nb_args - 1 ? "" : ", ");
                if (!ret)
                        return 0;
        }
        trace_seq_putc(s, '"');

        for (i = 0; i < entry->nb_args; i++) {
                ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
                                       entry->args[i]);
                if (!ret)
                        return 0;
        }

        return trace_seq_putc(s, '\n');
}

int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
        int ret;
        struct syscall_trace_exit trace;

        ret = trace_seq_printf(s,
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n"
                               "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
                               "\tsigned:%u;\n",
                               SYSCALL_FIELD(int, nr),
                               SYSCALL_FIELD(long, ret));
        if (!ret)
                return 0;

        return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

int syscall_enter_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta;
        int ret;
        int nr;
        int i;
        int offset = offsetof(typeof(trace), args);

        nr = syscall_name_to_nr(call->data);
        meta = syscall_nr_to_meta(nr);

        if (!meta)
                return 0;

        ret = trace_define_common_fields(call);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_common_fields(call);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}

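/*
 * Tracepoint probes for the ftrace ring buffer path.  Each probe looks up
 * the syscall metadata, reserves an event sized for the fixed header plus
 * one unsigned long per argument, fills it from the registers, and commits
 * it unless the event filter discards it.
 */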
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
                                                  size, 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        if (!filter_current_check_discard(buffer, sys_data->enter_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
                                                  sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        if (!filter_current_check_discard(buffer, sys_data->exit_event,
                                          entry, event))
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

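/*
 * Registration helpers shared by the syscall events: the sys_enter/sys_exit
 * tracepoints are registered only for the first user and unregistered with
 * the last one (sys_refcount_*), while the per-syscall enabled_* bitmaps
 * select which syscall numbers the probes actually record.
 */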
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;
        char *name;

        name = (char *)call->data;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point");
        } else {
                set_bit(num, enabled_enter_syscalls);
                sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
        int num;
        char *name;

        name = (char *)call->data;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_enter--;
        clear_bit(num, enabled_enter_syscalls);
        if (!sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
        int ret = 0;
        int num;
        char *name;

        name = call->data;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point");
        } else {
                set_bit(num, enabled_exit_syscalls);
                sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
        int num;
        char *name;

        name = call->data;
        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;
        mutex_lock(&syscall_trace_lock);
        sys_refcount_exit--;
        clear_bit(num, enabled_exit_syscalls);
        if (!sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
        .trace = print_syscall_enter,
};

struct trace_event event_syscall_exit = {
        .trace = print_syscall_exit,
};

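/*
 * Build the syscall number -> metadata table at boot: for each syscall
 * number, resolve the handler address via arch_syscall_addr(), look up its
 * symbol name with kallsyms, and match it against the entries emitted into
 * the __syscalls_metadata section.
 */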
int __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                    NR_syscalls, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return -ENOMEM;
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                syscalls_metadata[i] = meta;
        }

        return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

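/*
 * Perf (profile) path.  The record is built in a per-cpu scratch buffer
 * (perf_trace_buf, or perf_trace_buf_nmi when called from NMI context)
 * instead of the ftrace ring buffer.  Interrupts are disabled around the
 * copy, and perf_swevent_get_recursion_context()/put_recursion_context()
 * provide the recursion protection: if we are already inside a perf
 * software event on this context level, the event is dropped rather than
 * corrupting the buffer.
 */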
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        unsigned long flags;
        char *trace_buf;
        char *raw_data;
        int *recursion;
        int syscall_nr;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "profile buffer not large enough"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);

        if (perf_swevent_get_recursion_context(&recursion))
                goto end_recursion;

        cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_enter *) raw_data;
        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->enter_id;
        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                              (unsigned long *)&rec->args);
        perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end:
        perf_swevent_put_recursion_context(recursion);
end_recursion:
        local_irq_restore(flags);
}

int reg_prof_syscall_enter(char *name)
{
        int ret = 0;
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_enter)
                ret = register_trace_sys_enter(prof_syscall_enter);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall entry trace point");
        } else {
                set_bit(num, enabled_prof_enter_syscalls);
                sys_prof_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_prof_syscall_enter(char *name)
{
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_enter--;
        clear_bit(num, enabled_prof_enter_syscalls);
        if (!sys_prof_refcount_enter)
                unregister_trace_sys_enter(prof_syscall_enter);
        mutex_unlock(&syscall_trace_lock);
}

static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        unsigned long flags;
        int syscall_nr;
        char *trace_buf;
        char *raw_data;
        int *recursion;
        int size;
        int cpu;

        syscall_nr = syscall_get_nr(current, regs);
        if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        /*
         * Impossible, but be paranoid with the future
         * How to put this check outside runtime?
         */
        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "exit event has grown above profile buffer size"))
                return;

        /* Protect the per cpu buffer, begin the rcu read side */
        local_irq_save(flags);

        if (perf_swevent_get_recursion_context(&recursion))
                goto end_recursion;

        cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from align to not leak stack to user */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        rec = (struct syscall_trace_exit *)raw_data;

        tracing_generic_entry_update(&rec->ent, 0, 0);
        rec->ent.type = sys_data->exit_id;
        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);

        perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end:
        perf_swevent_put_recursion_context(recursion);
end_recursion:
        local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
        int ret = 0;
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return -ENOSYS;

        mutex_lock(&syscall_trace_lock);
        if (!sys_prof_refcount_exit)
                ret = register_trace_sys_exit(prof_syscall_exit);
        if (ret) {
                pr_info("event trace: Could not activate "
                        "syscall exit trace point");
        } else {
                set_bit(num, enabled_prof_exit_syscalls);
                sys_prof_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

void unreg_prof_syscall_exit(char *name)
{
        int num;

        num = syscall_name_to_nr(name);
        if (num < 0 || num >= NR_syscalls)
                return;

        mutex_lock(&syscall_trace_lock);
        sys_prof_refcount_exit--;
        clear_bit(num, enabled_prof_exit_syscalls);
        if (!sys_prof_refcount_exit)
                unregister_trace_sys_exit(prof_syscall_exit);
        mutex_unlock(&syscall_trace_lock);
}

#endif