tracing/kprobes: Add failure messages for debugging
deliverable/linux.git: kernel/trace/trace_kprobe.c
1 /*
2 * kprobe based kernel tracer
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22 #include <linux/kprobes.h>
23 #include <linux/seq_file.h>
24 #include <linux/slab.h>
25 #include <linux/smp.h>
26 #include <linux/debugfs.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/ctype.h>
30 #include <linux/ptrace.h>
31 #include <linux/perf_event.h>
32
33 #include "trace.h"
34 #include "trace_output.h"
35
36 #define MAX_TRACE_ARGS 128
37 #define MAX_ARGSTR_LEN 63
38 #define MAX_EVENT_NAME_LEN 64
39 #define KPROBE_EVENT_SYSTEM "kprobes"
40
41 /* Reserved field names */
42 #define FIELD_STRING_IP "__probe_ip"
43 #define FIELD_STRING_NARGS "__probe_nargs"
44 #define FIELD_STRING_RETIP "__probe_ret_ip"
45 #define FIELD_STRING_FUNC "__probe_func"
46
47 const char *reserved_field_names[] = {
48 "common_type",
49 "common_flags",
50 "common_preempt_count",
51 "common_pid",
52 "common_tgid",
53 "common_lock_depth",
54 FIELD_STRING_IP,
55 FIELD_STRING_NARGS,
56 FIELD_STRING_RETIP,
57 FIELD_STRING_FUNC,
58 };
59
60 /* currently, trace_kprobe only supports X86. */
61
62 struct fetch_func {
63 unsigned long (*func)(struct pt_regs *, void *);
64 void *data;
65 };
66
67 static __kprobes unsigned long call_fetch(struct fetch_func *f,
68 struct pt_regs *regs)
69 {
70 return f->func(regs, f->data);
71 }
72
73 /* fetch handlers */
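/*
 * Each handler below implements one form of the fetch-argument syntax that
 * parse_probe_arg()/parse_probe_vars() accept later in this file:
 * fetch_register for %REG, fetch_stack for $stackN, fetch_memory for @ADDR,
 * fetch_argument for $argN, fetch_retvalue for $retval and
 * fetch_stack_address for $stack.
 */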
74 static __kprobes unsigned long fetch_register(struct pt_regs *regs,
75 void *offset)
76 {
77 return regs_get_register(regs, (unsigned int)((unsigned long)offset));
78 }
79
80 static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
81 void *num)
82 {
83 return regs_get_kernel_stack_nth(regs,
84 (unsigned int)((unsigned long)num));
85 }
86
87 static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
88 {
89 unsigned long retval;
90
91 if (probe_kernel_address(addr, retval))
92 return 0;
93 return retval;
94 }
95
96 static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
97 {
98 return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
99 }
100
101 static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
102 void *dummy)
103 {
104 return regs_return_value(regs);
105 }
106
107 static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
108 void *dummy)
109 {
110 return kernel_stack_pointer(regs);
111 }
112
113 /* Memory fetching by symbol */
114 struct symbol_cache {
115 char *symbol;
116 long offset;
117 unsigned long addr;
118 };
119
120 static unsigned long update_symbol_cache(struct symbol_cache *sc)
121 {
122 sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
123 if (sc->addr)
124 sc->addr += sc->offset;
125 return sc->addr;
126 }
127
128 static void free_symbol_cache(struct symbol_cache *sc)
129 {
130 kfree(sc->symbol);
131 kfree(sc);
132 }
133
134 static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
135 {
136 struct symbol_cache *sc;
137
138 if (!sym || strlen(sym) == 0)
139 return NULL;
140 sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
141 if (!sc)
142 return NULL;
143
144 sc->symbol = kstrdup(sym, GFP_KERNEL);
145 if (!sc->symbol) {
146 kfree(sc);
147 return NULL;
148 }
149 sc->offset = offset;
150
151 update_symbol_cache(sc);
152 return sc;
153 }
154
155 static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
156 {
157 struct symbol_cache *sc = data;
158
159 if (sc->addr)
160 return fetch_memory(regs, (void *)sc->addr);
161 else
162 return 0;
163 }
164
165 /* Special indirect memory access interface */
166 struct indirect_fetch_data {
167 struct fetch_func orig;
168 long offset;
169 };
170
171 static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
172 {
173 struct indirect_fetch_data *ind = data;
174 unsigned long addr;
175
176 addr = call_fetch(&ind->orig, regs);
177 if (addr) {
178 addr += ind->offset;
179 return fetch_memory(regs, (void *)addr);
180 } else
181 return 0;
182 }
183
184 static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
185 {
186 if (data->orig.func == fetch_indirect)
187 free_indirect_fetch_data(data->orig.data);
188 else if (data->orig.func == fetch_symbol)
189 free_symbol_cache(data->orig.data);
190 kfree(data);
191 }
192
193 /**
194 * Kprobe tracer core functions
195 */
196
197 struct probe_arg {
198 struct fetch_func fetch;
199 const char *name;
200 };
201
202 /* Flags for trace_probe */
203 #define TP_FLAG_TRACE 1
204 #define TP_FLAG_PROFILE 2
205
206 struct trace_probe {
207 struct list_head list;
208 struct kretprobe rp; /* Use rp.kp for kprobe use */
209 unsigned long nhit;
210 unsigned int flags; /* For TP_FLAG_* */
211 const char *symbol; /* symbol name */
212 struct ftrace_event_call call;
213 struct trace_event event;
214 unsigned int nr_args;
215 struct probe_arg args[];
216 };
217
218 #define SIZEOF_TRACE_PROBE(n) \
219 (offsetof(struct trace_probe, args) + \
220 (sizeof(struct probe_arg) * (n)))
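/*
 * args[] is a flexible array member, so SIZEOF_TRACE_PROBE(n) is the fixed
 * header plus n probe_arg slots; e.g. SIZEOF_TRACE_PROBE(2) covers
 * tp->args[0] and tp->args[1].
 */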
221
222 static __kprobes int probe_is_return(struct trace_probe *tp)
223 {
224 return tp->rp.handler != NULL;
225 }
226
227 static __kprobes const char *probe_symbol(struct trace_probe *tp)
228 {
229 return tp->symbol ? tp->symbol : "unknown";
230 }
231
232 static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
233 {
234 int ret = -EINVAL;
235
236 if (ff->func == fetch_argument)
237 ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
238 else if (ff->func == fetch_register) {
239 const char *name;
240 name = regs_query_register_name((unsigned int)((long)ff->data));
241 ret = snprintf(buf, n, "%%%s", name);
242 } else if (ff->func == fetch_stack)
243 ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
244 else if (ff->func == fetch_memory)
245 ret = snprintf(buf, n, "@0x%p", ff->data);
246 else if (ff->func == fetch_symbol) {
247 struct symbol_cache *sc = ff->data;
248 ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset);
249 } else if (ff->func == fetch_retvalue)
250 ret = snprintf(buf, n, "$retval");
251 else if (ff->func == fetch_stack_address)
252 ret = snprintf(buf, n, "$stack");
253 else if (ff->func == fetch_indirect) {
254 struct indirect_fetch_data *id = ff->data;
255 size_t l = 0;
256 ret = snprintf(buf, n, "%+ld(", id->offset);
257 if (ret >= n)
258 goto end;
259 l += ret;
260 ret = probe_arg_string(buf + l, n - l, &id->orig);
261 if (ret < 0)
262 goto end;
263 l += ret;
264 ret = snprintf(buf + l, n - l, ")");
265 ret += l;
266 }
267 end:
268 if (ret >= n)
269 return -ENOSPC;
270 return ret;
271 }
272
273 static int register_probe_event(struct trace_probe *tp);
274 static void unregister_probe_event(struct trace_probe *tp);
275
276 static DEFINE_MUTEX(probe_lock);
277 static LIST_HEAD(probe_list);
278
279 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
280 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
281 struct pt_regs *regs);
282
283 /*
284 * Allocate new trace_probe and initialize it (including kprobes).
285 */
286 static struct trace_probe *alloc_trace_probe(const char *group,
287 const char *event,
288 void *addr,
289 const char *symbol,
290 unsigned long offs,
291 int nargs, int is_return)
292 {
293 struct trace_probe *tp;
294
295 tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
296 if (!tp)
297 return ERR_PTR(-ENOMEM);
298
299 if (symbol) {
300 tp->symbol = kstrdup(symbol, GFP_KERNEL);
301 if (!tp->symbol)
302 goto error;
303 tp->rp.kp.symbol_name = tp->symbol;
304 tp->rp.kp.offset = offs;
305 } else
306 tp->rp.kp.addr = addr;
307
308 if (is_return)
309 tp->rp.handler = kretprobe_dispatcher;
310 else
311 tp->rp.kp.pre_handler = kprobe_dispatcher;
312
313 if (!event)
314 goto error;
315 tp->call.name = kstrdup(event, GFP_KERNEL);
316 if (!tp->call.name)
317 goto error;
318
319 if (!group)
320 goto error;
321 tp->call.system = kstrdup(group, GFP_KERNEL);
322 if (!tp->call.system)
323 goto error;
324
325 INIT_LIST_HEAD(&tp->list);
326 return tp;
327 error:
328 kfree(tp->call.name);
329 kfree(tp->symbol);
330 kfree(tp);
331 return ERR_PTR(-ENOMEM);
332 }
333
334 static void free_probe_arg(struct probe_arg *arg)
335 {
336 if (arg->fetch.func == fetch_symbol)
337 free_symbol_cache(arg->fetch.data);
338 else if (arg->fetch.func == fetch_indirect)
339 free_indirect_fetch_data(arg->fetch.data);
340 kfree(arg->name);
341 }
342
343 static void free_trace_probe(struct trace_probe *tp)
344 {
345 int i;
346
347 for (i = 0; i < tp->nr_args; i++)
348 free_probe_arg(&tp->args[i]);
349
350 kfree(tp->call.system);
351 kfree(tp->call.name);
352 kfree(tp->symbol);
353 kfree(tp);
354 }
355
356 static struct trace_probe *find_probe_event(const char *event)
357 {
358 struct trace_probe *tp;
359
360 list_for_each_entry(tp, &probe_list, list)
361 if (!strcmp(tp->call.name, event))
362 return tp;
363 return NULL;
364 }
365
366 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
367 static void unregister_trace_probe(struct trace_probe *tp)
368 {
369 if (probe_is_return(tp))
370 unregister_kretprobe(&tp->rp);
371 else
372 unregister_kprobe(&tp->rp.kp);
373 list_del(&tp->list);
374 unregister_probe_event(tp);
375 }
376
377 /* Register a trace_probe and probe_event */
378 static int register_trace_probe(struct trace_probe *tp)
379 {
380 struct trace_probe *old_tp;
381 int ret;
382
383 mutex_lock(&probe_lock);
384
385 /* register as an event */
386 old_tp = find_probe_event(tp->call.name);
387 if (old_tp) {
388 /* delete old event */
389 unregister_trace_probe(old_tp);
390 free_trace_probe(old_tp);
391 }
392 ret = register_probe_event(tp);
393 if (ret) {
394 		pr_warning("Failed to register probe event(%d)\n", ret);
395 goto end;
396 }
397
398 tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
399 if (probe_is_return(tp))
400 ret = register_kretprobe(&tp->rp);
401 else
402 ret = register_kprobe(&tp->rp.kp);
403
404 if (ret) {
405 pr_warning("Could not insert probe(%d)\n", ret);
406 if (ret == -EILSEQ) {
407 pr_warning("Probing address(0x%p) is not an "
408 "instruction boundary.\n",
409 tp->rp.kp.addr);
410 ret = -EINVAL;
411 }
412 unregister_probe_event(tp);
413 } else
414 list_add_tail(&tp->list, &probe_list);
415 end:
416 mutex_unlock(&probe_lock);
417 return ret;
418 }
419
420 /* Split symbol and offset. */
421 static int split_symbol_offset(char *symbol, unsigned long *offset)
422 {
423 char *tmp;
424 int ret;
425
426 if (!offset)
427 return -EINVAL;
428
429 tmp = strchr(symbol, '+');
430 if (tmp) {
431 /* skip sign because strict_strtol doesn't accept '+' */
432 ret = strict_strtoul(tmp + 1, 0, offset);
433 if (ret)
434 return ret;
435 *tmp = '\0';
436 } else
437 *offset = 0;
438 return 0;
439 }
440
441 #define PARAM_MAX_ARGS 16
442 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
443
444 static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
445 {
446 int ret = 0;
447 unsigned long param;
448
449 if (strcmp(arg, "retval") == 0) {
450 if (is_return) {
451 ff->func = fetch_retvalue;
452 ff->data = NULL;
453 } else
454 ret = -EINVAL;
455 } else if (strncmp(arg, "stack", 5) == 0) {
456 if (arg[5] == '\0') {
457 ff->func = fetch_stack_address;
458 ff->data = NULL;
459 } else if (isdigit(arg[5])) {
460 ret = strict_strtoul(arg + 5, 10, &param);
461 if (ret || param > PARAM_MAX_STACK)
462 ret = -EINVAL;
463 else {
464 ff->func = fetch_stack;
465 ff->data = (void *)param;
466 }
467 } else
468 ret = -EINVAL;
469 } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
470 ret = strict_strtoul(arg + 3, 10, &param);
471 if (ret || param > PARAM_MAX_ARGS)
472 ret = -EINVAL;
473 else {
474 ff->func = fetch_argument;
475 ff->data = (void *)param;
476 }
477 } else
478 ret = -EINVAL;
479 return ret;
480 }
481
482 static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
483 {
484 int ret = 0;
485 unsigned long param;
486 long offset;
487 char *tmp;
488
489 switch (arg[0]) {
490 case '$':
491 ret = parse_probe_vars(arg + 1, ff, is_return);
492 break;
493 case '%': /* named register */
494 ret = regs_query_register_offset(arg + 1);
495 if (ret >= 0) {
496 ff->func = fetch_register;
497 ff->data = (void *)(unsigned long)ret;
498 ret = 0;
499 }
500 break;
501 case '@': /* memory or symbol */
502 if (isdigit(arg[1])) {
503 ret = strict_strtoul(arg + 1, 0, &param);
504 if (ret)
505 break;
506 ff->func = fetch_memory;
507 ff->data = (void *)param;
508 } else {
509 ret = split_symbol_offset(arg + 1, &offset);
510 if (ret)
511 break;
512 ff->data = alloc_symbol_cache(arg + 1, offset);
513 if (ff->data)
514 ff->func = fetch_symbol;
515 else
516 ret = -EINVAL;
517 }
518 break;
519 case '+': /* indirect memory */
520 case '-':
521 tmp = strchr(arg, '(');
522 if (!tmp) {
523 ret = -EINVAL;
524 break;
525 }
526 *tmp = '\0';
527 ret = strict_strtol(arg + 1, 0, &offset);
528 if (ret)
529 break;
530 if (arg[0] == '-')
531 offset = -offset;
532 arg = tmp + 1;
533 tmp = strrchr(arg, ')');
534 if (tmp) {
535 struct indirect_fetch_data *id;
536 *tmp = '\0';
537 id = kzalloc(sizeof(struct indirect_fetch_data),
538 GFP_KERNEL);
539 if (!id)
540 return -ENOMEM;
541 id->offset = offset;
542 ret = parse_probe_arg(arg, &id->orig, is_return);
543 if (ret)
544 kfree(id);
545 else {
546 ff->func = fetch_indirect;
547 ff->data = (void *)id;
548 }
549 } else
550 ret = -EINVAL;
551 break;
552 default:
553 /* TODO: support custom handler */
554 ret = -EINVAL;
555 }
556 return ret;
557 }
558
559 /* Return 1 if name is reserved or already used by another argument */
560 static int conflict_field_name(const char *name,
561 struct probe_arg *args, int narg)
562 {
563 int i;
564 for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
565 if (strcmp(reserved_field_names[i], name) == 0)
566 return 1;
567 for (i = 0; i < narg; i++)
568 if (strcmp(args[i].name, name) == 0)
569 return 1;
570 return 0;
571 }
572
573 static int create_trace_probe(int argc, char **argv)
574 {
575 /*
576 * Argument syntax:
577 * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
578 * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
579 * Fetch args:
580 * $argN : fetch Nth of function argument. (N:0-)
581 * $retval : fetch return value
582 * $stack : fetch stack address
583 * $stackN : fetch Nth of stack (N:0-)
584 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
585 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
586 * %REG : fetch register REG
587 * Indirect memory fetch:
588 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
589 * Alias name of args:
590 * NAME=FETCHARG : set NAME as alias of FETCHARG.
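 *
 * For example (an illustrative shell session, not part of this file; it
 * assumes debugfs is mounted at /sys/kernel/debug):
 *   echo 'p:myprobe do_sys_open dfd=$arg0 filename=$arg1' > \
 *        /sys/kernel/debug/tracing/kprobe_events
 *   echo 'r:myretprobe do_sys_open $retval' >> \
 *        /sys/kernel/debug/tracing/kprobe_events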
591 */
592 struct trace_probe *tp;
593 int i, ret = 0;
594 int is_return = 0;
595 char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
596 unsigned long offset = 0;
597 void *addr = NULL;
598 char buf[MAX_EVENT_NAME_LEN];
599
600 if (argc < 2) {
601 pr_info("Probe point is not specified.\n");
602 return -EINVAL;
603 }
604
605 if (argv[0][0] == 'p')
606 is_return = 0;
607 else if (argv[0][0] == 'r')
608 is_return = 1;
609 else {
610 pr_info("Probe definition must be started with 'p' or 'r'.\n");
611 return -EINVAL;
612 }
613
614 if (argv[0][1] == ':') {
615 event = &argv[0][2];
616 if (strchr(event, '/')) {
617 group = event;
618 event = strchr(group, '/') + 1;
619 event[-1] = '\0';
620 if (strlen(group) == 0) {
621 pr_info("Group name is not specified\n");
622 return -EINVAL;
623 }
624 }
625 if (strlen(event) == 0) {
626 pr_info("Event name is not specified\n");
627 return -EINVAL;
628 }
629 }
630
631 if (isdigit(argv[1][0])) {
632 if (is_return) {
633 pr_info("Return probe point must be a symbol.\n");
634 return -EINVAL;
635 }
636 /* an address specified */
637 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
638 if (ret) {
639 pr_info("Failed to parse address.\n");
640 return ret;
641 }
642 } else {
643 /* a symbol specified */
644 symbol = argv[1];
645 /* TODO: support .init module functions */
646 ret = split_symbol_offset(symbol, &offset);
647 if (ret) {
648 pr_info("Failed to parse symbol.\n");
649 return ret;
650 }
651 if (offset && is_return) {
652 pr_info("Return probe must be used without offset.\n");
653 return -EINVAL;
654 }
655 }
656 argc -= 2; argv += 2;
657
658 /* setup a probe */
659 if (!group)
660 group = KPROBE_EVENT_SYSTEM;
661 if (!event) {
662 /* Make a new event name */
663 if (symbol)
664 snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
665 is_return ? 'r' : 'p', symbol, offset);
666 else
667 snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
668 is_return ? 'r' : 'p', addr);
669 event = buf;
670 }
671 tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
672 is_return);
673 if (IS_ERR(tp)) {
674 pr_info("Failed to allocate trace_probe (%d)\n",
675 (int)PTR_ERR(tp));
676 return PTR_ERR(tp);
677 }
678
679 /* parse arguments */
680 ret = 0;
681 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
682 /* Parse argument name */
683 arg = strchr(argv[i], '=');
684 if (arg)
685 *arg++ = '\0';
686 else
687 arg = argv[i];
688
689 if (conflict_field_name(argv[i], tp->args, i)) {
690 pr_info("Argument%d name '%s' conflicts with "
691 "another field.\n", i, argv[i]);
692 ret = -EINVAL;
693 goto error;
694 }
695
696 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
697
698 /* Parse fetch argument */
699 if (strlen(arg) > MAX_ARGSTR_LEN) {
700 pr_info("Argument%d(%s) is too long.\n", i, arg);
701 ret = -ENOSPC;
702 goto error;
703 }
704 ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
705 if (ret) {
706 pr_info("Parse error at argument%d. (%d)\n", i, ret);
707 goto error;
708 }
709 }
710 tp->nr_args = i;
711
712 ret = register_trace_probe(tp);
713 if (ret)
714 goto error;
715 return 0;
716
717 error:
718 free_trace_probe(tp);
719 return ret;
720 }
721
722 static void cleanup_all_probes(void)
723 {
724 struct trace_probe *tp;
725
726 mutex_lock(&probe_lock);
727 /* TODO: Use batch unregistration */
728 while (!list_empty(&probe_list)) {
729 tp = list_entry(probe_list.next, struct trace_probe, list);
730 unregister_trace_probe(tp);
731 free_trace_probe(tp);
732 }
733 mutex_unlock(&probe_lock);
734 }
735
736
737 /* Probes listing interfaces */
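/*
 * Reading the kprobe_events file lists each registered probe in roughly the
 * syntax that created it, one per line, e.g. (hypothetical output):
 *   p:myprobe do_sys_open+0 dfd=$arg0 filename=$arg1
 * See probes_seq_show() below for the exact formatting.
 */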
738 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
739 {
740 mutex_lock(&probe_lock);
741 return seq_list_start(&probe_list, *pos);
742 }
743
744 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
745 {
746 return seq_list_next(v, &probe_list, pos);
747 }
748
749 static void probes_seq_stop(struct seq_file *m, void *v)
750 {
751 mutex_unlock(&probe_lock);
752 }
753
754 static int probes_seq_show(struct seq_file *m, void *v)
755 {
756 struct trace_probe *tp = v;
757 int i, ret;
758 char buf[MAX_ARGSTR_LEN + 1];
759
760 seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
761 seq_printf(m, ":%s", tp->call.name);
762
763 if (tp->symbol)
764 seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
765 else
766 seq_printf(m, " 0x%p", tp->rp.kp.addr);
767
768 for (i = 0; i < tp->nr_args; i++) {
769 ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
770 if (ret < 0) {
771 pr_warning("Argument%d decoding error(%d).\n", i, ret);
772 return ret;
773 }
774 seq_printf(m, " %s=%s", tp->args[i].name, buf);
775 }
776 seq_printf(m, "\n");
777 return 0;
778 }
779
780 static const struct seq_operations probes_seq_op = {
781 .start = probes_seq_start,
782 .next = probes_seq_next,
783 .stop = probes_seq_stop,
784 .show = probes_seq_show
785 };
786
787 static int probes_open(struct inode *inode, struct file *file)
788 {
789 if ((file->f_mode & FMODE_WRITE) &&
790 (file->f_flags & O_TRUNC))
791 cleanup_all_probes();
792
793 return seq_open(file, &probes_seq_op);
794 }
795
796 static int command_trace_probe(const char *buf)
797 {
798 char **argv;
799 int argc = 0, ret = 0;
800
801 argv = argv_split(GFP_KERNEL, buf, &argc);
802 if (!argv)
803 return -ENOMEM;
804
805 if (argc)
806 ret = create_trace_probe(argc, argv);
807
808 argv_free(argv);
809 return ret;
810 }
811
812 #define WRITE_BUFSIZE 128
813
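/*
 * probes_write() consumes the user buffer one '\n'-terminated command at a
 * time (each command must fit in WRITE_BUFSIZE - 1 bytes) and strips '#'
 * comments, so several probe definitions can be issued in a single write.
 */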
814 static ssize_t probes_write(struct file *file, const char __user *buffer,
815 size_t count, loff_t *ppos)
816 {
817 char *kbuf, *tmp;
818 int ret;
819 size_t done;
820 size_t size;
821
822 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
823 if (!kbuf)
824 return -ENOMEM;
825
826 ret = done = 0;
827 while (done < count) {
828 size = count - done;
829 if (size >= WRITE_BUFSIZE)
830 size = WRITE_BUFSIZE - 1;
831 if (copy_from_user(kbuf, buffer + done, size)) {
832 ret = -EFAULT;
833 goto out;
834 }
835 kbuf[size] = '\0';
836 tmp = strchr(kbuf, '\n');
837 if (tmp) {
838 *tmp = '\0';
839 size = tmp - kbuf + 1;
840 } else if (done + size < count) {
841 pr_warning("Line is too long: "
842 "should be shorter than %d characters.", WRITE_BUFSIZE);
843 ret = -EINVAL;
844 goto out;
845 }
846 done += size;
847 /* Remove comments */
848 tmp = strchr(kbuf, '#');
849 if (tmp)
850 *tmp = '\0';
851
852 ret = command_trace_probe(kbuf);
853 if (ret)
854 goto out;
855 }
856 ret = done;
857 out:
858 kfree(kbuf);
859 return ret;
860 }
861
862 static const struct file_operations kprobe_events_ops = {
863 .owner = THIS_MODULE,
864 .open = probes_open,
865 .read = seq_read,
866 .llseek = seq_lseek,
867 .release = seq_release,
868 .write = probes_write,
869 };
870
871 /* Probes profiling interfaces */
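/*
 * kprobe_profile shows one line per probe: the event name, the number of
 * handler hits (tp->nhit) and the kprobe missed count (tp->rp.kp.nmissed),
 * as formatted by probes_profile_seq_show() below.
 */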
872 static int probes_profile_seq_show(struct seq_file *m, void *v)
873 {
874 struct trace_probe *tp = v;
875
876 seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
877 tp->rp.kp.nmissed);
878
879 return 0;
880 }
881
882 static const struct seq_operations profile_seq_op = {
883 .start = probes_seq_start,
884 .next = probes_seq_next,
885 .stop = probes_seq_stop,
886 .show = probes_profile_seq_show
887 };
888
889 static int profile_open(struct inode *inode, struct file *file)
890 {
891 return seq_open(file, &profile_seq_op);
892 }
893
894 static const struct file_operations kprobe_profile_ops = {
895 .owner = THIS_MODULE,
896 .open = profile_open,
897 .read = seq_read,
898 .llseek = seq_lseek,
899 .release = seq_release,
900 };
901
902 /* Kprobe handler */
903 static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
904 {
905 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
906 struct kprobe_trace_entry *entry;
907 struct ring_buffer_event *event;
908 struct ring_buffer *buffer;
909 int size, i, pc;
910 unsigned long irq_flags;
911 struct ftrace_event_call *call = &tp->call;
912
913 tp->nhit++;
914
915 local_save_flags(irq_flags);
916 pc = preempt_count();
917
918 size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
919
920 event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
921 irq_flags, pc);
922 if (!event)
923 return 0;
924
925 entry = ring_buffer_event_data(event);
926 entry->nargs = tp->nr_args;
927 entry->ip = (unsigned long)kp->addr;
928 for (i = 0; i < tp->nr_args; i++)
929 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
930
931 if (!filter_current_check_discard(buffer, call, entry, event))
932 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
933 return 0;
934 }
935
936 /* Kretprobe handler */
937 static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
938 struct pt_regs *regs)
939 {
940 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
941 struct kretprobe_trace_entry *entry;
942 struct ring_buffer_event *event;
943 struct ring_buffer *buffer;
944 int size, i, pc;
945 unsigned long irq_flags;
946 struct ftrace_event_call *call = &tp->call;
947
948 local_save_flags(irq_flags);
949 pc = preempt_count();
950
951 size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
952
953 event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
954 irq_flags, pc);
955 if (!event)
956 return 0;
957
958 entry = ring_buffer_event_data(event);
959 entry->nargs = tp->nr_args;
960 entry->func = (unsigned long)tp->rp.kp.addr;
961 entry->ret_ip = (unsigned long)ri->ret_addr;
962 for (i = 0; i < tp->nr_args; i++)
963 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
964
965 if (!filter_current_check_discard(buffer, call, entry, event))
966 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
967
968 return 0;
969 }
970
971 /* Event entry printers */
972 enum print_line_t
973 print_kprobe_event(struct trace_iterator *iter, int flags)
974 {
975 struct kprobe_trace_entry *field;
976 struct trace_seq *s = &iter->seq;
977 struct trace_event *event;
978 struct trace_probe *tp;
979 int i;
980
981 field = (struct kprobe_trace_entry *)iter->ent;
982 event = ftrace_find_event(field->ent.type);
983 tp = container_of(event, struct trace_probe, event);
984
985 if (!trace_seq_printf(s, "%s: (", tp->call.name))
986 goto partial;
987
988 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
989 goto partial;
990
991 if (!trace_seq_puts(s, ")"))
992 goto partial;
993
994 for (i = 0; i < field->nargs; i++)
995 if (!trace_seq_printf(s, " %s=%lx",
996 tp->args[i].name, field->args[i]))
997 goto partial;
998
999 if (!trace_seq_puts(s, "\n"))
1000 goto partial;
1001
1002 return TRACE_TYPE_HANDLED;
1003 partial:
1004 return TRACE_TYPE_PARTIAL_LINE;
1005 }
1006
1007 enum print_line_t
1008 print_kretprobe_event(struct trace_iterator *iter, int flags)
1009 {
1010 struct kretprobe_trace_entry *field;
1011 struct trace_seq *s = &iter->seq;
1012 struct trace_event *event;
1013 struct trace_probe *tp;
1014 int i;
1015
1016 field = (struct kretprobe_trace_entry *)iter->ent;
1017 event = ftrace_find_event(field->ent.type);
1018 tp = container_of(event, struct trace_probe, event);
1019
1020 if (!trace_seq_printf(s, "%s: (", tp->call.name))
1021 goto partial;
1022
1023 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1024 goto partial;
1025
1026 if (!trace_seq_puts(s, " <- "))
1027 goto partial;
1028
1029 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1030 goto partial;
1031
1032 if (!trace_seq_puts(s, ")"))
1033 goto partial;
1034
1035 for (i = 0; i < field->nargs; i++)
1036 if (!trace_seq_printf(s, " %s=%lx",
1037 tp->args[i].name, field->args[i]))
1038 goto partial;
1039
1040 if (!trace_seq_puts(s, "\n"))
1041 goto partial;
1042
1043 return TRACE_TYPE_HANDLED;
1044 partial:
1045 return TRACE_TYPE_PARTIAL_LINE;
1046 }
1047
1048 static int probe_event_enable(struct ftrace_event_call *call)
1049 {
1050 struct trace_probe *tp = (struct trace_probe *)call->data;
1051
1052 tp->flags |= TP_FLAG_TRACE;
1053 if (probe_is_return(tp))
1054 return enable_kretprobe(&tp->rp);
1055 else
1056 return enable_kprobe(&tp->rp.kp);
1057 }
1058
1059 static void probe_event_disable(struct ftrace_event_call *call)
1060 {
1061 struct trace_probe *tp = (struct trace_probe *)call->data;
1062
1063 tp->flags &= ~TP_FLAG_TRACE;
1064 if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
1065 if (probe_is_return(tp))
1066 disable_kretprobe(&tp->rp);
1067 else
1068 disable_kprobe(&tp->rp.kp);
1069 }
1070 }
1071
1072 static int probe_event_raw_init(struct ftrace_event_call *event_call)
1073 {
1074 INIT_LIST_HEAD(&event_call->fields);
1075
1076 return 0;
1077 }
1078
1079 #undef DEFINE_FIELD
1080 #define DEFINE_FIELD(type, item, name, is_signed) \
1081 do { \
1082 ret = trace_define_field(event_call, #type, name, \
1083 offsetof(typeof(field), item), \
1084 sizeof(field.item), is_signed, \
1085 FILTER_OTHER); \
1086 if (ret) \
1087 return ret; \
1088 } while (0)
1089
1090 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1091 {
1092 int ret, i;
1093 struct kprobe_trace_entry field;
1094 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1095
1096 ret = trace_define_common_fields(event_call);
1097 if (ret)
1098 return ret;
1099
1100 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1101 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1102 /* Set argument names as fields */
1103 for (i = 0; i < tp->nr_args; i++)
1104 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1105 return 0;
1106 }
1107
1108 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1109 {
1110 int ret, i;
1111 struct kretprobe_trace_entry field;
1112 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1113
1114 ret = trace_define_common_fields(event_call);
1115 if (ret)
1116 return ret;
1117
1118 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1119 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1120 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1121 /* Set argument names as fields */
1122 for (i = 0; i < tp->nr_args; i++)
1123 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1124 return 0;
1125 }
1126
1127 static int __probe_event_show_format(struct trace_seq *s,
1128 struct trace_probe *tp, const char *fmt,
1129 const char *arg)
1130 {
1131 int i;
1132
1133 /* Show format */
1134 if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
1135 return 0;
1136
1137 for (i = 0; i < tp->nr_args; i++)
1138 if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
1139 return 0;
1140
1141 if (!trace_seq_printf(s, "\", %s", arg))
1142 return 0;
1143
1144 for (i = 0; i < tp->nr_args; i++)
1145 if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
1146 return 0;
1147
1148 return trace_seq_puts(s, "\n");
1149 }
1150
1151 #undef SHOW_FIELD
1152 #define SHOW_FIELD(type, item, name) \
1153 do { \
1154 ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \
1155 "offset:%u;\tsize:%u;\n", name, \
1156 (unsigned int)offsetof(typeof(field), item),\
1157 (unsigned int)sizeof(type)); \
1158 if (!ret) \
1159 return 0; \
1160 } while (0)
1161
1162 static int kprobe_event_show_format(struct ftrace_event_call *call,
1163 struct trace_seq *s)
1164 {
1165 struct kprobe_trace_entry field __attribute__((unused));
1166 int ret, i;
1167 struct trace_probe *tp = (struct trace_probe *)call->data;
1168
1169 SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
1170 SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
1171
1172 /* Show fields */
1173 for (i = 0; i < tp->nr_args; i++)
1174 SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
1175 trace_seq_puts(s, "\n");
1176
1177 return __probe_event_show_format(s, tp, "(%lx)",
1178 "REC->" FIELD_STRING_IP);
1179 }
1180
1181 static int kretprobe_event_show_format(struct ftrace_event_call *call,
1182 struct trace_seq *s)
1183 {
1184 struct kretprobe_trace_entry field __attribute__((unused));
1185 int ret, i;
1186 struct trace_probe *tp = (struct trace_probe *)call->data;
1187
1188 SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
1189 SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
1190 SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
1191
1192 /* Show fields */
1193 for (i = 0; i < tp->nr_args; i++)
1194 SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
1195 trace_seq_puts(s, "\n");
1196
1197 return __probe_event_show_format(s, tp, "(%lx <- %lx)",
1198 "REC->" FIELD_STRING_FUNC
1199 ", REC->" FIELD_STRING_RETIP);
1200 }
1201
1202 #ifdef CONFIG_EVENT_PROFILE
1203
1204 /* Kprobe profile handler */
1205 static __kprobes int kprobe_profile_func(struct kprobe *kp,
1206 struct pt_regs *regs)
1207 {
1208 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1209 struct ftrace_event_call *call = &tp->call;
1210 struct kprobe_trace_entry *entry;
1211 struct trace_entry *ent;
1212 int size, __size, i, pc, __cpu;
1213 unsigned long irq_flags;
1214 char *raw_data;
1215
1216 pc = preempt_count();
1217 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1218 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1219 size -= sizeof(u32);
1220 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
1221 "profile buffer not large enough"))
1222 return 0;
1223
1224 /*
1225 * Protect the non nmi buffer
1226 * This also protects the rcu read side
1227 */
1228 local_irq_save(irq_flags);
1229 __cpu = smp_processor_id();
1230
1231 if (in_nmi())
1232 raw_data = rcu_dereference(trace_profile_buf_nmi);
1233 else
1234 raw_data = rcu_dereference(trace_profile_buf);
1235
1236 if (!raw_data)
1237 goto end;
1238
1239 raw_data = per_cpu_ptr(raw_data, __cpu);
1240 /* Zero dead bytes from alignment to avoid buffer leak to userspace */
1241 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
1242 entry = (struct kprobe_trace_entry *)raw_data;
1243 ent = &entry->ent;
1244
1245 tracing_generic_entry_update(ent, irq_flags, pc);
1246 ent->type = call->id;
1247 entry->nargs = tp->nr_args;
1248 entry->ip = (unsigned long)kp->addr;
1249 for (i = 0; i < tp->nr_args; i++)
1250 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1251 perf_tp_event(call->id, entry->ip, 1, entry, size);
1252 end:
1253 local_irq_restore(irq_flags);
1254 return 0;
1255 }
1256
1257 /* Kretprobe profile handler */
1258 static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
1259 struct pt_regs *regs)
1260 {
1261 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1262 struct ftrace_event_call *call = &tp->call;
1263 struct kretprobe_trace_entry *entry;
1264 struct trace_entry *ent;
1265 int size, __size, i, pc, __cpu;
1266 unsigned long irq_flags;
1267 char *raw_data;
1268
1269 pc = preempt_count();
1270 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1271 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1272 size -= sizeof(u32);
1273 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
1274 "profile buffer not large enough"))
1275 return 0;
1276
1277 /*
1278 * Protect the non nmi buffer
1279 * This also protects the rcu read side
1280 */
1281 local_irq_save(irq_flags);
1282 __cpu = smp_processor_id();
1283
1284 if (in_nmi())
1285 raw_data = rcu_dereference(trace_profile_buf_nmi);
1286 else
1287 raw_data = rcu_dereference(trace_profile_buf);
1288
1289 if (!raw_data)
1290 goto end;
1291
1292 raw_data = per_cpu_ptr(raw_data, __cpu);
1293 /* Zero dead bytes from alignment to avoid buffer leak to userspace */
1294 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
1295 entry = (struct kretprobe_trace_entry *)raw_data;
1296 ent = &entry->ent;
1297
1298 tracing_generic_entry_update(ent, irq_flags, pc);
1299 ent->type = call->id;
1300 entry->nargs = tp->nr_args;
1301 entry->func = (unsigned long)tp->rp.kp.addr;
1302 entry->ret_ip = (unsigned long)ri->ret_addr;
1303 for (i = 0; i < tp->nr_args; i++)
1304 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1305 perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
1306 end:
1307 local_irq_restore(irq_flags);
1308 return 0;
1309 }
1310
1311 static int probe_profile_enable(struct ftrace_event_call *call)
1312 {
1313 struct trace_probe *tp = (struct trace_probe *)call->data;
1314
1315 tp->flags |= TP_FLAG_PROFILE;
1316
1317 if (probe_is_return(tp))
1318 return enable_kretprobe(&tp->rp);
1319 else
1320 return enable_kprobe(&tp->rp.kp);
1321 }
1322
1323 static void probe_profile_disable(struct ftrace_event_call *call)
1324 {
1325 struct trace_probe *tp = (struct trace_probe *)call->data;
1326
1327 tp->flags &= ~TP_FLAG_PROFILE;
1328
1329 if (!(tp->flags & TP_FLAG_TRACE)) {
1330 if (probe_is_return(tp))
1331 disable_kretprobe(&tp->rp);
1332 else
1333 disable_kprobe(&tp->rp.kp);
1334 }
1335 }
1336 #endif /* CONFIG_EVENT_PROFILE */
1337
1338
1339 static __kprobes
1340 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1341 {
1342 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1343
1344 if (tp->flags & TP_FLAG_TRACE)
1345 kprobe_trace_func(kp, regs);
1346 #ifdef CONFIG_EVENT_PROFILE
1347 if (tp->flags & TP_FLAG_PROFILE)
1348 kprobe_profile_func(kp, regs);
1349 #endif /* CONFIG_EVENT_PROFILE */
1350 return 0; /* We don't tweak the kernel, so just return 0 */
1351 }
1352
1353 static __kprobes
1354 int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1355 {
1356 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1357
1358 if (tp->flags & TP_FLAG_TRACE)
1359 kretprobe_trace_func(ri, regs);
1360 #ifdef CONFIG_EVENT_PROFILE
1361 if (tp->flags & TP_FLAG_PROFILE)
1362 kretprobe_profile_func(ri, regs);
1363 #endif /* CONFIG_EVENT_PROFILE */
1364 return 0; /* We don't tweak the kernel, so just return 0 */
1365 }
1366
1367 static int register_probe_event(struct trace_probe *tp)
1368 {
1369 struct ftrace_event_call *call = &tp->call;
1370 int ret;
1371
1372 /* Initialize ftrace_event_call */
1373 if (probe_is_return(tp)) {
1374 tp->event.trace = print_kretprobe_event;
1375 call->raw_init = probe_event_raw_init;
1376 call->show_format = kretprobe_event_show_format;
1377 call->define_fields = kretprobe_event_define_fields;
1378 } else {
1379 tp->event.trace = print_kprobe_event;
1380 call->raw_init = probe_event_raw_init;
1381 call->show_format = kprobe_event_show_format;
1382 call->define_fields = kprobe_event_define_fields;
1383 }
1384 call->event = &tp->event;
1385 call->id = register_ftrace_event(&tp->event);
1386 if (!call->id)
1387 return -ENODEV;
1388 call->enabled = 0;
1389 call->regfunc = probe_event_enable;
1390 call->unregfunc = probe_event_disable;
1391
1392 #ifdef CONFIG_EVENT_PROFILE
1393 atomic_set(&call->profile_count, -1);
1394 call->profile_enable = probe_profile_enable;
1395 call->profile_disable = probe_profile_disable;
1396 #endif
1397 call->data = tp;
1398 ret = trace_add_event_call(call);
1399 if (ret) {
1400 pr_info("Failed to register kprobe event: %s\n", call->name);
1401 unregister_ftrace_event(&tp->event);
1402 }
1403 return ret;
1404 }
1405
1406 static void unregister_probe_event(struct trace_probe *tp)
1407 {
1408 /* tp->event is unregistered in trace_remove_event_call() */
1409 trace_remove_event_call(&tp->call);
1410 }
1411
1412 /* Make a debugfs interface for controlling probe points */
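/*
 * Both files live under the tracing debugfs directory (typically
 * /sys/kernel/debug/tracing/): kprobe_events accepts the probe definitions
 * described above create_trace_probe(), and kprobe_profile exposes the
 * per-probe hit/miss statistics. The path assumes the usual debugfs mount
 * point and may differ on a given system.
 */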
1413 static __init int init_kprobe_trace(void)
1414 {
1415 struct dentry *d_tracer;
1416 struct dentry *entry;
1417
1418 d_tracer = tracing_init_dentry();
1419 if (!d_tracer)
1420 return 0;
1421
1422 entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1423 NULL, &kprobe_events_ops);
1424
1425 /* Event list interface */
1426 if (!entry)
1427 pr_warning("Could not create debugfs "
1428 "'kprobe_events' entry\n");
1429
1430 /* Profile interface */
1431 entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1432 NULL, &kprobe_profile_ops);
1433
1434 if (!entry)
1435 pr_warning("Could not create debugfs "
1436 "'kprobe_profile' entry\n");
1437 return 0;
1438 }
1439 fs_initcall(init_kprobe_trace);
1440
1441
1442 #ifdef CONFIG_FTRACE_STARTUP_TEST
1443
1444 static int kprobe_trace_selftest_target(int a1, int a2, int a3,
1445 int a4, int a5, int a6)
1446 {
1447 return a1 + a2 + a3 + a4 + a5 + a6;
1448 }
1449
1450 static __init int kprobe_trace_self_tests_init(void)
1451 {
1452 int ret;
1453 int (*target)(int, int, int, int, int, int);
1454
1455 target = kprobe_trace_selftest_target;
1456
1457 pr_info("Testing kprobe tracing: ");
1458
1459 ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
1460 "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
1461 if (WARN_ON_ONCE(ret))
1462 pr_warning("error enabling function entry\n");
1463
1464 ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
1465 "$retval");
1466 if (WARN_ON_ONCE(ret))
1467 pr_warning("error enabling function return\n");
1468
1469 ret = target(1, 2, 3, 4, 5, 6);
1470
1471 cleanup_all_probes();
1472
1473 pr_cont("OK\n");
1474 return 0;
1475 }
1476
1477 late_initcall(kprobe_trace_self_tests_init);
1478
1479 #endif