kernel/trace/trace_uprobe.c
1 /*
2 * uprobes-based tracing events
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Copyright (C) IBM Corporation, 2010-2012
18 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/uaccess.h>
23 #include <linux/uprobes.h>
24 #include <linux/namei.h>
25 #include <linux/string.h>
26
27 #include "trace_probe.h"
28
29 #define UPROBE_EVENT_SYSTEM "uprobes"
30
31 struct uprobe_trace_entry_head {
32 struct trace_entry ent;
33 unsigned long vaddr[];
34 };
35
36 #define SIZEOF_TRACE_ENTRY(is_return) \
37 (sizeof(struct uprobe_trace_entry_head) + \
38 sizeof(unsigned long) * (is_return ? 2 : 1))
39
40 #define DATAOF_TRACE_ENTRY(entry, is_return) \
41 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
42
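/*
 * Worked example of the entry layout (a sketch, assuming an 8-byte
 * struct trace_entry and 8-byte longs on a 64-bit build):
 *
 *   entry probe:  SIZEOF_TRACE_ENTRY(false) = 8 + 1*8 = 16 bytes,
 *                 vaddr[0] = probed instruction pointer
 *   return probe: SIZEOF_TRACE_ENTRY(true)  = 8 + 2*8 = 24 bytes,
 *                 vaddr[0] = called function, vaddr[1] = return address
 *
 * DATAOF_TRACE_ENTRY() then points just past vaddr[], where the fetched
 * probe arguments are stored.
 */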
43 struct trace_uprobe_filter {
44 rwlock_t rwlock;
45 int nr_systemwide;
46 struct list_head perf_events;
47 };
48
49 /*
50 * uprobe event core functions
51 */
52 struct trace_uprobe {
53 struct list_head list;
54 struct trace_uprobe_filter filter;
55 struct uprobe_consumer consumer;
56 struct inode *inode;
57 char *filename;
58 unsigned long offset;
59 unsigned long nhit;
60 struct trace_probe tp;
61 };
62
63 #define SIZEOF_TRACE_UPROBE(n) \
64 (offsetof(struct trace_uprobe, tp.args) + \
65 (sizeof(struct probe_arg) * (n)))
66
67 static int register_uprobe_event(struct trace_uprobe *tu);
68 static int unregister_uprobe_event(struct trace_uprobe *tu);
69
70 static DEFINE_MUTEX(uprobe_lock);
71 static LIST_HEAD(uprobe_list);
72
73 struct uprobe_dispatch_data {
74 struct trace_uprobe *tu;
75 unsigned long bp_addr;
76 };
77
78 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
79 static int uretprobe_dispatcher(struct uprobe_consumer *con,
80 unsigned long func, struct pt_regs *regs);
81
82 #ifdef CONFIG_STACK_GROWSUP
83 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
84 {
85 return addr - (n * sizeof(long));
86 }
87 #else
88 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
89 {
90 return addr + (n * sizeof(long));
91 }
92 #endif
93
94 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
95 {
96 unsigned long ret;
97 unsigned long addr = user_stack_pointer(regs);
98
99 addr = adjust_stack_addr(addr, n);
100
101 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
102 return 0;
103
104 return ret;
105 }
106
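/*
 * Illustrative example (assumes x86_64, where the stack grows down and
 * sizeof(long) == 8): a "$stack3" fetch argument ends up calling
 * get_user_stack_nth(regs, 3), which reads the user word at
 * user_stack_pointer(regs) + 3 * 8. A faulting access silently yields 0
 * rather than an error, so a bad stack slot shows up as a zero value in
 * the trace.
 */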
107 /*
108 * Uprobes-specific fetch functions
109 */
110 #define DEFINE_FETCH_stack(type) \
111 static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
112 void *offset, void *dest) \
113 { \
114 *(type *)dest = (type)get_user_stack_nth(regs, \
115 ((unsigned long)offset)); \
116 }
117 DEFINE_BASIC_FETCH_FUNCS(stack)
118 /* No string on the stack entry */
119 #define fetch_stack_string NULL
120 #define fetch_stack_string_size NULL
121
122 #define DEFINE_FETCH_memory(type) \
123 static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
124 void *addr, void *dest) \
125 { \
126 type retval; \
127 void __user *vaddr = (void __force __user *) addr; \
128 \
129 if (copy_from_user(&retval, vaddr, sizeof(type))) \
130 *(type *)dest = 0; \
131 else \
132 *(type *) dest = retval; \
133 }
134 DEFINE_BASIC_FETCH_FUNCS(memory)
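/*
 * For reference, a sketch of what the macros above generate for one type
 * (approximate expansion, assuming FETCH_FUNC_NAME(memory, u32) expands to
 * fetch_memory_u32 as in trace_probe.h):
 *
 *	static __kprobes void fetch_memory_u32(struct pt_regs *regs,
 *					       void *addr, void *dest)
 *	{
 *		u32 retval;
 *		void __user *vaddr = (void __force __user *) addr;
 *
 *		if (copy_from_user(&retval, vaddr, sizeof(u32)))
 *			*(u32 *)dest = 0;
 *		else
 *			*(u32 *)dest = retval;
 *	}
 *
 * Unlike the kprobe variants, these always go through the user-space
 * accessors, because uprobe handlers run in task context on user addresses.
 */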
135 /*
136 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
137 * length and relative data location.
138 */
139 static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
140 void *addr, void *dest)
141 {
142 long ret;
143 u32 rloc = *(u32 *)dest;
144 int maxlen = get_rloc_len(rloc);
145 u8 *dst = get_rloc_data(dest);
146 void __user *src = (void __force __user *) addr;
147
148 if (!maxlen)
149 return;
150
151 ret = strncpy_from_user(dst, src, maxlen);
152
153 if (ret < 0) { /* Failed to fetch string */
154 ((u8 *)get_rloc_data(dest))[0] = '\0';
155 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
156 } else {
157 *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
158 }
159 }
160
161 static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
162 void *addr, void *dest)
163 {
164 int len;
165 void __user *vaddr = (void __force __user *) addr;
166
167 len = strnlen_user(vaddr, MAX_STRING_SIZE);
168
169 if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
170 *(u32 *)dest = 0;
171 else
172 *(u32 *)dest = len;
173 }
174
175 static unsigned long translate_user_vaddr(void *file_offset)
176 {
177 unsigned long base_addr;
178 struct uprobe_dispatch_data *udd;
179
180 udd = (void *) current->utask->vaddr;
181
182 base_addr = udd->bp_addr - udd->tu->offset;
183 return base_addr + (unsigned long)file_offset;
184 }
185
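/*
 * Worked example (hypothetical numbers): a probe placed at file offset
 * 0x4710 that fires at user address 0x7f0123404710 gives
 * base_addr = 0x7f0123404710 - 0x4710 = 0x7f0123400000, so an "@+0x620"
 * style file-offset argument is fetched from 0x7f0123400620. This assumes
 * the referenced data lives in the same mapping as the probed instruction,
 * since a single load base is derived from the breakpoint address.
 */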
186 #define DEFINE_FETCH_file_offset(type) \
187 static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\
188 void *offset, void *dest) \
189 { \
190 void *vaddr = (void *)translate_user_vaddr(offset); \
191 \
192 FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
193 }
194 DEFINE_BASIC_FETCH_FUNCS(file_offset)
195 DEFINE_FETCH_file_offset(string)
196 DEFINE_FETCH_file_offset(string_size)
197
198 /* Fetch type information table */
199 const struct fetch_type uprobes_fetch_type_table[] = {
200 /* Special types */
201 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
202 sizeof(u32), 1, "__data_loc char[]"),
203 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
204 string_size, sizeof(u32), 0, "u32"),
205 /* Basic types */
206 ASSIGN_FETCH_TYPE(u8, u8, 0),
207 ASSIGN_FETCH_TYPE(u16, u16, 0),
208 ASSIGN_FETCH_TYPE(u32, u32, 0),
209 ASSIGN_FETCH_TYPE(u64, u64, 0),
210 ASSIGN_FETCH_TYPE(s8, u8, 1),
211 ASSIGN_FETCH_TYPE(s16, u16, 1),
212 ASSIGN_FETCH_TYPE(s32, u32, 1),
213 ASSIGN_FETCH_TYPE(s64, u64, 1),
214
215 ASSIGN_FETCH_TYPE_END
216 };
217
218 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
219 {
220 rwlock_init(&filter->rwlock);
221 filter->nr_systemwide = 0;
222 INIT_LIST_HEAD(&filter->perf_events);
223 }
224
225 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
226 {
227 return !filter->nr_systemwide && list_empty(&filter->perf_events);
228 }
229
230 static inline bool is_ret_probe(struct trace_uprobe *tu)
231 {
232 return tu->consumer.ret_handler != NULL;
233 }
234
235 /*
236 * Allocate new trace_uprobe and initialize it (including uprobes).
237 */
238 static struct trace_uprobe *
239 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
240 {
241 struct trace_uprobe *tu;
242
243 if (!event || !is_good_name(event))
244 return ERR_PTR(-EINVAL);
245
246 if (!group || !is_good_name(group))
247 return ERR_PTR(-EINVAL);
248
249 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
250 if (!tu)
251 return ERR_PTR(-ENOMEM);
252
253 tu->tp.call.class = &tu->tp.class;
254 tu->tp.call.name = kstrdup(event, GFP_KERNEL);
255 if (!tu->tp.call.name)
256 goto error;
257
258 tu->tp.class.system = kstrdup(group, GFP_KERNEL);
259 if (!tu->tp.class.system)
260 goto error;
261
262 INIT_LIST_HEAD(&tu->list);
263 tu->consumer.handler = uprobe_dispatcher;
264 if (is_ret)
265 tu->consumer.ret_handler = uretprobe_dispatcher;
266 init_trace_uprobe_filter(&tu->filter);
267 tu->tp.call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
268 return tu;
269
270 error:
271 kfree(tu->tp.call.name);
272 kfree(tu);
273
274 return ERR_PTR(-ENOMEM);
275 }
276
277 static void free_trace_uprobe(struct trace_uprobe *tu)
278 {
279 int i;
280
281 for (i = 0; i < tu->tp.nr_args; i++)
282 traceprobe_free_probe_arg(&tu->tp.args[i]);
283
284 iput(tu->inode);
285 kfree(tu->tp.call.class->system);
286 kfree(tu->tp.call.name);
287 kfree(tu->filename);
288 kfree(tu);
289 }
290
291 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
292 {
293 struct trace_uprobe *tu;
294
295 list_for_each_entry(tu, &uprobe_list, list)
296 if (strcmp(tu->tp.call.name, event) == 0 &&
297 strcmp(tu->tp.call.class->system, group) == 0)
298 return tu;
299
300 return NULL;
301 }
302
303 /* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
304 static int unregister_trace_uprobe(struct trace_uprobe *tu)
305 {
306 int ret;
307
308 ret = unregister_uprobe_event(tu);
309 if (ret)
310 return ret;
311
312 list_del(&tu->list);
313 free_trace_uprobe(tu);
314 return 0;
315 }
316
317 /* Register a trace_uprobe and probe_event */
318 static int register_trace_uprobe(struct trace_uprobe *tu)
319 {
320 struct trace_uprobe *old_tu;
321 int ret;
322
323 mutex_lock(&uprobe_lock);
324
325 /* register as an event */
326 old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system);
327 if (old_tu) {
328 /* delete old event */
329 ret = unregister_trace_uprobe(old_tu);
330 if (ret)
331 goto end;
332 }
333
334 ret = register_uprobe_event(tu);
335 if (ret) {
336 pr_warning("Failed to register probe event(%d)\n", ret);
337 goto end;
338 }
339
340 list_add_tail(&tu->list, &uprobe_list);
341
342 end:
343 mutex_unlock(&uprobe_lock);
344
345 return ret;
346 }
347
348 /*
349 * Argument syntax:
350 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
351 *
352 * - Remove uprobe: -:[GRP/]EVENT
353 */
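/*
 * Example (illustrative; the binary, the 0x4245c0 offset and the %di
 * register are placeholders, and %di assumes x86_64):
 *
 *   echo 'p:bash_probe /bin/bash:0x4245c0 arg1=$stack0 msg=+0(%di):string' \
 *           > /sys/kernel/debug/tracing/uprobe_events
 *   echo 'r:bash_ret /bin/bash:0x4245c0 ret=$retval' \
 *           >> /sys/kernel/debug/tracing/uprobe_events
 *   echo '-:bash_probe' >> /sys/kernel/debug/tracing/uprobe_events
 *
 * The first line creates an entry probe with two fetch arguments, the
 * second a return probe, and the last removes the entry probe again.
 */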
354 static int create_trace_uprobe(int argc, char **argv)
355 {
356 struct trace_uprobe *tu;
357 struct inode *inode;
358 char *arg, *event, *group, *filename;
359 char buf[MAX_EVENT_NAME_LEN];
360 struct path path;
361 unsigned long offset;
362 bool is_delete, is_return;
363 int i, ret;
364
365 inode = NULL;
366 ret = 0;
367 is_delete = false;
368 is_return = false;
369 event = NULL;
370 group = NULL;
371
372 /* argc must be >= 1 */
373 if (argv[0][0] == '-')
374 is_delete = true;
375 else if (argv[0][0] == 'r')
376 is_return = true;
377 else if (argv[0][0] != 'p') {
378 pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
379 return -EINVAL;
380 }
381
382 if (argv[0][1] == ':') {
383 event = &argv[0][2];
384 arg = strchr(event, '/');
385
386 if (arg) {
387 group = event;
388 event = arg + 1;
389 event[-1] = '\0';
390
391 if (strlen(group) == 0) {
392 pr_info("Group name is not specified\n");
393 return -EINVAL;
394 }
395 }
396 if (strlen(event) == 0) {
397 pr_info("Event name is not specified\n");
398 return -EINVAL;
399 }
400 }
401 if (!group)
402 group = UPROBE_EVENT_SYSTEM;
403
404 if (is_delete) {
405 int ret;
406
407 if (!event) {
408 pr_info("Delete command needs an event name.\n");
409 return -EINVAL;
410 }
411 mutex_lock(&uprobe_lock);
412 tu = find_probe_event(event, group);
413
414 if (!tu) {
415 mutex_unlock(&uprobe_lock);
416 pr_info("Event %s/%s doesn't exist.\n", group, event);
417 return -ENOENT;
418 }
419 /* delete an event */
420 ret = unregister_trace_uprobe(tu);
421 mutex_unlock(&uprobe_lock);
422 return ret;
423 }
424
425 if (argc < 2) {
426 pr_info("Probe point is not specified.\n");
427 return -EINVAL;
428 }
429 if (isdigit(argv[1][0])) {
430 pr_info("probe point must be have a filename.\n");
431 return -EINVAL;
432 }
433 arg = strchr(argv[1], ':');
434 if (!arg) {
435 ret = -EINVAL;
436 goto fail_address_parse;
437 }
438
439 *arg++ = '\0';
440 filename = argv[1];
441 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
442 if (ret)
443 goto fail_address_parse;
444
445 inode = igrab(path.dentry->d_inode);
446 path_put(&path);
447
448 if (!inode || !S_ISREG(inode->i_mode)) {
449 ret = -EINVAL;
450 goto fail_address_parse;
451 }
452
453 ret = kstrtoul(arg, 0, &offset);
454 if (ret)
455 goto fail_address_parse;
456
457 argc -= 2;
458 argv += 2;
459
460 /* setup a probe */
461 if (!event) {
462 char *tail;
463 char *ptr;
464
465 tail = kstrdup(kbasename(filename), GFP_KERNEL);
466 if (!tail) {
467 ret = -ENOMEM;
468 goto fail_address_parse;
469 }
470
471 ptr = strpbrk(tail, ".-_");
472 if (ptr)
473 *ptr = '\0';
474
475 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
476 event = buf;
477 kfree(tail);
478 }
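/*
 * Example of the auto-generated name (illustrative): for
 * "/usr/lib/libfoo-1.2.so:0x4710" with no explicit event name, the
 * basename "libfoo-1.2.so" is truncated at the first '.', '-' or '_',
 * giving the event "p_libfoo_0x4710" in the default "uprobes" group.
 */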
479
480 tu = alloc_trace_uprobe(group, event, argc, is_return);
481 if (IS_ERR(tu)) {
482 pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
483 ret = PTR_ERR(tu);
484 goto fail_address_parse;
485 }
486 tu->offset = offset;
487 tu->inode = inode;
488 tu->filename = kstrdup(filename, GFP_KERNEL);
489
490 if (!tu->filename) {
491 pr_info("Failed to allocate filename.\n");
492 ret = -ENOMEM;
493 goto error;
494 }
495
496 /* parse arguments */
497 ret = 0;
498 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
499 struct probe_arg *parg = &tu->tp.args[i];
500
501 /* Increment count for freeing args in error case */
502 tu->tp.nr_args++;
503
504 /* Parse argument name */
505 arg = strchr(argv[i], '=');
506 if (arg) {
507 *arg++ = '\0';
508 parg->name = kstrdup(argv[i], GFP_KERNEL);
509 } else {
510 arg = argv[i];
511 /* If argument name is omitted, set "argN" */
512 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
513 parg->name = kstrdup(buf, GFP_KERNEL);
514 }
515
516 if (!parg->name) {
517 pr_info("Failed to allocate argument[%d] name.\n", i);
518 ret = -ENOMEM;
519 goto error;
520 }
521
522 if (!is_good_name(parg->name)) {
523 pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
524 ret = -EINVAL;
525 goto error;
526 }
527
528 if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
529 pr_info("Argument[%d] name '%s' conflicts with "
530 "another field.\n", i, argv[i]);
531 ret = -EINVAL;
532 goto error;
533 }
534
535 /* Parse fetch argument */
536 ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
537 is_return, false);
538 if (ret) {
539 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
540 goto error;
541 }
542 }
543
544 ret = register_trace_uprobe(tu);
545 if (ret)
546 goto error;
547 return 0;
548
549 error:
550 free_trace_uprobe(tu);
551 return ret;
552
553 fail_address_parse:
554 if (inode)
555 iput(inode);
556
557 pr_info("Failed to parse address or file.\n");
558
559 return ret;
560 }
561
562 static int cleanup_all_probes(void)
563 {
564 struct trace_uprobe *tu;
565 int ret = 0;
566
567 mutex_lock(&uprobe_lock);
568 while (!list_empty(&uprobe_list)) {
569 tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
570 ret = unregister_trace_uprobe(tu);
571 if (ret)
572 break;
573 }
574 mutex_unlock(&uprobe_lock);
575 return ret;
576 }
577
578 /* Probes listing interfaces */
579 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
580 {
581 mutex_lock(&uprobe_lock);
582 return seq_list_start(&uprobe_list, *pos);
583 }
584
585 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
586 {
587 return seq_list_next(v, &uprobe_list, pos);
588 }
589
590 static void probes_seq_stop(struct seq_file *m, void *v)
591 {
592 mutex_unlock(&uprobe_lock);
593 }
594
595 static int probes_seq_show(struct seq_file *m, void *v)
596 {
597 struct trace_uprobe *tu = v;
598 char c = is_ret_probe(tu) ? 'r' : 'p';
599 int i;
600
601 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name);
602 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
603
604 for (i = 0; i < tu->tp.nr_args; i++)
605 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
606
607 seq_printf(m, "\n");
608 return 0;
609 }
610
611 static const struct seq_operations probes_seq_op = {
612 .start = probes_seq_start,
613 .next = probes_seq_next,
614 .stop = probes_seq_stop,
615 .show = probes_seq_show
616 };
617
618 static int probes_open(struct inode *inode, struct file *file)
619 {
620 int ret;
621
622 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
623 ret = cleanup_all_probes();
624 if (ret)
625 return ret;
626 }
627
628 return seq_open(file, &probes_seq_op);
629 }
630
631 static ssize_t probes_write(struct file *file, const char __user *buffer,
632 size_t count, loff_t *ppos)
633 {
634 return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
635 }
636
637 static const struct file_operations uprobe_events_ops = {
638 .owner = THIS_MODULE,
639 .open = probes_open,
640 .read = seq_read,
641 .llseek = seq_lseek,
642 .release = seq_release,
643 .write = probes_write,
644 };
645
646 /* Probes profiling interfaces */
647 static int probes_profile_seq_show(struct seq_file *m, void *v)
648 {
649 struct trace_uprobe *tu = v;
650
651 seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit);
652 return 0;
653 }
654
655 static const struct seq_operations profile_seq_op = {
656 .start = probes_seq_start,
657 .next = probes_seq_next,
658 .stop = probes_seq_stop,
659 .show = probes_profile_seq_show
660 };
661
662 static int profile_open(struct inode *inode, struct file *file)
663 {
664 return seq_open(file, &profile_seq_op);
665 }
666
667 static const struct file_operations uprobe_profile_ops = {
668 .owner = THIS_MODULE,
669 .open = profile_open,
670 .read = seq_read,
671 .llseek = seq_lseek,
672 .release = seq_release,
673 };
674
675 struct uprobe_cpu_buffer {
676 struct mutex mutex;
677 void *buf;
678 };
679 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
680 static int uprobe_buffer_refcnt;
681
682 static int uprobe_buffer_init(void)
683 {
684 int cpu, err_cpu;
685
686 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
687 if (uprobe_cpu_buffer == NULL)
688 return -ENOMEM;
689
690 for_each_possible_cpu(cpu) {
691 struct page *p = alloc_pages_node(cpu_to_node(cpu),
692 GFP_KERNEL, 0);
693 if (p == NULL) {
694 err_cpu = cpu;
695 goto err;
696 }
697 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
698 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
699 }
700
701 return 0;
702
703 err:
704 for_each_possible_cpu(cpu) {
705 if (cpu == err_cpu)
706 break;
707 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
708 }
709
710 free_percpu(uprobe_cpu_buffer);
711 return -ENOMEM;
712 }
713
714 static int uprobe_buffer_enable(void)
715 {
716 int ret = 0;
717
718 BUG_ON(!mutex_is_locked(&event_mutex));
719
720 if (uprobe_buffer_refcnt++ == 0) {
721 ret = uprobe_buffer_init();
722 if (ret < 0)
723 uprobe_buffer_refcnt--;
724 }
725
726 return ret;
727 }
728
729 static void uprobe_buffer_disable(void)
730 {
731 BUG_ON(!mutex_is_locked(&event_mutex));
732
733 if (--uprobe_buffer_refcnt == 0) {
734 free_percpu(uprobe_cpu_buffer);
735 uprobe_cpu_buffer = NULL;
736 }
737 }
738
739 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
740 {
741 struct uprobe_cpu_buffer *ucb;
742 int cpu;
743
744 cpu = raw_smp_processor_id();
745 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
746
747 /*
748 * Use per-cpu buffers for fastest access, but we might migrate
749 * so the mutex makes sure we have sole access to it.
750 */
751 mutex_lock(&ucb->mutex);
752
753 return ucb;
754 }
755
756 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
757 {
758 mutex_unlock(&ucb->mutex);
759 }
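/*
 * Illustrative sketch of the intended usage pattern for the per-cpu buffer
 * helpers above, mirroring what uprobe_trace_print() and uprobe_perf_print()
 * below do. example_use_uprobe_buffer() is a hypothetical helper, not part
 * of this file, and is guarded by #if 0 so it is never built.
 */
#if 0
static void example_use_uprobe_buffer(struct trace_uprobe *tu,
				      struct pt_regs *regs,
				      int esize, int dsize)
{
	struct uprobe_cpu_buffer *ucb;

	ucb = uprobe_buffer_get();	/* lock this CPU's page-sized buffer */
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
	/* ... copy ucb->buf into a trace or perf record ... */
	uprobe_buffer_put(ucb);		/* release the buffer's mutex */
}
#endif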
760
761 static void uprobe_trace_print(struct trace_uprobe *tu,
762 unsigned long func, struct pt_regs *regs)
763 {
764 struct uprobe_trace_entry_head *entry;
765 struct ring_buffer_event *event;
766 struct ring_buffer *buffer;
767 struct uprobe_cpu_buffer *ucb;
768 void *data;
769 int size, dsize, esize;
770 struct ftrace_event_call *call = &tu->tp.call;
771
772 dsize = __get_data_size(&tu->tp, regs);
773 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
774
775 if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE))
776 return;
777
778 ucb = uprobe_buffer_get();
779 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
780
781 size = esize + tu->tp.size + dsize;
782 event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
783 size, 0, 0);
784 if (!event)
785 goto out;
786
787 entry = ring_buffer_event_data(event);
788 if (is_ret_probe(tu)) {
789 entry->vaddr[0] = func;
790 entry->vaddr[1] = instruction_pointer(regs);
791 data = DATAOF_TRACE_ENTRY(entry, true);
792 } else {
793 entry->vaddr[0] = instruction_pointer(regs);
794 data = DATAOF_TRACE_ENTRY(entry, false);
795 }
796
797 memcpy(data, ucb->buf, tu->tp.size + dsize);
798
799 if (!call_filter_check_discard(call, entry, buffer, event))
800 trace_buffer_unlock_commit(buffer, event, 0, 0);
801
802 out:
803 uprobe_buffer_put(ucb);
804 }
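/*
 * Resulting ring buffer record layout (illustrative):
 *
 *   +--------------------+---------------------+------------------------+
 *   | trace_entry header | vaddr[] (1 or 2     | fetched args:          |
 *   |                    | words, see above)   | tp.size + dsize bytes  |
 *   +--------------------+---------------------+------------------------+
 *
 * The fixed tu->tp.size part holds one slot per argument; a string
 * argument stores a u32 data location in its slot that points into the
 * trailing dsize bytes of dynamic data.
 */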
805
806 /* uprobe handler */
807 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
808 {
809 if (!is_ret_probe(tu))
810 uprobe_trace_print(tu, 0, regs);
811 return 0;
812 }
813
814 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
815 struct pt_regs *regs)
816 {
817 uprobe_trace_print(tu, func, regs);
818 }
819
820 /* Event entry printers */
821 static enum print_line_t
822 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
823 {
824 struct uprobe_trace_entry_head *entry;
825 struct trace_seq *s = &iter->seq;
826 struct trace_uprobe *tu;
827 u8 *data;
828 int i;
829
830 entry = (struct uprobe_trace_entry_head *)iter->ent;
831 tu = container_of(event, struct trace_uprobe, tp.call.event);
832
833 if (is_ret_probe(tu)) {
834 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name,
835 entry->vaddr[1], entry->vaddr[0]))
836 goto partial;
837 data = DATAOF_TRACE_ENTRY(entry, true);
838 } else {
839 if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name,
840 entry->vaddr[0]))
841 goto partial;
842 data = DATAOF_TRACE_ENTRY(entry, false);
843 }
844
845 for (i = 0; i < tu->tp.nr_args; i++) {
846 struct probe_arg *parg = &tu->tp.args[i];
847
848 if (!parg->type->print(s, parg->name, data + parg->offset, entry))
849 goto partial;
850 }
851
852 if (trace_seq_puts(s, "\n"))
853 return TRACE_TYPE_HANDLED;
854
855 partial:
856 return TRACE_TYPE_PARTIAL_LINE;
857 }
858
859 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
860 enum uprobe_filter_ctx ctx,
861 struct mm_struct *mm);
862
863 static int
864 probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
865 {
866 int ret = 0;
867
868 if (trace_probe_is_enabled(&tu->tp))
869 return -EINTR;
870
871 ret = uprobe_buffer_enable();
872 if (ret < 0)
873 return ret;
874
875 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
876
877 tu->tp.flags |= flag;
878 tu->consumer.filter = filter;
879 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
880 if (ret)
881 tu->tp.flags &= ~flag;
882
883 return ret;
884 }
885
886 static void probe_event_disable(struct trace_uprobe *tu, int flag)
887 {
888 if (!trace_probe_is_enabled(&tu->tp))
889 return;
890
891 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
892
893 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
894 tu->tp.flags &= ~flag;
895
896 uprobe_buffer_disable();
897 }
898
899 static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
900 {
901 int ret, i, size;
902 struct uprobe_trace_entry_head field;
903 struct trace_uprobe *tu = event_call->data;
904
905 if (is_ret_probe(tu)) {
906 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
907 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
908 size = SIZEOF_TRACE_ENTRY(true);
909 } else {
910 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
911 size = SIZEOF_TRACE_ENTRY(false);
912 }
913 /* Set argument names as fields */
914 for (i = 0; i < tu->tp.nr_args; i++) {
915 struct probe_arg *parg = &tu->tp.args[i];
916
917 ret = trace_define_field(event_call, parg->type->fmttype,
918 parg->name, size + parg->offset,
919 parg->type->size, parg->type->is_signed,
920 FILTER_OTHER);
921
922 if (ret)
923 return ret;
924 }
925 return 0;
926 }
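/*
 * For a return probe the generated format therefore starts with the two
 * fixed fields named by FIELD_STRING_FUNC and FIELD_STRING_RETIP in
 * trace_probe.h (i.e. "__probe_func" and "__probe_ret_ip"), followed by one
 * field per parsed argument at offset size + parg->offset; an entry probe
 * gets the single "__probe_ip" field instead.
 */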
927
928 #ifdef CONFIG_PERF_EVENTS
929 static bool
930 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
931 {
932 struct perf_event *event;
933
934 if (filter->nr_systemwide)
935 return true;
936
937 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
938 if (event->hw.tp_target->mm == mm)
939 return true;
940 }
941
942 return false;
943 }
944
945 static inline bool
946 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
947 {
948 return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
949 }
950
951 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
952 {
953 bool done;
954
955 write_lock(&tu->filter.rwlock);
956 if (event->hw.tp_target) {
957 /*
958 * event->parent != NULL means copy_process(), we can avoid
959 * uprobe_apply(). current->mm must be probed and we can rely
960 * on dup_mmap() which preserves the already installed bp's.
961 *
962 * attr.enable_on_exec means that exec/mmap will install the
963 * breakpoints we need.
964 */
965 done = tu->filter.nr_systemwide ||
966 event->parent || event->attr.enable_on_exec ||
967 uprobe_filter_event(tu, event);
968 list_add(&event->hw.tp_list, &tu->filter.perf_events);
969 } else {
970 done = tu->filter.nr_systemwide;
971 tu->filter.nr_systemwide++;
972 }
973 write_unlock(&tu->filter.rwlock);
974
975 if (!done)
976 uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
977
978 return 0;
979 }
980
981 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
982 {
983 bool done;
984
985 write_lock(&tu->filter.rwlock);
986 if (event->hw.tp_target) {
987 list_del(&event->hw.tp_list);
988 done = tu->filter.nr_systemwide ||
989 (event->hw.tp_target->flags & PF_EXITING) ||
990 uprobe_filter_event(tu, event);
991 } else {
992 tu->filter.nr_systemwide--;
993 done = tu->filter.nr_systemwide;
994 }
995 write_unlock(&tu->filter.rwlock);
996
997 if (!done)
998 uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
999
1000 return 0;
1001 }
1002
1003 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1004 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1005 {
1006 struct trace_uprobe *tu;
1007 int ret;
1008
1009 tu = container_of(uc, struct trace_uprobe, consumer);
1010 read_lock(&tu->filter.rwlock);
1011 ret = __uprobe_perf_filter(&tu->filter, mm);
1012 read_unlock(&tu->filter.rwlock);
1013
1014 return ret;
1015 }
1016
1017 static void uprobe_perf_print(struct trace_uprobe *tu,
1018 unsigned long func, struct pt_regs *regs)
1019 {
1020 struct ftrace_event_call *call = &tu->tp.call;
1021 struct uprobe_trace_entry_head *entry;
1022 struct hlist_head *head;
1023 struct uprobe_cpu_buffer *ucb;
1024 void *data;
1025 int size, dsize, esize;
1026 int rctx;
1027
1028 dsize = __get_data_size(&tu->tp, regs);
1029 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1030
1031 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1032 return;
1033
1034 size = esize + tu->tp.size + dsize;
1035 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
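/*
 * Worked example: with esize 16, tp.size 8 and dsize 13 the raw size is
 * 37; ALIGN(37 + 4, 8) - 4 = 44, so the record plus perf's u32 size header
 * ends up u64-aligned. The memset() further down zeroes the padding bytes
 * so no stale buffer contents leak to user space.
 */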
1036 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1037 return;
1038
1039 ucb = uprobe_buffer_get();
1040 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1041
1042 preempt_disable();
1043 head = this_cpu_ptr(call->perf_events);
1044 if (hlist_empty(head))
1045 goto out;
1046
1047 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1048 if (!entry)
1049 goto out;
1050
1051 if (is_ret_probe(tu)) {
1052 entry->vaddr[0] = func;
1053 entry->vaddr[1] = instruction_pointer(regs);
1054 data = DATAOF_TRACE_ENTRY(entry, true);
1055 } else {
1056 entry->vaddr[0] = instruction_pointer(regs);
1057 data = DATAOF_TRACE_ENTRY(entry, false);
1058 }
1059
1060 memcpy(data, ucb->buf, tu->tp.size + dsize);
1061
1062 if (size - esize > tu->tp.size + dsize) {
1063 int len = tu->tp.size + dsize;
1064
1065 memset(data + len, 0, size - esize - len);
1066 }
1067
1068 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1069 out:
1070 preempt_enable();
1071 uprobe_buffer_put(ucb);
1072 }
1073
1074 /* uprobe profile handler */
1075 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
1076 {
1077 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1078 return UPROBE_HANDLER_REMOVE;
1079
1080 if (!is_ret_probe(tu))
1081 uprobe_perf_print(tu, 0, regs);
1082 return 0;
1083 }
1084
1085 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1086 struct pt_regs *regs)
1087 {
1088 uprobe_perf_print(tu, func, regs);
1089 }
1090 #endif /* CONFIG_PERF_EVENTS */
1091
1092 static
1093 int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
1094 {
1095 struct trace_uprobe *tu = event->data;
1096
1097 switch (type) {
1098 case TRACE_REG_REGISTER:
1099 return probe_event_enable(tu, TP_FLAG_TRACE, NULL);
1100
1101 case TRACE_REG_UNREGISTER:
1102 probe_event_disable(tu, TP_FLAG_TRACE);
1103 return 0;
1104
1105 #ifdef CONFIG_PERF_EVENTS
1106 case TRACE_REG_PERF_REGISTER:
1107 return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);
1108
1109 case TRACE_REG_PERF_UNREGISTER:
1110 probe_event_disable(tu, TP_FLAG_PROFILE);
1111 return 0;
1112
1113 case TRACE_REG_PERF_OPEN:
1114 return uprobe_perf_open(tu, data);
1115
1116 case TRACE_REG_PERF_CLOSE:
1117 return uprobe_perf_close(tu, data);
1118
1119 #endif
1120 default:
1121 return 0;
1122 }
1123 return 0;
1124 }
1125
1126 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1127 {
1128 struct trace_uprobe *tu;
1129 struct uprobe_dispatch_data udd;
1130 int ret = 0;
1131
1132 tu = container_of(con, struct trace_uprobe, consumer);
1133 tu->nhit++;
1134
1135 udd.tu = tu;
1136 udd.bp_addr = instruction_pointer(regs);
1137
1138 current->utask->vaddr = (unsigned long) &udd;
1139
1140 if (tu->tp.flags & TP_FLAG_TRACE)
1141 ret |= uprobe_trace_func(tu, regs);
1142
1143 #ifdef CONFIG_PERF_EVENTS
1144 if (tu->tp.flags & TP_FLAG_PROFILE)
1145 ret |= uprobe_perf_func(tu, regs);
1146 #endif
1147 return ret;
1148 }
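/*
 * Note on the return value: uprobe_trace_func() always returns 0, while
 * uprobe_perf_func() can return UPROBE_HANDLER_REMOVE when the perf filter
 * no longer matches current->mm, asking the uprobe core to drop the
 * breakpoint from that mm; OR-ing the two results combines both answers.
 */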
1149
1150 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1151 unsigned long func, struct pt_regs *regs)
1152 {
1153 struct trace_uprobe *tu;
1154 struct uprobe_dispatch_data udd;
1155
1156 tu = container_of(con, struct trace_uprobe, consumer);
1157
1158 udd.tu = tu;
1159 udd.bp_addr = func;
1160
1161 current->utask->vaddr = (unsigned long) &udd;
1162
1163 if (tu->tp.flags & TP_FLAG_TRACE)
1164 uretprobe_trace_func(tu, func, regs);
1165
1166 #ifdef CONFIG_PERF_EVENTS
1167 if (tu->tp.flags & TP_FLAG_PROFILE)
1168 uretprobe_perf_func(tu, func, regs);
1169 #endif
1170 return 0;
1171 }
1172
1173 static struct trace_event_functions uprobe_funcs = {
1174 .trace = print_uprobe_event
1175 };
1176
1177 static int register_uprobe_event(struct trace_uprobe *tu)
1178 {
1179 struct ftrace_event_call *call = &tu->tp.call;
1180 int ret;
1181
1182 /* Initialize ftrace_event_call */
1183 INIT_LIST_HEAD(&call->class->fields);
1184 call->event.funcs = &uprobe_funcs;
1185 call->class->define_fields = uprobe_event_define_fields;
1186
1187 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1188 return -ENOMEM;
1189
1190 ret = register_ftrace_event(&call->event);
1191 if (!ret) {
1192 kfree(call->print_fmt);
1193 return -ENODEV;
1194 }
1195 call->flags = 0;
1196 call->class->reg = trace_uprobe_register;
1197 call->data = tu;
1198 ret = trace_add_event_call(call);
1199
1200 if (ret) {
1201 pr_info("Failed to register uprobe event: %s\n", call->name);
1202 kfree(call->print_fmt);
1203 unregister_ftrace_event(&call->event);
1204 }
1205
1206 return ret;
1207 }
1208
1209 static int unregister_uprobe_event(struct trace_uprobe *tu)
1210 {
1211 int ret;
1212
1213 /* tu->event is unregistered in trace_remove_event_call() */
1214 ret = trace_remove_event_call(&tu->tp.call);
1215 if (ret)
1216 return ret;
1217 kfree(tu->tp.call.print_fmt);
1218 tu->tp.call.print_fmt = NULL;
1219 return 0;
1220 }
1221
1222 /* Make a trace interface for controlling probe points */
1223 static __init int init_uprobe_trace(void)
1224 {
1225 struct dentry *d_tracer;
1226
1227 d_tracer = tracing_init_dentry();
1228 if (!d_tracer)
1229 return 0;
1230
1231 trace_create_file("uprobe_events", 0644, d_tracer,
1232 NULL, &uprobe_events_ops);
1233 /* Profile interface */
1234 trace_create_file("uprobe_profile", 0444, d_tracer,
1235 NULL, &uprobe_profile_ops);
1236 return 0;
1237 }
1238
1239 fs_initcall(init_uprobe_trace);