/*
 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng kprobes integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/marker.h>
#include <linux/mutex.h>
#include <linux/jhash.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>

#include "ltt-type-serializer.h"
#include "ltt-tracer.h"
23 #define LTT_KPROBES_DIR "kprobes"
24 #define LTT_KPROBES_ENABLE "enable"
25 #define LTT_KPROBES_DISABLE "disable"
26 #define LTT_KPROBES_LIST "list"
28 /* Active LTTng kprobes hash table */
29 static DEFINE_MUTEX(ltt_kprobes_mutex
);
31 #define LTT_KPROBE_HASH_BITS 6
32 #define LTT_KPROBE_TABLE_SIZE (1 << LTT_KPROBE_HASH_BITS)
33 static struct hlist_head ltt_kprobe_table
[LTT_KPROBE_TABLE_SIZE
];
36 struct hlist_node hlist
;
/* debugfs control files, created at module init. */
static struct dentry *ltt_kprobes_dir,
	*ltt_kprobes_enable_dentry,
	*ltt_kprobes_disable_dentry,
	*ltt_kprobes_list_dentry;

/*
 * Nonzero once module unload has started; read under ltt_kprobes_mutex by
 * the debugfs write handlers to refuse late (un)registrations.
 * (Legal despite the module_exit() macro: that macro is function-like and
 * only expands when followed by parentheses.)
 */
static int module_exit;
49 static void trace_kprobe_table_entry(void *call_data
, struct kprobe_entry
*e
)
52 char *namebuf
= (char *)__get_free_page(GFP_KERNEL
);
55 sprint_symbol(namebuf
, (unsigned long)e
->kp
.addr
);
56 addr
= (unsigned long)e
->kp
.addr
;
58 strncpy(namebuf
, e
->kp
.symbol_name
, PAGE_SIZE
- 1);
59 /* TODO : add offset */
60 addr
= kallsyms_lookup_name(namebuf
);
63 __trace_mark(0, kprobe_state
, kprobe_table
, call_data
,
64 "ip 0x%lX symbol %s", addr
, namebuf
);
65 free_page((unsigned long)namebuf
);
68 DEFINE_MARKER(kernel
, kprobe
, "ip %lX");
70 static int ltt_kprobe_handler_pre(struct kprobe
*p
, struct pt_regs
*regs
)
72 struct marker
*marker
;
75 data
= (unsigned long)p
->addr
;
76 marker
= &GET_MARKER(kernel
, kprobe
);
77 ltt_specialized_trace(marker
, marker
->single
.probe_private
,
78 &data
, sizeof(data
), sizeof(data
));
82 static int ltt_register_kprobe(const char *key
)
84 struct hlist_head
*head
;
85 struct hlist_node
*node
;
86 struct kprobe_entry
*e
= NULL
;
87 char *symbol_name
= NULL
;
89 unsigned int offset
= 0;
91 size_t key_len
= strlen(key
) + 1;
95 return -ENOENT
; /* only \0 */
97 if (sscanf(key
, "%li", &addr
) != 1)
101 const char *symbol_end
= NULL
;
102 unsigned int symbol_len
; /* includes final \0 */
104 symbol_end
= strchr(key
, ' ');
106 symbol_len
= symbol_end
- key
+ 1;
108 symbol_len
= key_len
;
109 symbol_name
= kmalloc(symbol_len
, GFP_KERNEL
);
114 memcpy(symbol_name
, key
, symbol_len
- 1);
115 symbol_name
[symbol_len
-1] = '\0';
117 symbol_end
++; /* start of offset */
118 if (sscanf(symbol_end
, "%i", &offset
) != 1)
123 hash
= jhash(key
, key_len
-1, 0);
124 head
= <t_kprobe_table
[hash
& ((1 << LTT_KPROBE_HASH_BITS
)-1)];
125 hlist_for_each_entry(e
, node
, head
, hlist
) {
126 if (!strcmp(key
, e
->key
)) {
127 printk(KERN_NOTICE
"Kprobe %s busy\n", key
);
133 * Using kzalloc here to allocate a variable length element. Could
134 * cause some memory fragmentation if overused.
136 e
= kzalloc(sizeof(struct kprobe_entry
) + key_len
, GFP_KERNEL
);
141 memcpy(e
->key
, key
, key_len
);
142 hlist_add_head(&e
->hlist
, head
);
143 e
->kp
.pre_handler
= ltt_kprobe_handler_pre
;
144 e
->kp
.symbol_name
= symbol_name
;
145 e
->kp
.offset
= offset
;
146 e
->kp
.addr
= (void *)addr
;
147 ret
= register_kprobe(&e
->kp
);
150 trace_kprobe_table_entry(NULL
, e
);
154 hlist_del(&e
->hlist
);
161 static int ltt_unregister_kprobe(const char *key
)
163 struct hlist_head
*head
;
164 struct hlist_node
*node
;
165 struct kprobe_entry
*e
;
167 size_t key_len
= strlen(key
) + 1;
170 hash
= jhash(key
, key_len
-1, 0);
171 head
= <t_kprobe_table
[hash
& ((1 << LTT_KPROBE_HASH_BITS
)-1)];
172 hlist_for_each_entry(e
, node
, head
, hlist
) {
173 if (!strcmp(key
, e
->key
)) {
180 hlist_del(&e
->hlist
);
181 unregister_kprobe(&e
->kp
);
182 kfree(e
->kp
.symbol_name
);
187 static void ltt_unregister_all_kprobes(void)
189 struct kprobe_entry
*e
;
190 struct hlist_head
*head
;
191 struct hlist_node
*node
, *tmp
;
194 for (i
= 0; i
< LTT_KPROBE_TABLE_SIZE
; i
++) {
195 head
= <t_kprobe_table
[i
];
196 hlist_for_each_entry_safe(e
, node
, tmp
, head
, hlist
) {
197 hlist_del(&e
->hlist
);
198 unregister_kprobe(&e
->kp
);
199 kfree(e
->kp
.symbol_name
);
/*
 * Allows to specify either
 * - a numeric address, or
 * - a symbol name, optionally followed by an offset ("symbol offset").
 */
211 static ssize_t
enable_op_write(struct file
*file
,
212 const char __user
*user_buf
, size_t count
, loff_t
*ppos
)
216 char *buf
= (char *)__get_free_page(GFP_KERNEL
);
218 mutex_lock(<t_kprobes_mutex
);
224 buf_size
= min_t(size_t, count
, PAGE_SIZE
- 1);
225 err
= copy_from_user(buf
, user_buf
, buf_size
);
228 buf
[buf_size
] = '\0';
229 end
= strchr(buf
, '\n');
232 err
= ltt_register_kprobe(buf
);
236 mutex_unlock(<t_kprobes_mutex
);
237 free_page((unsigned long)buf
);
240 mutex_unlock(<t_kprobes_mutex
);
241 free_page((unsigned long)buf
);
245 static const struct file_operations ltt_kprobes_enable
= {
246 .write
= enable_op_write
,
249 static ssize_t
disable_op_write(struct file
*file
,
250 const char __user
*user_buf
, size_t count
, loff_t
*ppos
)
254 char *buf
= (char *)__get_free_page(GFP_KERNEL
);
256 mutex_lock(<t_kprobes_mutex
);
260 buf_size
= min_t(size_t, count
, PAGE_SIZE
- 1);
261 err
= copy_from_user(buf
, user_buf
, buf_size
);
264 buf
[buf_size
] = '\0';
265 end
= strchr(buf
, '\n');
268 err
= ltt_unregister_kprobe(buf
);
272 mutex_unlock(<t_kprobes_mutex
);
273 free_page((unsigned long)buf
);
276 mutex_unlock(<t_kprobes_mutex
);
277 free_page((unsigned long)buf
);
281 static const struct file_operations ltt_kprobes_disable
= {
282 .write
= disable_op_write
,
/*
 * This seqfile read is not perfectly safe, as a kprobe could be removed from
 * the hash table between two reads. This will result in an incomplete output.
 */
289 static struct kprobe_entry
*ltt_find_next_kprobe(struct kprobe_entry
*prev
)
291 struct kprobe_entry
*e
;
292 struct hlist_head
*head
;
293 struct hlist_node
*node
;
297 if (prev
== (void *)-1UL)
303 for (i
= 0; i
< LTT_KPROBE_TABLE_SIZE
; i
++) {
304 head
= <t_kprobe_table
[i
];
305 hlist_for_each_entry(e
, node
, head
, hlist
) {
315 static void *lk_next(struct seq_file
*m
, void *p
, loff_t
*pos
)
317 m
->private = ltt_find_next_kprobe(m
->private);
319 m
->private = (void *)-1UL;
325 static void *lk_start(struct seq_file
*m
, loff_t
*pos
)
327 mutex_lock(<t_kprobes_mutex
);
330 m
->private = ltt_find_next_kprobe(m
->private);
332 m
->private = (void *)-1UL;
338 static void lk_stop(struct seq_file
*m
, void *p
)
340 mutex_unlock(<t_kprobes_mutex
);
343 static int lk_show(struct seq_file
*m
, void *p
)
345 struct kprobe_entry
*e
= m
->private;
346 seq_printf(m
, "%s\n", e
->key
);
350 static const struct seq_operations ltt_kprobes_list_op
= {
357 static int ltt_kprobes_list_open(struct inode
*inode
, struct file
*file
)
361 ret
= seq_open(file
, <t_kprobes_list_op
);
363 ((struct seq_file
*)file
->private_data
)->private = NULL
;
367 static int ltt_kprobes_list_release(struct inode
*inode
, struct file
*file
)
369 struct seq_file
*seq
= file
->private_data
;
372 return seq_release(inode
, file
);
375 static const struct file_operations ltt_kprobes_list
= {
376 .open
= ltt_kprobes_list_open
,
379 .release
= ltt_kprobes_list_release
,
/*
 * kprobes table dump. Callback invoked by ltt-statedump. ltt-statedump must
 * take a reference to this module before calling this callback.
 */
386 void ltt_dump_kprobes_table(void *call_data
)
388 struct kprobe_entry
*e
;
389 struct hlist_head
*head
;
390 struct hlist_node
*node
;
393 for (i
= 0; i
< LTT_KPROBE_TABLE_SIZE
; i
++) {
394 head
= <t_kprobe_table
[i
];
395 hlist_for_each_entry(e
, node
, head
, hlist
)
396 trace_kprobe_table_entry(call_data
, e
);
399 EXPORT_SYMBOL_GPL(ltt_dump_kprobes_table
);
401 static int __init
ltt_kprobes_init(void)
403 struct dentry
*ltt_root_dentry
;
406 printk(KERN_INFO
"LTT : ltt-kprobes init\n");
407 mutex_lock(<t_kprobes_mutex
);
409 ltt_root_dentry
= get_ltt_root();
410 if (!ltt_root_dentry
) {
415 ltt_kprobes_dir
= debugfs_create_dir(LTT_KPROBES_DIR
, ltt_root_dentry
);
416 if (!ltt_kprobes_dir
) {
418 "ltt_kprobes_init: failed to create dir %s\n",
424 ltt_kprobes_enable_dentry
= debugfs_create_file(LTT_KPROBES_ENABLE
,
426 ltt_kprobes_dir
, NULL
,
427 <t_kprobes_enable
);
428 if (IS_ERR(ltt_kprobes_enable_dentry
) || !ltt_kprobes_enable_dentry
) {
430 "ltt_kprobes_init: failed to create file %s\n",
436 ltt_kprobes_disable_dentry
= debugfs_create_file(LTT_KPROBES_DISABLE
,
438 ltt_kprobes_dir
, NULL
,
439 <t_kprobes_disable
);
440 if (IS_ERR(ltt_kprobes_disable_dentry
) || !ltt_kprobes_disable_dentry
) {
442 "ltt_kprobes_init: failed to create file %s\n",
443 LTT_KPROBES_DISABLE
);
448 ltt_kprobes_list_dentry
= debugfs_create_file(LTT_KPROBES_LIST
,
449 S_IWUSR
, ltt_kprobes_dir
,
450 NULL
, <t_kprobes_list
);
451 if (IS_ERR(ltt_kprobes_list_dentry
) || !ltt_kprobes_list_dentry
) {
453 "ltt_kprobes_init: failed to create file %s\n",
458 ltt_statedump_register_kprobes_dump(ltt_dump_kprobes_table
);
460 mutex_unlock(<t_kprobes_mutex
);
464 debugfs_remove(ltt_kprobes_disable_dentry
);
466 debugfs_remove(ltt_kprobes_enable_dentry
);
468 debugfs_remove(ltt_kprobes_dir
);
471 mutex_unlock(<t_kprobes_mutex
);
474 module_init(ltt_kprobes_init
);
476 static void __exit
ltt_kprobes_exit(void)
478 printk(KERN_INFO
"LTT : ltt-kprobes exit\n");
479 mutex_lock(<t_kprobes_mutex
);
481 ltt_statedump_unregister_kprobes_dump(ltt_dump_kprobes_table
);
482 debugfs_remove(ltt_kprobes_list_dentry
);
483 debugfs_remove(ltt_kprobes_disable_dentry
);
484 debugfs_remove(ltt_kprobes_enable_dentry
);
485 debugfs_remove(ltt_kprobes_dir
);
486 ltt_unregister_all_kprobes();
487 mutex_unlock(<t_kprobes_mutex
);
489 module_exit(ltt_kprobes_exit
);
491 MODULE_LICENSE("GPL and additional rights");
492 MODULE_AUTHOR("Mathieu Desnoyers");
493 MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support");