kprobes: fix sparse NULL warning
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
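
/*
 * Slot lifecycle: a slot starts out SLOT_CLEAN, becomes SLOT_USED when
 * handed out by get_insn_slot(), and on release is either returned
 * straight to SLOT_CLEAN or parked as SLOT_DIRTY until
 * collect_garbage_slots() can safely reclaim it.
 */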

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

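/*
 * check_safety() makes sure no task is preempted in the middle of
 * single-stepping out of an instruction slot that is about to be
 * reclaimed.  Under CONFIG_PREEMPT && CONFIG_PM a preempted task could
 * still be sitting on such a slot, so we freeze processes and verify
 * nothing is runnable; otherwise a synchronize_sched() is enough to
 * let any CPU that is currently single-stepping finish.
 */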
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip) {
		return NULL;
	}

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/*
 * Return 1 if freeing this slot emptied its page (the page is then
 * released, or kept as the last spare), otherwise 0.
 */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no task is preempted on one of the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
		int i;
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}
	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
		collect_garbage_slots();
	}
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held; returns the first free instance or NULL */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held; returns the first used instance or NULL */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							     *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep the breakpointed opcode and the arch-specific instruction copy
 * consistent between the kprobes registered at the same address
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and add the
	 * offset to the address.  That way the addr field can either be
	 * global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check if we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
	    (ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
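
/*
 * Minimal usage sketch (hypothetical caller code, not part of this
 * file).  The pre_handler runs before the probed instruction; returning
 * 0 lets the original instruction be single-stepped as usual.
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", kp->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",	(or set .addr directly)
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);	(0 on success)
 */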

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
	    (p->list.next == &old_p->list) &&
	    (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};


int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
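
/*
 * Usage sketch (hypothetical handler names, not part of this file):
 * a jprobe handler is declared with the same signature as the probed
 * function so it can read the arguments, and must finish by calling
 * jprobe_return().
 *
 *	static struct jprobe my_jp = {
 *		.entry	= JPROBE_ENTRY(my_handler),
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *	ret = register_jprobe(&my_jp);
 */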

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
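
/*
 * Usage sketch (hypothetical handler names, not part of this file):
 * the handler fires when the probed function returns, and maxactive
 * bounds how many returns can be tracked concurrently (see
 * register_kretprobe above for the default sizing).
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,
 *		.kp		= { .symbol_name = "do_fork" },
 *	};
 *	ret = register_kretprobe(&my_rp);
 */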

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long size, offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, &size,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);