uprobes/core: Move insn to arch specific structure
kernel/events/uprobes.c
1 /*
2 * User-space Probes (UProbes)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2008-2011
19 * Authors:
20 * Srikar Dronamraju
21 * Jim Keniston
22 */
23
24 #include <linux/kernel.h>
25 #include <linux/highmem.h>
26 #include <linux/pagemap.h> /* read_mapping_page */
27 #include <linux/slab.h>
28 #include <linux/sched.h>
29 #include <linux/rmap.h> /* anon_vma_prepare */
30 #include <linux/mmu_notifier.h> /* set_pte_at_notify */
31 #include <linux/swap.h> /* try_to_free_swap */
32
33 #include <linux/uprobes.h>
34
35 static struct rb_root uprobes_tree = RB_ROOT;
36
37 static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
38
39 #define UPROBES_HASH_SZ 13
40
41 /* serialize (un)register */
42 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
43
44 #define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
45
46 /* serialize uprobe->pending_list */
47 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
48 #define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
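
/*
 * Both hashes key on the inode pointer: register/unregister paths for
 * probes in the same file serialize on one uprobes_mutex[] slot, while
 * uprobe_mmap() serializes on the matching uprobes_mmap_mutex[] slot, e.g.:
 *
 *	mutex_lock(uprobes_hash(inode));
 *	... insert or remove breakpoints for this inode ...
 *	mutex_unlock(uprobes_hash(inode));
 */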
49
50 /*
51 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
52 * events active at this time. Probably a fine-grained per-inode count
53 * would be better?
54 */
55 static atomic_t uprobe_events = ATOMIC_INIT(0);
56
57 /*
58 * Maintain a temporary per-vma info that can be used to search if a vma
59 * has already been handled. This structure is introduced since extending
60 * vm_area_struct wasn't recommended.
61 */
62 struct vma_info {
63 struct list_head probe_list;
64 struct mm_struct *mm;
65 loff_t vaddr;
66 };
67
68 struct uprobe {
69 struct rb_node rb_node; /* node in the rb tree */
70 atomic_t ref;
71 struct rw_semaphore consumer_rwsem;
72 struct list_head pending_list;
73 struct uprobe_consumer *consumers;
74 struct inode *inode; /* Also hold a ref to inode */
75 loff_t offset;
76 int flags;
77 struct arch_uprobe arch;
78 };
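
/*
 * Architecture-specific state, including the copy of the original
 * instruction that copy_insn() saves and set_orig_insn() restores,
 * lives in the embedded struct arch_uprobe (uprobe->arch).
 */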
79
80 /*
81 * valid_vma: Verify if the specified vma is an executable vma
82 * Relax restrictions while unregistering: vm_flags might have
83 * changed after breakpoint was inserted.
84 * - is_register: indicates if we are in register context.
85 * - Return true if the specified virtual address is in an
86 * executable vma.
87 */
88 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
89 {
90 if (!vma->vm_file)
91 return false;
92
93 if (!is_register)
94 return true;
95
96 if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
97 return true;
98
99 return false;
100 }
101
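/*
 * Translate a file offset into the virtual address at which @vma maps it:
 * vm_start corresponds to file offset (vm_pgoff << PAGE_SHIFT), so
 * vaddr = vm_start + offset - (vm_pgoff << PAGE_SHIFT).
 */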
102 static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
103 {
104 loff_t vaddr;
105
106 vaddr = vma->vm_start + offset;
107 vaddr -= vma->vm_pgoff << PAGE_SHIFT;
108
109 return vaddr;
110 }
111
112 /**
113 * __replace_page - replace page in vma by new page.
114 * based on replace_page in mm/ksm.c
115 *
116 * @vma: vma that holds the pte pointing to page
117 * @page: the cowed page we are replacing by kpage
118 * @kpage: the modified page we replace page by
119 *
120 * Returns 0 on success, -EFAULT on failure.
121 */
122 static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
123 {
124 struct mm_struct *mm = vma->vm_mm;
125 pgd_t *pgd;
126 pud_t *pud;
127 pmd_t *pmd;
128 pte_t *ptep;
129 spinlock_t *ptl;
130 unsigned long addr;
131 int err = -EFAULT;
132
133 addr = page_address_in_vma(page, vma);
134 if (addr == -EFAULT)
135 goto out;
136
137 pgd = pgd_offset(mm, addr);
138 if (!pgd_present(*pgd))
139 goto out;
140
141 pud = pud_offset(pgd, addr);
142 if (!pud_present(*pud))
143 goto out;
144
145 pmd = pmd_offset(pud, addr);
146 if (!pmd_present(*pmd))
147 goto out;
148
149 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
150 if (!ptep)
151 goto out;
152
153 get_page(kpage);
154 page_add_new_anon_rmap(kpage, vma, addr);
155
156 flush_cache_page(vma, addr, pte_pfn(*ptep));
157 ptep_clear_flush(vma, addr, ptep);
158 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
159
160 page_remove_rmap(page);
161 if (!page_mapped(page))
162 try_to_free_swap(page);
163 put_page(page);
164 pte_unmap_unlock(ptep, ptl);
165 err = 0;
166
167 out:
168 return err;
169 }
170
171 /**
172 * is_bkpt_insn - check if instruction is breakpoint instruction.
173 * @insn: instruction to be checked.
174 * Default implementation of is_bkpt_insn
175 * Returns true if @insn is a breakpoint instruction.
176 */
177 bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
178 {
179 return *insn == UPROBES_BKPT_INSN;
180 }
181
182 /*
183 * NOTE:
184 * Expect the breakpoint instruction to be the smallest size instruction for
185 * the architecture. If an arch has variable length instructions and the
186 * breakpoint instruction is not the smallest length instruction
187 * supported by that architecture, then we need to modify read_opcode /
188 * write_opcode accordingly. This would never be a problem for archs that
189 * have fixed length instructions.
190 */
191
192 /*
193 * write_opcode - write the opcode at a given virtual address.
194 * @mm: the probed process address space.
195 * @auprobe: the breakpointing information.
196 * @vaddr: the virtual address to store the opcode.
197 * @opcode: opcode to be written at @vaddr.
198 *
199 * Called with mm->mmap_sem held (for read and with a reference to
200 * mm).
201 *
202 * For mm @mm, write the opcode at @vaddr.
203 * Return 0 (success) or a negative errno.
204 */
205 static int write_opcode(struct mm_struct *mm, struct arch_uprobe *auprobe,
206 unsigned long vaddr, uprobe_opcode_t opcode)
207 {
208 struct page *old_page, *new_page;
209 struct address_space *mapping;
210 void *vaddr_old, *vaddr_new;
211 struct vm_area_struct *vma;
212 struct uprobe *uprobe;
213 loff_t addr;
214 int ret;
215
216 /* Read the page with vaddr into memory */
217 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
218 if (ret <= 0)
219 return ret;
220
221 ret = -EINVAL;
222
223 /*
224 * We are interested in text pages only. Our pages of interest
225 * should be mapped for read and execute only. We desist from
226 * adding probes in write mapped pages since the breakpoints
227 * might end up in the file copy.
228 */
229 if (!valid_vma(vma, is_bkpt_insn(&opcode)))
230 goto put_out;
231
232 uprobe = container_of(auprobe, struct uprobe, arch);
233 mapping = uprobe->inode->i_mapping;
234 if (mapping != vma->vm_file->f_mapping)
235 goto put_out;
236
237 addr = vma_address(vma, uprobe->offset);
238 if (vaddr != (unsigned long)addr)
239 goto put_out;
240
241 ret = -ENOMEM;
242 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
243 if (!new_page)
244 goto put_out;
245
246 __SetPageUptodate(new_page);
247
248 /*
249 * lock page will serialize against do_wp_page()'s
250 * PageAnon() handling
251 */
252 lock_page(old_page);
253 /* copy the page now that we've got it stable */
254 vaddr_old = kmap_atomic(old_page);
255 vaddr_new = kmap_atomic(new_page);
256
257 memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
258
259 /* poke the new insn in, ASSUMES we don't cross page boundary */
260 vaddr &= ~PAGE_MASK;
261 BUG_ON(vaddr + UPROBES_BKPT_INSN_SIZE > PAGE_SIZE);
262 memcpy(vaddr_new + vaddr, &opcode, UPROBES_BKPT_INSN_SIZE);
263
264 kunmap_atomic(vaddr_new);
265 kunmap_atomic(vaddr_old);
266
267 ret = anon_vma_prepare(vma);
268 if (ret)
269 goto unlock_out;
270
271 lock_page(new_page);
272 ret = __replace_page(vma, old_page, new_page);
273 unlock_page(new_page);
274
275 unlock_out:
276 unlock_page(old_page);
277 page_cache_release(new_page);
278
279 put_out:
280 put_page(old_page);
281
282 return ret;
283 }
284
285 /**
286 * read_opcode - read the opcode at a given virtual address.
287 * @mm: the probed process address space.
288 * @vaddr: the virtual address to read the opcode.
289 * @opcode: location to store the read opcode.
290 *
291 * Called with mm->mmap_sem held (for read and with a reference to
292 * mm).
293 *
294 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
295 * Return 0 (success) or a negative errno.
296 */
297 static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
298 {
299 struct page *page;
300 void *vaddr_new;
301 int ret;
302
303 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
304 if (ret <= 0)
305 return ret;
306
307 lock_page(page);
308 vaddr_new = kmap_atomic(page);
309 vaddr &= ~PAGE_MASK;
310 memcpy(opcode, vaddr_new + vaddr, UPROBES_BKPT_INSN_SIZE);
311 kunmap_atomic(vaddr_new);
312 unlock_page(page);
313
314 put_page(page);
315
316 return 0;
317 }
318
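/*
 * Returns 1 if a breakpoint instruction is already present at @vaddr,
 * 0 if some other instruction is there, or a negative errno if the
 * opcode could not be read.
 */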
319 static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
320 {
321 uprobe_opcode_t opcode;
322 int result;
323
324 result = read_opcode(mm, vaddr, &opcode);
325 if (result)
326 return result;
327
328 if (is_bkpt_insn(&opcode))
329 return 1;
330
331 return 0;
332 }
333
334 /**
335 * set_bkpt - store breakpoint at a given address.
336 * @mm: the probed process address space.
337 * @auprobe: the probepoint information.
338 * @vaddr: the virtual address to insert the opcode.
339 *
340 * For mm @mm, store the breakpoint instruction at @vaddr.
341 * Return 0 (success) or a negative errno.
342 */
343 int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr)
344 {
345 int result;
346
347 result = is_bkpt_at_addr(mm, vaddr);
348 if (result == 1)
349 return -EEXIST;
350
351 if (result)
352 return result;
353
354 return write_opcode(mm, auprobe, vaddr, UPROBES_BKPT_INSN);
355 }
356
357 /**
358 * set_orig_insn - Restore the original instruction.
359 * @mm: the probed process address space.
360 * @auprobe: the probepoint information.
361 * @vaddr: the virtual address to insert the opcode.
362 * @verify: if true, verify existence of breakpoint instruction.
363 *
364 * For mm @mm, restore the original opcode (opcode) at @vaddr.
365 * Return 0 (success) or a negative errno.
366 */
367 int __weak
368 set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr, bool verify)
369 {
370 if (verify) {
371 int result;
372
373 result = is_bkpt_at_addr(mm, vaddr);
374 if (!result)
375 return -EINVAL;
376
377 if (result != 1)
378 return result;
379 }
380 return write_opcode(mm, auprobe, vaddr, *(uprobe_opcode_t *)auprobe->insn);
381 }
382
383 static int match_uprobe(struct uprobe *l, struct uprobe *r)
384 {
385 if (l->inode < r->inode)
386 return -1;
387
388 if (l->inode > r->inode)
389 return 1;
390
391 if (l->offset < r->offset)
392 return -1;
393
394 if (l->offset > r->offset)
395 return 1;
396
397 return 0;
398 }
399
400 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
401 {
402 struct uprobe u = { .inode = inode, .offset = offset };
403 struct rb_node *n = uprobes_tree.rb_node;
404 struct uprobe *uprobe;
405 int match;
406
407 while (n) {
408 uprobe = rb_entry(n, struct uprobe, rb_node);
409 match = match_uprobe(&u, uprobe);
410 if (!match) {
411 atomic_inc(&uprobe->ref);
412 return uprobe;
413 }
414
415 if (match < 0)
416 n = n->rb_left;
417 else
418 n = n->rb_right;
419 }
420 return NULL;
421 }
422
423 /*
424 * Find a uprobe corresponding to a given inode:offset
425 * Acquires uprobes_treelock
426 */
427 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
428 {
429 struct uprobe *uprobe;
430 unsigned long flags;
431
432 spin_lock_irqsave(&uprobes_treelock, flags);
433 uprobe = __find_uprobe(inode, offset);
434 spin_unlock_irqrestore(&uprobes_treelock, flags);
435
436 return uprobe;
437 }
438
439 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
440 {
441 struct rb_node **p = &uprobes_tree.rb_node;
442 struct rb_node *parent = NULL;
443 struct uprobe *u;
444 int match;
445
446 while (*p) {
447 parent = *p;
448 u = rb_entry(parent, struct uprobe, rb_node);
449 match = match_uprobe(uprobe, u);
450 if (!match) {
451 atomic_inc(&u->ref);
452 return u;
453 }
454
455 if (match < 0)
456 p = &parent->rb_left;
457 else
458 p = &parent->rb_right;
459
460 }
461
462 u = NULL;
463 rb_link_node(&uprobe->rb_node, parent, p);
464 rb_insert_color(&uprobe->rb_node, &uprobes_tree);
465 /* get access + creation ref */
466 atomic_set(&uprobe->ref, 2);
467
468 return u;
469 }
470
471 /*
472 * Acquire uprobes_treelock.
473 * If a matching uprobe already exists in the rbtree, increment its
474 * refcount (access) and return the matching uprobe.
475 *
476 * If no matching uprobe exists, insert the uprobe into the rbtree,
477 * take a double refcount (access + creation) and return NULL.
478 */
479 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
480 {
481 unsigned long flags;
482 struct uprobe *u;
483
484 spin_lock_irqsave(&uprobes_treelock, flags);
485 u = __insert_uprobe(uprobe);
486 spin_unlock_irqrestore(&uprobes_treelock, flags);
487
488 return u;
489 }
490
491 static void put_uprobe(struct uprobe *uprobe)
492 {
493 if (atomic_dec_and_test(&uprobe->ref))
494 kfree(uprobe);
495 }
496
497 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
498 {
499 struct uprobe *uprobe, *cur_uprobe;
500
501 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
502 if (!uprobe)
503 return NULL;
504
505 uprobe->inode = igrab(inode);
506 uprobe->offset = offset;
507 init_rwsem(&uprobe->consumer_rwsem);
508 INIT_LIST_HEAD(&uprobe->pending_list);
509
510 /* add to uprobes_tree, sorted on inode:offset */
511 cur_uprobe = insert_uprobe(uprobe);
512
513 /* a uprobe exists for this inode:offset combination */
514 if (cur_uprobe) {
515 kfree(uprobe);
516 uprobe = cur_uprobe;
517 iput(inode);
518 } else {
519 atomic_inc(&uprobe_events);
520 }
521
522 return uprobe;
523 }
524
525 /* Returns the previous consumer */
526 static struct uprobe_consumer *
527 consumer_add(struct uprobe *uprobe, struct uprobe_consumer *consumer)
528 {
529 down_write(&uprobe->consumer_rwsem);
530 consumer->next = uprobe->consumers;
531 uprobe->consumers = consumer;
532 up_write(&uprobe->consumer_rwsem);
533
534 return consumer->next;
535 }
536
537 /*
538 * For uprobe @uprobe, delete the consumer @consumer.
539 * Return true if @consumer was deleted successfully,
540 * false otherwise.
541 */
542 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *consumer)
543 {
544 struct uprobe_consumer **con;
545 bool ret = false;
546
547 down_write(&uprobe->consumer_rwsem);
548 for (con = &uprobe->consumers; *con; con = &(*con)->next) {
549 if (*con == consumer) {
550 *con = consumer->next;
551 ret = true;
552 break;
553 }
554 }
555 up_write(&uprobe->consumer_rwsem);
556
557 return ret;
558 }
559
560 static int __copy_insn(struct address_space *mapping,
561 struct vm_area_struct *vma, char *insn,
562 unsigned long nbytes, unsigned long offset)
563 {
564 struct file *filp = vma->vm_file;
565 struct page *page;
566 void *vaddr;
567 unsigned long off1;
568 unsigned long idx;
569
570 if (!filp)
571 return -EINVAL;
572
573 idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
574 off1 = offset &= ~PAGE_MASK;
575
576 /*
577 * Ensure that the page that has the original instruction is
578 * populated and in page-cache.
579 */
580 page = read_mapping_page(mapping, idx, filp);
581 if (IS_ERR(page))
582 return PTR_ERR(page);
583
584 vaddr = kmap_atomic(page);
585 memcpy(insn, vaddr + off1, nbytes);
586 kunmap_atomic(vaddr);
587 page_cache_release(page);
588
589 return 0;
590 }
591
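/*
 * Copy the original instruction at uprobe->offset, up to MAX_UINSN_BYTES
 * (clamped at end-of-file), from the page cache into uprobe->arch.insn,
 * handling the case where the instruction straddles a page boundary.
 */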
592 static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
593 {
594 struct address_space *mapping;
595 unsigned long nbytes;
596 int bytes;
597
598 addr &= ~PAGE_MASK;
599 nbytes = PAGE_SIZE - addr;
600 mapping = uprobe->inode->i_mapping;
601
602 /* Instruction at end of binary; copy only available bytes */
603 if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
604 bytes = uprobe->inode->i_size - uprobe->offset;
605 else
606 bytes = MAX_UINSN_BYTES;
607
608 /* Instruction at the page-boundary; copy bytes in second page */
609 if (nbytes < bytes) {
610 if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
611 bytes - nbytes, uprobe->offset + nbytes))
612 return -ENOMEM;
613
614 bytes = nbytes;
615 }
616 return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
617 }
618
619 static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
620 struct vm_area_struct *vma, loff_t vaddr)
621 {
622 unsigned long addr;
623 int ret;
624
625 /*
626 * If the probe is being deleted, the unregistering thread could already
627 * be done with its vma-rmap walk-through. Adding a probe now can be
628 * fatal since nobody will be able to clean it up. Also, we could be on
629 * the fork or mremap path, where the probe might have already been
630 * inserted. Hence behave as if the probe already existed.
631 */
632 if (!uprobe->consumers)
633 return -EEXIST;
634
635 addr = (unsigned long)vaddr;
636
637 if (!(uprobe->flags & UPROBES_COPY_INSN)) {
638 ret = copy_insn(uprobe, vma, addr);
639 if (ret)
640 return ret;
641
642 if (is_bkpt_insn((uprobe_opcode_t *)uprobe->arch.insn))
643 return -EEXIST;
644
645 ret = arch_uprobes_analyze_insn(mm, &uprobe->arch);
646 if (ret)
647 return ret;
648
649 uprobe->flags |= UPROBES_COPY_INSN;
650 }
651 ret = set_bkpt(mm, &uprobe->arch, addr);
652
653 return ret;
654 }
655
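/*
 * Restore the original instruction saved in uprobe->arch.insn, after
 * verifying that a breakpoint is still present at @vaddr.
 */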
656 static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe, loff_t vaddr)
657 {
658 set_orig_insn(mm, &uprobe->arch, (unsigned long)vaddr, true);
659 }
660
661 static void delete_uprobe(struct uprobe *uprobe)
662 {
663 unsigned long flags;
664
665 spin_lock_irqsave(&uprobes_treelock, flags);
666 rb_erase(&uprobe->rb_node, &uprobes_tree);
667 spin_unlock_irqrestore(&uprobes_treelock, flags);
668 iput(uprobe->inode);
669 put_uprobe(uprobe);
670 atomic_dec(&uprobe_events);
671 }
672
673 static struct vma_info *__find_next_vma_info(struct list_head *head,
674 loff_t offset, struct address_space *mapping,
675 struct vma_info *vi, bool is_register)
676 {
677 struct prio_tree_iter iter;
678 struct vm_area_struct *vma;
679 struct vma_info *tmpvi;
680 unsigned long pgoff;
681 int existing_vma;
682 loff_t vaddr;
683
684 pgoff = offset >> PAGE_SHIFT;
685
686 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
687 if (!valid_vma(vma, is_register))
688 continue;
689
690 existing_vma = 0;
691 vaddr = vma_address(vma, offset);
692
693 list_for_each_entry(tmpvi, head, probe_list) {
694 if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
695 existing_vma = 1;
696 break;
697 }
698 }
699
700 /*
701 * Another vma needs a probe to be installed. However, skip
702 * installing the probe if the vma is about to be unlinked.
703 */
704 if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
705 vi->mm = vma->vm_mm;
706 vi->vaddr = vaddr;
707 list_add(&vi->probe_list, head);
708
709 return vi;
710 }
711 }
712
713 return NULL;
714 }
715
716 /*
717 * Iterate in the rmap prio tree and find a vma where a probe has not
718 * yet been inserted.
719 */
720 static struct vma_info *
721 find_next_vma_info(struct list_head *head, loff_t offset, struct address_space *mapping,
722 bool is_register)
723 {
724 struct vma_info *vi, *retvi;
725
726 vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
727 if (!vi)
728 return ERR_PTR(-ENOMEM);
729
730 mutex_lock(&mapping->i_mmap_mutex);
731 retvi = __find_next_vma_info(head, offset, mapping, vi, is_register);
732 mutex_unlock(&mapping->i_mmap_mutex);
733
734 if (!retvi)
735 kfree(vi);
736
737 return retvi;
738 }
739
740 static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
741 {
742 struct list_head try_list;
743 struct vm_area_struct *vma;
744 struct address_space *mapping;
745 struct vma_info *vi, *tmpvi;
746 struct mm_struct *mm;
747 loff_t vaddr;
748 int ret;
749
750 mapping = uprobe->inode->i_mapping;
751 INIT_LIST_HEAD(&try_list);
752
753 ret = 0;
754
755 for (;;) {
756 vi = find_next_vma_info(&try_list, uprobe->offset, mapping, is_register);
757 if (!vi)
758 break;
759
760 if (IS_ERR(vi)) {
761 ret = PTR_ERR(vi);
762 break;
763 }
764
765 mm = vi->mm;
766 down_read(&mm->mmap_sem);
767 vma = find_vma(mm, (unsigned long)vi->vaddr);
768 if (!vma || !valid_vma(vma, is_register)) {
769 list_del(&vi->probe_list);
770 kfree(vi);
771 up_read(&mm->mmap_sem);
772 mmput(mm);
773 continue;
774 }
775 vaddr = vma_address(vma, uprobe->offset);
776 if (vma->vm_file->f_mapping->host != uprobe->inode ||
777 vaddr != vi->vaddr) {
778 list_del(&vi->probe_list);
779 kfree(vi);
780 up_read(&mm->mmap_sem);
781 mmput(mm);
782 continue;
783 }
784
785 if (is_register)
786 ret = install_breakpoint(mm, uprobe, vma, vi->vaddr);
787 else
788 remove_breakpoint(mm, uprobe, vi->vaddr);
789
790 up_read(&mm->mmap_sem);
791 mmput(mm);
792 if (is_register) {
793 if (ret && ret == -EEXIST)
794 ret = 0;
795 if (ret)
796 break;
797 }
798 }
799
800 list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
801 list_del(&vi->probe_list);
802 kfree(vi);
803 }
804
805 return ret;
806 }
807
808 static int __uprobe_register(struct uprobe *uprobe)
809 {
810 return register_for_each_vma(uprobe, true);
811 }
812
813 static void __uprobe_unregister(struct uprobe *uprobe)
814 {
815 if (!register_for_each_vma(uprobe, false))
816 delete_uprobe(uprobe);
817
818 /* TODO: can't unregister? schedule a worker thread */
819 }
820
821 /*
822 * uprobe_register - register a probe
823 * @inode: the file in which the probe has to be placed.
824 * @offset: offset from the start of the file.
825 * @consumer: information on how to handle the probe.
826 *
827 * Apart from the access refcount, uprobe_register() takes a creation
828 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
829 * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
830 * tuple). Creation refcount stops uprobe_unregister from freeing the
831 * @uprobe even before the register operation is complete. Creation
832 * refcount is released when the last @consumer for the @uprobe
833 * unregisters.
834 *
835 * Return errno if it cannot successfully install probes,
836 * else return 0 (success).
837 */
838 int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
839 {
840 struct uprobe *uprobe;
841 int ret;
842
843 if (!inode || !consumer || consumer->next)
844 return -EINVAL;
845
846 if (offset > i_size_read(inode))
847 return -EINVAL;
848
849 ret = 0;
850 mutex_lock(uprobes_hash(inode));
851 uprobe = alloc_uprobe(inode, offset);
852
853 if (uprobe && !consumer_add(uprobe, consumer)) {
854 ret = __uprobe_register(uprobe);
855 if (ret) {
856 uprobe->consumers = NULL;
857 __uprobe_unregister(uprobe);
858 } else {
859 uprobe->flags |= UPROBES_RUN_HANDLER;
860 }
861 }
862
863 mutex_unlock(uprobes_hash(inode));
864 put_uprobe(uprobe);
865
866 return ret;
867 }
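
/*
 * Minimal usage sketch (illustrative only; the handler/consumer names are
 * made up and the handler signature is assumed from linux/uprobes.h in
 * this series):
 *
 *	static int sample_handler(struct uprobe_consumer *self,
 *				  struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *	static struct uprobe_consumer sample_consumer = {
 *		.handler	= sample_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &sample_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &sample_consumer);
 */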
868
869 /*
870 * uprobe_unregister - unregister an already registered probe.
871 * @inode: the file in which the probe has to be removed.
872 * @offset: offset from the start of the file.
873 * @consumer: identify which probe if multiple probes are colocated.
874 */
875 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer)
876 {
877 struct uprobe *uprobe;
878
879 if (!inode || !consumer)
880 return;
881
882 uprobe = find_uprobe(inode, offset);
883 if (!uprobe)
884 return;
885
886 mutex_lock(uprobes_hash(inode));
887
888 if (consumer_del(uprobe, consumer)) {
889 if (!uprobe->consumers) {
890 __uprobe_unregister(uprobe);
891 uprobe->flags &= ~UPROBES_RUN_HANDLER;
892 }
893 }
894
895 mutex_unlock(uprobes_hash(inode));
896 if (uprobe)
897 put_uprobe(uprobe);
898 }
899
900 /*
901 * Of all the nodes that correspond to the given inode, return the node
902 * with the least offset.
903 */
904 static struct rb_node *find_least_offset_node(struct inode *inode)
905 {
906 struct uprobe u = { .inode = inode, .offset = 0};
907 struct rb_node *n = uprobes_tree.rb_node;
908 struct rb_node *close_node = NULL;
909 struct uprobe *uprobe;
910 int match;
911
912 while (n) {
913 uprobe = rb_entry(n, struct uprobe, rb_node);
914 match = match_uprobe(&u, uprobe);
915
916 if (uprobe->inode == inode)
917 close_node = n;
918
919 if (!match)
920 return close_node;
921
922 if (match < 0)
923 n = n->rb_left;
924 else
925 n = n->rb_right;
926 }
927
928 return close_node;
929 }
930
931 /*
932 * For a given inode, build a list of probes that need to be inserted.
933 */
934 static void build_probe_list(struct inode *inode, struct list_head *head)
935 {
936 struct uprobe *uprobe;
937 unsigned long flags;
938 struct rb_node *n;
939
940 spin_lock_irqsave(&uprobes_treelock, flags);
941
942 n = find_least_offset_node(inode);
943
944 for (; n; n = rb_next(n)) {
945 uprobe = rb_entry(n, struct uprobe, rb_node);
946 if (uprobe->inode != inode)
947 break;
948
949 list_add(&uprobe->pending_list, head);
950 atomic_inc(&uprobe->ref);
951 }
952
953 spin_unlock_irqrestore(&uprobes_treelock, flags);
954 }
955
956 /*
957 * Called from mmap_region.
958 * Called with mm->mmap_sem acquired.
959 *
960 * Return a negative errno if we fail to insert probes and we cannot
961 * bail out.
962 * Return 0 otherwise, i.e.:
963 *
964 * - successful insertion of probes
965 * - (or) no possible probes to be inserted.
966 * - (or) insertion of probes failed but we can bail-out.
967 */
968 int uprobe_mmap(struct vm_area_struct *vma)
969 {
970 struct list_head tmp_list;
971 struct uprobe *uprobe, *u;
972 struct inode *inode;
973 int ret;
974
975 if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
976 return 0;
977
978 inode = vma->vm_file->f_mapping->host;
979 if (!inode)
980 return 0;
981
982 INIT_LIST_HEAD(&tmp_list);
983 mutex_lock(uprobes_mmap_hash(inode));
984 build_probe_list(inode, &tmp_list);
985
986 ret = 0;
987
988 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
989 loff_t vaddr;
990
991 list_del(&uprobe->pending_list);
992 if (!ret) {
993 vaddr = vma_address(vma, uprobe->offset);
994 if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
995 ret = install_breakpoint(vma->vm_mm, uprobe, vma, vaddr);
996 /* Ignore double add: */
997 if (ret == -EEXIST)
998 ret = 0;
999 }
1000 }
1001 put_uprobe(uprobe);
1002 }
1003
1004 mutex_unlock(uprobes_mmap_hash(inode));
1005
1006 return ret;
1007 }
1008
1009 static int __init init_uprobes(void)
1010 {
1011 int i;
1012
1013 for (i = 0; i < UPROBES_HASH_SZ; i++) {
1014 mutex_init(&uprobes_mutex[i]);
1015 mutex_init(&uprobes_mmap_mutex[i]);
1016 }
1017 return 0;
1018 }
1019
1020 static void __exit exit_uprobes(void)
1021 {
1022 }
1023
1024 module_init(init_uprobes);
1025 module_exit(exit_uprobes);