/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */

#include <linux/uprobes.h>
#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE
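/*
 * Illustrative arithmetic (the slot size is arch-defined; 128 bytes on
 * x86): with PAGE_SIZE = 4096 and UPROBE_XOL_SLOT_BYTES = 128,
 * UINSNS_PER_PAGE = 4096 / 128 = 32, i.e. one XOL page holds up to 32
 * out-of-line instruction slots.
 */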
static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
#define UPROBES_HASH_SZ	13

/*
 * We need separate register/unregister and mmap/munmap lock hashes because
 * of mmap_sem nesting.
 *
 * uprobe_register() needs to install probes on (potentially) all processes
 * and thus needs to acquire multiple mmap_sems (consecutively, not
 * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
 * for the particular process doing the mmap.
 *
 * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
 * because of lock order against i_mmap_mutex. This means there's a hole in
 * the register vma iteration where a mmap() can happen.
 *
 * Thus uprobe_register() can race with uprobe_mmap() and we can try and
 * install a probe where one is already installed.
 */

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)	(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
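/*
 * Sketch of the serialization pattern (this mirrors uprobe_register()
 * below; different inodes may hash to the same bucket, which only costs
 * some false sharing of the mutex, never correctness):
 *
 *	mutex_lock(uprobes_hash(inode));
 *	... add/remove consumers and (un)install breakpoints ...
 *	mutex_unlock(uprobes_hash(inode));
 */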
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];

#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
/*
 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
 * events active at this time.  Probably a fine grained per inode count is
 * better?
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);
struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};
/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	if (!vma->vm_file)
		return false;

	if (!is_register)
		return true;

	/* executable, but neither writable, shared nor huge */
	if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
				== (VM_READ|VM_EXEC))
		return true;

	return false;
}
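/*
 * In other words, a typical private, read-only executable mapping (e.g.
 * the r-xp text segment of a binary) passes the check above, while
 * writable, shared or hugetlb mappings are rejected when registering.
 */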
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}
static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
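/*
 * Worked example (numbers made up for illustration): for a vma with
 * vm_start == 0x400000 and vm_pgoff == 0x10 (file offset 0x10000 with
 * 4K pages), file offset 0x10123 maps to
 * 0x400000 + 0x10123 - 0x10000 == 0x400123, and vaddr_to_offset()
 * computes exactly the inverse.
 */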
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the cowed page we are replacing by kpage
 * @kpage:    the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(page);

	err = -EAGAIN;
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto unlock;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(page);
	put_page(page);

	err = 0;
 unlock:
	unlock_page(page);
	return err;
}
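/*
 * In effect this is a hand-rolled COW: the original (typically
 * file-backed) page is swapped for an anonymous copy that carries the
 * breakpoint, so the page cache and the on-disk image stay pristine.
 */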
/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}
/*
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify read_opcode/write_opcode accordingly.
 * This would never be a problem for archs that have fixed length
 * instructions.
 */
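/*
 * For example, on x86 the breakpoint is the one-byte int3 opcode (0xcc),
 * which is also the smallest possible x86 instruction, so the expectation
 * above holds there.
 */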
/*
 * write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch breakpointing information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);

	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_new;

	ret = __replace_page(vma, vaddr, old_page, new_page);

put_new:
	page_cache_release(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
/*
 * read_opcode - read the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to read the opcode.
 * @opcode: location to store the read opcode.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 * Return 0 (success) or a negative errno.
 */
static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	struct page *page;
	void *vaddr_new;
	int ret;

	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (ret <= 0)
		return ret;

	vaddr_new = kmap_atomic(page);
	vaddr &= ~PAGE_MASK;
	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(vaddr_new);

	put_page(page);

	return 0;
}
static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	uprobe_opcode_t opcode;
	int result;

	if (current->mm == mm) {
		pagefault_disable();
		result = __copy_from_user_inatomic(&opcode, (void __user *)vaddr,
							sizeof(opcode));
		pagefault_enable();

		if (likely(result == 0))
			goto out;
	}

	result = read_opcode(mm, vaddr, &opcode);
	if (result)
		return result;
out:
	if (is_swbp_insn(&opcode))
		return 1;

	return 0;
}
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;

	/*
	 * See the comment near uprobes_hash().
	 */
	result = is_swbp_at_addr(mm, vaddr);
	if (result == 1)
		return 0;

	if (result)
		return result;

	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}
/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;

	result = is_swbp_at_addr(mm, vaddr);
	if (!result)
		return -EINVAL;

	if (result != 1)
		return result;

	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}
/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}
/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	/* For now assume that the instruction need not be single-stepped */
	uprobe->flags |= UPROBE_SKIP_SSTEP;

	return u;
}
static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;

	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
		return;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (!uc->filter || uc->filter(uc, current))
			uc->handler(uc, regs);
	}
	up_read(&uprobe->consumer_rwsem);
}
/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}
/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully,
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
static int
__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
			unsigned long nbytes, loff_t offset)
{
	struct page *page;
	void *vaddr;
	unsigned long off;
	pgoff_t idx;

	if (!filp)
		return -EINVAL;

	if (!mapping->a_ops->readpage)
		return -EIO;

	idx = offset >> PAGE_CACHE_SHIFT;
	off = offset & ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes);
		if (err)
			return err;
		bytes = nbytes;
	}
	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}
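/*
 * Worked example (the instruction size is arch-defined; 16 bytes on x86):
 * for a probe starting 2 bytes before a page boundary, nbytes == 2 and
 * bytes == 16, so the trailing 14 bytes are copied from the second page
 * first and the leading 2 bytes from the first page afterwards.
 */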
/*
 * How mm->uprobes_state.count gets updated
 * uprobe_mmap() increments the count if
 *	- it successfully adds a breakpoint.
 *	- it cannot add a breakpoint, but sees that there is an underlying
 *	  breakpoint (via is_swbp_at_addr()).
 *
 * uprobe_munmap() decrements the count if
 *	- it sees an underlying breakpoint (via is_swbp_at_addr).
 *	  (Subsequent uprobe_unregister wouldn't find the breakpoint
 *	  unless a uprobe_mmap kicks in, since the old vma would be
 *	  dropped just after uprobe_munmap.)
 *
 * uprobe_register increments the count if:
 *	- it successfully adds a breakpoint.
 *
 * uprobe_unregister decrements the count if:
 *	- it sees an underlying breakpoint and removes it successfully
 *	  (via is_swbp_at_addr).
 *	  (Subsequent uprobe_munmap wouldn't find the breakpoint
 *	  since there is no underlying breakpoint after the
 *	  breakpoint removal.)
 */
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	/*
	 * If probe is being deleted, unregister thread could be done with
	 * the vma-rmap-walk through. Adding a probe now can be fatal since
	 * nobody will be able to cleanup. Also we could be from fork or
	 * mremap path, where the probe might have already been inserted.
	 * Hence behave as if probe already existed.
	 */
	if (!uprobe->consumers)
		return 0;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma->vm_file);
		if (ret)
			return ret;

		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -ENOTSUPP;

		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
		if (ret)
			return ret;

		/* write_opcode() assumes we don't cross page boundary */
		BUG_ON((uprobe->offset & ~PAGE_MASK) +
				UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

		uprobe->flags |= UPROBE_COPY_INSN;
	}

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}
static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	/* can happen if uprobe_register() fails */
	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
		return 0;

	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}
struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct map_info *info;
	int err = 0;

	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info))
		return PTR_ERR(info);

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register)
			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		else
			remove_breakpoint(uprobe, mm, info->vaddr);

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}

	return err;
}
static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO : can't unregister? schedule a worker thread */
}
/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for an @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (uprobe && !consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);

	return ret;
}
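/*
 * Illustrative usage from a (hypothetical) consumer; "my_handler" and the
 * inode:offset pair are made-up names, the calls are the ones above:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */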
/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	put_uprobe(uprobe);
}
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 * find_node_in_range() returns *some* node inside [min, max]; the two loops
 * below walk from it towards lower and higher offsets to collect every
 * matching uprobe.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
	}
	spin_unlock(&uprobes_treelock);
}
/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}
static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = vma->vm_file->f_mapping->host;

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}
/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
/* Slot allocation for XOL */
static int xol_add_vma(struct xol_area *area)
{
	struct mm_struct *mm;
	int ret;

	area->page = alloc_page(GFP_HIGHUSER);
	if (!area->page)
		return -ENOMEM;

	ret = -EALREADY;
	mm = current->mm;

	down_write(&mm->mmap_sem);
	if (mm->uprobes_state.xol_area)
		goto fail;

	ret = -ENOMEM;

	/* Try to map as high as possible, this is only a hint. */
	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
	if (area->vaddr & ~PAGE_MASK) {
		ret = area->vaddr;
		goto fail;
	}

	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
	if (ret)
		goto fail;

	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
	ret = 0;

fail:
	up_write(&mm->mmap_sem);
	if (ret)
		__free_page(area->page);

	return ret;
}
static struct xol_area *get_xol_area(struct mm_struct *mm)
{
	struct xol_area *area;

	area = mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */

	return area;
}
/*
 * xol_alloc_area - Allocate process's xol_area.
 * This area will be used for storing instructions for execution out of
 * line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *xol_alloc_area(void)
{
	struct xol_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto fail;

	init_waitqueue_head(&area->wq);
	if (!xol_add_vma(area))
		return area;

fail:
	kfree(area->bitmap);
	kfree(area);

	return get_xol_area(current->mm);
}
/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}
void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	newmm->uprobes_state.xol_area = NULL;

	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}
/*
 *  - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}
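/*
 * Example of the slot arithmetic (the slot size is arch-defined; 128
 * bytes on x86): with area->vaddr == 0x7fff00000000, slot_nr == 5 yields
 * slot_addr == 0x7fff00000000 + 5 * 128 == 0x7fff00000280.
 */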
/*
 * xol_get_insn_slot - If the thread was not allocated a slot, allocate
 * one and copy the probed instruction into it.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
{
	struct xol_area *area;
	unsigned long offset;
	void *vaddr;

	area = get_xol_area(current->mm);
	if (!area) {
		area = xol_alloc_area();
		if (!area)
			return 0;
	}
	current->utask->xol_vaddr = xol_take_insn_slot(area);

	/*
	 * Initialize the slot if xol_vaddr points to valid
	 * instruction slot.
	 */
	if (unlikely(!current->utask->xol_vaddr))
		return 0;

	current->utask->vaddr = slot_addr;
	offset = current->utask->xol_vaddr & ~PAGE_MASK;
	vaddr = kmap_atomic(area->page);
	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
	kunmap_atomic(vaddr);

	return current->utask->xol_vaddr;
}
/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;

	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
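/*
 * For example, on x86 the int3 trap reports an instruction pointer just
 * past the one-byte breakpoint opcode, so subtracting
 * UPROBE_SWBP_INSN_SIZE (1 there) recovers the probed address.
 */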
/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}
/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
}
/*
 * Allocate a uprobe_task object for the task.
 * Called when the thread hits a breakpoint for the first time.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *add_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof *utask, GFP_KERNEL);
	if (unlikely(!utask))
		return NULL;

	current->utask = utask;
	return utask;
}
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
{
	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
		return 0;

	return -EFAULT;
}
/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc. The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}
/*
 * Avoid singlestepping the original instruction if the original instruction
 * is a NOP or can be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		return true;

	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
	return false;
}
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}
static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = vma->vm_file->f_mapping->host;
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}
void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
{
	user_enable_single_step(current);
}

void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
{
	user_disable_single_step(current);
}
/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);

	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	utask = current->utask;
	if (!utask) {
		utask = add_utask();
		/* Cannot allocate; re-execute the instruction. */
		if (!utask)
			goto cleanup_ret;
	}

	utask->active_uprobe = uprobe;
	handler_chain(uprobe, regs);
	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
		goto cleanup_ret;

	utask->state = UTASK_SSTEP;
	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
		arch_uprobe_enable_step(&uprobe->arch);
		return;
	}

cleanup_ret:
	if (utask) {
		utask->active_uprobe = NULL;
		utask->state = UTASK_RUNNING;
	}
	if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
		/*
		 * cannot singlestep; cannot skip instruction;
		 * re-execute the instruction.
		 */
		instruction_pointer_set(regs, bp_vaddr);

	put_uprobe(uprobe);
}
/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	arch_uprobe_disable_step(&uprobe->arch);
	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}
/*
 * On breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
 * the state to UTASK_BP_HIT, and allows the thread to return from
 * interrupt.
 *
 * On singlestep exception, the singlestep notifier sets the TIF_UPROBE flag
 * and also sets the state to UTASK_SSTEP_ACK and allows the thread to return
 * from interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag and
 * calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	utask = current->utask;
	if (!utask || utask->state == UTASK_BP_HIT)
		handle_swbp(regs);
	else
		handle_singlestep(utask, regs);
}
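/*
 * Resulting state flow, pieced together from the notifiers below and
 * handle_swbp()/handle_singlestep() above:
 *
 *	breakpoint trap -> UTASK_BP_HIT -> handle_swbp() -> UTASK_SSTEP
 *	singlestep trap -> UTASK_SSTEP_ACK (or UTASK_SSTEP_TRAPPED on a
 *	signal/xol fault) -> handle_singlestep() -> UTASK_RUNNING
 */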
/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
		return 0;

	utask = current->utask;
	if (utask)
		utask->state = UTASK_BP_HIT;

	set_thread_flag(TIF_UPROBE);

	return 1;
}
/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}
static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};
static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++) {
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);
static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);