/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
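
/*
 * Illustrative note: both hashes key on the inode pointer reduced modulo
 * UPROBES_HASH_SZ, so every (un)register and mmap walk for a given inode
 * serializes on the same mutex while unrelated inodes usually proceed in
 * parallel.  The typical pattern, as used by uprobe_register() below, is:
 *
 *	mutex_lock(uprobes_hash(inode));
 *	uprobe = alloc_uprobe(inode, offset);
 *	...
 *	mutex_unlock(uprobes_hash(inode));
 */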

/*
 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
 * events active at this time.  Probably a fine grained per inode count is
 * better?
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);

/*
 * Maintain a temporary per vma info that can be used to search if a vma
 * has already been handled. This structure is introduced since extending
 * vm_area_struct wasn't recommended.
 */
struct vma_info {
	struct list_head	probe_list;
	struct mm_struct	*mm;
	loff_t			vaddr;
};

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	if (!vma->vm_file)
		return false;

	if (!is_register)
		return true;

	if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
				== (VM_READ|VM_EXEC))
		return true;

	return false;
}

static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
{
	loff_t vaddr;

	vaddr = vma->vm_start + offset;
	vaddr -= vma->vm_pgoff << PAGE_SHIFT;

	return vaddr;
}
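
/*
 * Worked example for vma_address() (numbers are illustrative only): a
 * vma mapping the file from vm_pgoff 2 (file offset 0x2000 with 4K
 * pages) at vm_start 0x400000 puts a probe at file offset 0x3456 at
 * 0x400000 + 0x3456 - 0x2000 = 0x401456.
 */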

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:   vma that holds the pte pointing to page
 * @page:  the cowed page we are replacing by kpage
 * @kpage: the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT if @addr could not be resolved in @vma,
 * or -EAGAIN if the pte could not be locked (caller should retry).
 */
static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		return -EFAULT;

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		return -EAGAIN;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);
	pte_unmap_unlock(ptep, ptl);

	return 0;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify read_opcode / write_opcode
 * accordingly. This would never be a problem for archs that have fixed
 * length instructions.
 */
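
/*
 * For reference, a minimal sketch of the arch-side definitions that back
 * the weak hooks in this file (the real definitions live in each arch's
 * asm/uprobes.h; x86 shown here as an example):
 *
 *	typedef u8 uprobe_opcode_t;
 *	#define UPROBE_SWBP_INSN	0xcc	(the one-byte int3 opcode)
 *	#define UPROBE_SWBP_INSN_SIZE	1
 *
 * An arch whose breakpoint is not its smallest instruction would also
 * override is_swbp_insn() with a non-weak definition of its own.
 */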

/*
 * write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch breakpointing information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct address_space *mapping;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	struct uprobe *uprobe;
	unsigned long pgoff;
	loff_t addr;
	int ret;
retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = -EINVAL;

	/*
	 * We are interested in text pages only. Our pages of interest
	 * should be mapped for read and execute only. We desist from
	 * adding probes in write mapped pages since the breakpoints
	 * might end up in the file copy.
	 */
	if (!valid_vma(vma, is_swbp_insn(&opcode)))
		goto put_out;

	uprobe = container_of(auprobe, struct uprobe, arch);
	mapping = uprobe->inode->i_mapping;
	if (mapping != vma->vm_file->f_mapping)
		goto put_out;

	addr = vma_address(vma, uprobe->offset);
	if (vaddr != (unsigned long)addr)
		goto put_out;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_out;

	__SetPageUptodate(new_page);

	/*
	 * lock page will serialize against do_wp_page()'s
	 * PageAnon() handling
	 */
	lock_page(old_page);
	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);

	/* poke the new insn in, ASSUMES we don't cross page boundary */
	pgoff = (vaddr & ~PAGE_MASK);
	BUG_ON(pgoff + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
	memcpy(vaddr_new + pgoff, &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto unlock_out;

	lock_page(new_page);
	ret = __replace_page(vma, old_page, new_page);
	unlock_page(new_page);

unlock_out:
	unlock_page(old_page);
	page_cache_release(new_page);

put_out:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * read_opcode - read the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to read the opcode.
 * @opcode: location to store the read opcode.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 * Return 0 (success) or a negative errno.
 */
static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	struct page *page;
	void *vaddr_new;
	int ret;

	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (ret <= 0)
		return ret;

	lock_page(page);
	vaddr_new = kmap_atomic(page);
	vaddr &= ~PAGE_MASK;
	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(vaddr_new);
	unlock_page(page);

	put_page(page);

	return 0;
}

static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	uprobe_opcode_t opcode;
	int result;

	if (current->mm == mm) {
		pagefault_disable();
		result = __copy_from_user_inatomic(&opcode, (void __user *)vaddr,
								sizeof(opcode));
		pagefault_enable();

		if (likely(result == 0))
			goto out;
	}

	result = read_opcode(mm, vaddr, &opcode);
	if (result)
		return result;
out:
	if (is_swbp_insn(&opcode))
		return 1;

	return 0;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;

	result = is_swbp_at_addr(mm, vaddr);
	if (result == 1)
		return -EEXIST;

	if (result)
		return result;

	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 * @verify: if true, verify existence of breakpoint instruction.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
{
	if (verify) {
		int result;

		result = is_swbp_at_addr(mm, vaddr);
		if (!result)
			return -EINVAL;

		if (result != 1)
			return result;
	}
	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;
	struct uprobe *u;

	spin_lock_irqsave(&uprobes_treelock, flags);
	u = __insert_uprobe(uprobe);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	/* For now assume that the instruction need not be single-stepped */
	uprobe->flags |= UPROBE_SKIP_SSTEP;

	return u;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);
	INIT_LIST_HEAD(&uprobe->pending_list);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;

	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
		return;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (!uc->filter || uc->filter(uc, current))
			uc->handler(uc, regs);
	}
	up_read(&uprobe->consumer_rwsem);
}

/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int
__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
			unsigned long nbytes, unsigned long offset)
{
	struct file *filp = vma->vm_file;
	struct page *page;
	void *vaddr;
	unsigned long off1;
	unsigned long idx;

	if (!filp)
		return -EINVAL;

	if (!mapping->a_ops->readpage)
		return -EIO;

	idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
	off1 = offset &= ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off1, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}

static int
copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	addr &= ~PAGE_MASK;
	nbytes = PAGE_SIZE - addr;
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes))
			return -ENOMEM;

		bytes = nbytes;
	}
	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
}
/*
 * How mm->uprobes_state.count gets updated
 * uprobe_mmap() increments the count if
 *	- it successfully adds a breakpoint.
 *	- it cannot add a breakpoint, but sees that there is an underlying
 *	  breakpoint (via is_swbp_at_addr()).
 *
 * uprobe_munmap() decrements the count if
 *	- it sees an underlying breakpoint (via is_swbp_at_addr).
 *	  (Subsequent uprobe_unregister wouldn't find the breakpoint
 *	  unless a uprobe_mmap kicks in, since the old vma would be
 *	  dropped just after uprobe_munmap.)
 *
 * uprobe_register increments the count if:
 *	- it successfully adds a breakpoint.
 *
 * uprobe_unregister decrements the count if:
 *	- it sees an underlying breakpoint and removes it successfully
 *	  (via is_swbp_at_addr).
 *	  (Subsequent uprobe_munmap wouldn't find the breakpoint
 *	  since there is no underlying breakpoint after the
 *	  breakpoint removal.)
 */
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, loff_t vaddr)
{
	unsigned long addr;
	int ret;

	/*
	 * If probe is being deleted, unregister thread could be done with
	 * the vma-rmap-walk through. Adding a probe now can be fatal since
	 * nobody will be able to cleanup. Also we could be from fork or
	 * mremap path, where the probe might have already been inserted.
	 * Hence behave as if probe already existed.
	 */
	if (!uprobe->consumers)
		return -EEXIST;

	addr = (unsigned long)vaddr;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma, addr);
		if (ret)
			return ret;

		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -EEXIST;

		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, addr);
		if (ret)
			return ret;

		uprobe->flags |= UPROBE_COPY_INSN;
	}

	/*
	 * Ideally, we should be updating the probe count after the breakpoint
	 * has been successfully inserted. However a thread could hit the
	 * breakpoint we just inserted even before the probe count is
	 * incremented. If this is the first breakpoint placed, the breakpoint
	 * notifier might ignore uprobes and pass the trap to the thread.
	 * Hence increment before and decrement on failure.
	 */
	atomic_inc(&mm->uprobes_state.count);
	ret = set_swbp(&uprobe->arch, mm, addr);
	if (ret)
		atomic_dec(&mm->uprobes_state.count);

	return ret;
}

static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
{
	if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
		atomic_dec(&mm->uprobes_state.count);
}

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock_irqrestore(&uprobes_treelock, flags);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}

static struct vma_info *
__find_next_vma_info(struct address_space *mapping, struct list_head *head,
			struct vma_info *vi, loff_t offset, bool is_register)
{
	struct prio_tree_iter iter;
	struct vm_area_struct *vma;
	struct vma_info *tmpvi;
	unsigned long pgoff;
	int existing_vma;
	loff_t vaddr;

	pgoff = offset >> PAGE_SHIFT;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		existing_vma = 0;
		vaddr = vma_address(vma, offset);

		list_for_each_entry(tmpvi, head, probe_list) {
			if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
				existing_vma = 1;
				break;
			}
		}

		/*
		 * Another vma needs a probe to be installed. However skip
		 * installing the probe if the vma is about to be unlinked.
		 */
		if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
			vi->mm = vma->vm_mm;
			vi->vaddr = vaddr;
			list_add(&vi->probe_list, head);

			return vi;
		}
	}

	return NULL;
}

/*
 * Iterate in the rmap prio tree and find a vma where a probe has not
 * yet been inserted.
 */
static struct vma_info *
find_next_vma_info(struct address_space *mapping, struct list_head *head,
			loff_t offset, bool is_register)
{
	struct vma_info *vi, *retvi;

	vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
	if (!vi)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mapping->i_mmap_mutex);
	retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!retvi)
		kfree(vi);

	return retvi;
}

static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct list_head try_list;
	struct vm_area_struct *vma;
	struct address_space *mapping;
	struct vma_info *vi, *tmpvi;
	struct mm_struct *mm;
	loff_t vaddr;
	int ret;

	mapping = uprobe->inode->i_mapping;
	INIT_LIST_HEAD(&try_list);

	ret = 0;

	for (;;) {
		vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
		if (!vi)
			break;

		if (IS_ERR(vi)) {
			ret = PTR_ERR(vi);
			break;
		}

		mm = vi->mm;
		down_write(&mm->mmap_sem);
		vma = find_vma(mm, (unsigned long)vi->vaddr);
		if (!vma || !valid_vma(vma, is_register)) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_write(&mm->mmap_sem);
			mmput(mm);
			continue;
		}
		vaddr = vma_address(vma, uprobe->offset);
		if (vma->vm_file->f_mapping->host != uprobe->inode ||
						vaddr != vi->vaddr) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_write(&mm->mmap_sem);
			mmput(mm);
			continue;
		}

		if (is_register)
			ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
		else
			remove_breakpoint(uprobe, mm, vi->vaddr);

		up_write(&mm->mmap_sem);
		mmput(mm);
		if (is_register) {
			if (ret && ret == -EEXIST)
				ret = 0;
			if (ret)
				break;
		}
	}

	list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
		list_del(&vi->probe_list);
		kfree(vi);
	}

	return ret;
}

static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO : can't unregister? schedule a worker thread */
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (uprobe && !consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);
}
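
/*
 * Minimal usage sketch for the two entry points above (the consumer and
 * its names are hypothetical, not part of this file).  A client fills in
 * a struct uprobe_consumer and hands it to uprobe_register() along with
 * the inode and file offset of the instruction to probe:
 *
 *	static int sample_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer sample_consumer = {
 *		.handler = sample_handler,
 *		.filter  = NULL,	(NULL means: run for every task)
 *	};
 *
 *	err = uprobe_register(inode, offset, &sample_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &sample_consumer);
 *
 * Multiple consumers may be registered at the same inode:offset; they are
 * chained via uc->next, which is why uprobe_register() rejects a consumer
 * whose ->next is already set.
 */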

/*
 * Of all the nodes that correspond to the given inode, return the node
 * with the least offset.
 */
static struct rb_node *find_least_offset_node(struct inode *inode)
{
	struct uprobe u = { .inode = inode, .offset = 0};
	struct rb_node *n = uprobes_tree.rb_node;
	struct rb_node *close_node = NULL;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);

		if (uprobe->inode == inode)
			close_node = n;

		if (!match)
			return close_node;

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}

	return close_node;
}

/*
 * For a given inode, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode, struct list_head *head)
{
	struct uprobe *uprobe;
	unsigned long flags;
	struct rb_node *n;

	spin_lock_irqsave(&uprobes_treelock, flags);

	n = find_least_offset_node(inode);

	for (; n; n = rb_next(n)) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		if (uprobe->inode != inode)
			break;

		list_add(&uprobe->pending_list, head);
		atomic_inc(&uprobe->ref);
	}

	spin_unlock_irqrestore(&uprobes_treelock, flags);
}

/*
 * Called from mmap_region.
 * called with mm->mmap_sem acquired.
 *
 * Return a negative errno if we fail to insert probes and we cannot
 * bail-out.
 * Return 0 otherwise, i.e.:
 *
 *	- successful insertion of probes
 *	- (or) no possible probes to be inserted.
 *	- (or) insertion of probes failed but we can bail-out.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;
	int ret, count;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	INIT_LIST_HEAD(&tmp_list);
	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, &tmp_list);

	ret = 0;
	count = 0;

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		loff_t vaddr;

		list_del(&uprobe->pending_list);
		if (!ret) {
			vaddr = vma_address(vma, uprobe->offset);

			if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
				put_uprobe(uprobe);
				continue;
			}

			ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);

			/* Ignore double add: */
			if (ret == -EEXIST) {
				ret = 0;

				if (!is_swbp_at_addr(vma->vm_mm, vaddr))
					continue;

				/*
				 * Unable to insert a breakpoint, but
				 * breakpoint lies underneath. Increment the
				 * probe count.
				 */
				atomic_inc(&vma->vm_mm->uprobes_state.count);
			}

			if (!ret)
				count++;
		}
		put_uprobe(uprobe);
	}

	mutex_unlock(uprobes_mmap_hash(inode));

	if (ret)
		atomic_sub(count, &vma->vm_mm->uprobes_state.count);

	return ret;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->uprobes_state.count))
		return;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return;

	INIT_LIST_HEAD(&tmp_list);
	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, &tmp_list);

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		loff_t vaddr;

		list_del(&uprobe->pending_list);
		vaddr = vma_address(vma, uprobe->offset);

		if (vaddr >= start && vaddr < end) {
			/*
			 * An unregister could have removed the probe before
			 * unmap. So check before we decrement the count.
			 */
			if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
				atomic_dec(&vma->vm_mm->uprobes_state.count);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));
}

/* Slot allocation for XOL */
static int xol_add_vma(struct xol_area *area)
{
	struct mm_struct *mm;
	int ret;

	area->page = alloc_page(GFP_HIGHUSER);
	if (!area->page)
		return -ENOMEM;

	ret = -EALREADY;
	mm = current->mm;

	down_write(&mm->mmap_sem);
	if (mm->uprobes_state.xol_area)
		goto fail;

	ret = -ENOMEM;

	/* Try to map as high as possible, this is only a hint. */
	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
	if (area->vaddr & ~PAGE_MASK) {
		ret = area->vaddr;
		goto fail;
	}

	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
	if (ret)
		goto fail;

	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
	ret = 0;

fail:
	up_write(&mm->mmap_sem);
	if (ret)
		__free_page(area->page);

	return ret;
}

static struct xol_area *get_xol_area(struct mm_struct *mm)
{
	struct xol_area *area;

	area = mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */

	return area;
}

/*
 * xol_alloc_area - Allocate process's xol_area.
 * This area will be used for storing instructions for execution out of
 * line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *xol_alloc_area(void)
{
	struct xol_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);

	if (!area->bitmap)
		goto fail;

	init_waitqueue_head(&area->wq);
	if (!xol_add_vma(area))
		return area;

fail:
	kfree(area->bitmap);
	kfree(area);

	return get_xol_area(current->mm);
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}

/*
 * uprobe_reset_state - Reset the uprobes state for a fresh mm: clear the
 * xol_area pointer (the area itself is not freed here) and zero the
 * probe count.
 */
void uprobe_reset_state(struct mm_struct *mm)
{
	mm->uprobes_state.xol_area = NULL;
	atomic_set(&mm->uprobes_state.count, 0);
}

/*
 * Search for a free slot, sleeping on the waitqueue if all slots are
 * currently busy.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}
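
/*
 * Worked example for the slot arithmetic above (sizes illustrative;
 * UPROBE_XOL_SLOT_BYTES is arch-defined): with 4K pages and 128-byte
 * slots, UINSNS_PER_PAGE is 32, and taking bit 5 of area->bitmap hands
 * out the slot at area->vaddr + 5 * 128 = area->vaddr + 0x280.
 */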

/*
 * xol_get_insn_slot - If the thread was not already allocated a slot,
 * allocate one.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
{
	struct xol_area *area;
	unsigned long offset;
	void *vaddr;

	area = get_xol_area(current->mm);
	if (!area) {
		area = xol_alloc_area();
		if (!area)
			return 0;
	}
	current->utask->xol_vaddr = xol_take_insn_slot(area);

	/*
	 * Initialize the slot if xol_vaddr points to valid
	 * instruction slot.
	 */
	if (unlikely(!current->utask->xol_vaddr))
		return 0;

	current->utask->vaddr = slot_addr;
	offset = current->utask->xol_vaddr & ~PAGE_MASK;
	vaddr = kmap_atomic(area->page);
	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
	kunmap_atomic(vaddr);

	return current->utask->xol_vaddr;
}

/*
 * xol_free_insn_slot - If the slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;

	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task.
 * Called when the thread hits a breakpoint for the first time.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *add_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof *utask, GFP_KERNEL);
	if (unlikely(!utask))
		return NULL;

	utask->active_uprobe = NULL;
	current->utask = utask;
	return utask;
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
{
	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
		return 0;

	return -EFAULT;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When the xol insn
 * itself triggers the signal, restart the original insn even if the task
 * is already SIGKILL'ed (since coredump should report the correct ip).
 * This is even more important if the task has a handler for SIGSEGV/etc:
 * the _same_ instruction should be repeated again after return from the
 * signal handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}

/*
 * Avoid singlestepping the original instruction if the original instruction
 * is a NOP or can be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		return true;

	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
	return false;
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode;
			loff_t offset;

			inode = vma->vm_file->f_mapping->host;
			offset = bp_vaddr - vma->vm_start;
			offset += (vma->vm_pgoff << PAGE_SHIFT);
			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}
	up_read(&mm->mmap_sem);

	return uprobe;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);

	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	utask = current->utask;
	if (!utask) {
		utask = add_utask();
		/* Cannot allocate; re-execute the instruction. */
		if (!utask)
			goto cleanup_ret;
	}
	utask->active_uprobe = uprobe;
	handler_chain(uprobe, regs);
	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
		goto cleanup_ret;

	utask->state = UTASK_SSTEP;
	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
		user_enable_single_step(current);
		return;
	}

cleanup_ret:
	if (utask) {
		utask->active_uprobe = NULL;
		utask->state = UTASK_RUNNING;
	}
	if (uprobe) {
		if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
			/*
			 * cannot singlestep; cannot skip instruction;
			 * re-execute the instruction.
			 */
			instruction_pointer_set(regs, bp_vaddr);

		put_uprobe(uprobe);
	}
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	user_disable_single_step(current);
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * On breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag (and
 * on subsequent probe hits on the thread sets the state to UTASK_BP_HIT)
 * and allows the thread to return from interrupt.
 *
 * On singlestep exception, the singlestep notifier sets the TIF_UPROBE flag
 * and also sets the state to UTASK_SSTEP_ACK and allows the thread to return
 * from interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag and
 * calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	utask = current->utask;
	if (!utask || utask->state == UTASK_BP_HIT)
		handle_swbp(regs);
	else
		handle_singlestep(utask, regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	if (!current->mm || !atomic_read(&current->mm->uprobes_state.count))
		/* task is currently not uprobed */
		return 0;

	utask = current->utask;
	if (utask)
		utask->state = UTASK_BP_HIT;

	set_thread_flag(TIF_UPROBE);

	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++) {
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);

static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);