Commit | Line | Data |
---|---|---|
2b144498 | 1 | /* |
7b2d81d4 | 2 | * User-space Probes (UProbes) |
2b144498 SD |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
17 | * | |
35aa621b | 18 | * Copyright (C) IBM Corporation, 2008-2012 |
2b144498 SD |
19 | * Authors: |
20 | * Srikar Dronamraju | |
21 | * Jim Keniston | |
35aa621b | 22 | * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
2b144498 SD |
23 | */ |
24 | ||
25 | #include <linux/kernel.h> | |
26 | #include <linux/highmem.h> | |
27 | #include <linux/pagemap.h> /* read_mapping_page */ | |
28 | #include <linux/slab.h> | |
29 | #include <linux/sched.h> | |
e8440c14 | 30 | #include <linux/export.h> |
2b144498 SD |
31 | #include <linux/rmap.h> /* anon_vma_prepare */ |
32 | #include <linux/mmu_notifier.h> /* set_pte_at_notify */ | |
33 | #include <linux/swap.h> /* try_to_free_swap */ | |
0326f5a9 SD |
34 | #include <linux/ptrace.h> /* user_enable_single_step */ |
35 | #include <linux/kdebug.h> /* notifier mechanism */ | |
194f8dcb | 36 | #include "../../mm/internal.h" /* munlock_vma_page */ |
32cdba1e | 37 | #include <linux/percpu-rwsem.h> |
aa59c53f | 38 | #include <linux/task_work.h> |
7b2d81d4 | 39 | |
2b144498 SD |
40 | #include <linux/uprobes.h> |
41 | ||
d4b3b638 SD |
42 | #define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES) |
43 | #define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE | |
44 | ||
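UINSNS_PER_PAGE is simply how many fixed-size out-of-line (XOL) slots fit in one page. A minimal sketch of the arithmetic, assuming 4 KiB pages and the 128-byte slot size used on x86 (both values are architecture-dependent):

```c
/* Illustrative only; PAGE_SIZE and UPROBE_XOL_SLOT_BYTES vary by arch. */
#define ASSUMED_PAGE_SIZE	4096	/* 4 KiB page */
#define ASSUMED_XOL_SLOT_BYTES	 128	/* x86 slot size */

/* 4096 / 128 = 32 single-instruction slots per XOL page. */
_Static_assert(ASSUMED_PAGE_SIZE / ASSUMED_XOL_SLOT_BYTES == 32,
	       "one XOL page provides 32 slots");
```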
2b144498 | 45 | static struct rb_root uprobes_tree = RB_ROOT; |
441f1eb7 ON |
46 | /* |
47 | * allows us to skip uprobe_mmap() if there are no uprobe events active
48 | * at this time. Probably a fine-grained per-inode count is better?
49 | */ | |
50 | #define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree) | |
7b2d81d4 | 51 | |
2b144498 SD |
52 | static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */ |
53 | ||
54 | #define UPROBES_HASH_SZ 13 | |
2b144498 SD |
55 | /* serialize uprobe->pending_list */ |
56 | static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; | |
7b2d81d4 | 57 | #define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ]) |
2b144498 | 58 | |
32cdba1e ON |
59 | static struct percpu_rw_semaphore dup_mmap_sem; |
60 | ||
cb9a19fe | 61 | /* Have a copy of original instruction */ |
71434f2f | 62 | #define UPROBE_COPY_INSN 0 |
cb9a19fe | 63 | /* Can skip singlestep */ |
bb929284 | 64 | #define UPROBE_SKIP_SSTEP 1 |
cb9a19fe | 65 | |
3ff54efd SD |
66 | struct uprobe { |
67 | struct rb_node rb_node; /* node in the rb tree */ | |
68 | atomic_t ref; | |
e591c8d7 | 69 | struct rw_semaphore register_rwsem; |
3ff54efd SD |
70 | struct rw_semaphore consumer_rwsem; |
71 | struct list_head pending_list; | |
72 | struct uprobe_consumer *consumers; | |
73 | struct inode *inode; /* Also hold a ref to inode */ | |
74 | loff_t offset; | |
71434f2f | 75 | unsigned long flags; |
3ff54efd SD |
76 | struct arch_uprobe arch; |
77 | }; | |
78 | ||
0dfd0eb8 AA |
79 | struct return_instance { |
80 | struct uprobe *uprobe; | |
81 | unsigned long func; | |
82 | unsigned long orig_ret_vaddr; /* original return address */ | |
83 | bool chained; /* true, if instance is nested */ | |
84 | ||
85 | struct return_instance *next; /* keep as stack */ | |
86 | }; | |
87 | ||
2b144498 SD |
88 | /* |
89 | * valid_vma: Verify if the specified vma is an executable vma | |
90 | * Relax restrictions while unregistering: vm_flags might have | |
91 | * changed after breakpoint was inserted. | |
92 | * - is_register: indicates if we are in register context. | |
93 | * - Return true if the specified vma is an
94 | * executable vma.
95 | */ | |
96 | static bool valid_vma(struct vm_area_struct *vma, bool is_register) | |
97 | { | |
e40cfce6 | 98 | vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED; |
2b144498 | 99 | |
e40cfce6 ON |
100 | if (is_register) |
101 | flags |= VM_WRITE; | |
2b144498 | 102 | |
e40cfce6 | 103 | return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; |
2b144498 SD |
104 | } |
105 | ||
57683f72 | 106 | static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) |
2b144498 | 107 | { |
57683f72 | 108 | return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); |
2b144498 SD |
109 | } |
110 | ||
cb113b47 ON |
111 | static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) |
112 | { | |
113 | return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); | |
114 | } | |
115 | ||
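The two helpers above convert between a file offset and a user virtual address using only vm_start and vm_pgoff. A worked example with made-up values (not taken from the kernel source):

```c
/*
 * Suppose a vma maps the file starting at page offset 2 (file offset 0x2000)
 * at vm_start == 0x400000, and a probe sits at file offset 0x2345:
 *
 *   offset_to_vaddr: 0x400000 + 0x2345 - (2 << PAGE_SHIFT)
 *                  = 0x400000 + 0x2345 - 0x2000 = 0x400345
 *
 *   vaddr_to_offset: (2 << PAGE_SHIFT) + (0x400345 - 0x400000)
 *                  = 0x2000 + 0x345 = 0x2345
 *
 * (assumes PAGE_SHIFT == 12, i.e. 4 KiB pages)
 */
```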
2b144498 SD |
116 | /** |
117 | * __replace_page - replace page in vma by new page. | |
118 | * based on replace_page in mm/ksm.c | |
119 | * | |
120 | * @vma: vma that holds the pte pointing to page | |
c517ee74 | 121 | * @addr: address the old @page is mapped at |
2b144498 SD |
122 | * @page: the COWed page to be replaced by @kpage
123 | * @kpage: the modified page that replaces @page
124 | * | |
125 | * Returns 0 on success, -EFAULT on failure. | |
126 | */ | |
c517ee74 ON |
127 | static int __replace_page(struct vm_area_struct *vma, unsigned long addr, |
128 | struct page *page, struct page *kpage) | |
2b144498 SD |
129 | { |
130 | struct mm_struct *mm = vma->vm_mm; | |
5323ce71 ON |
131 | spinlock_t *ptl; |
132 | pte_t *ptep; | |
9f92448c | 133 | int err; |
6bdb913f HE |
134 | /* For mmu_notifiers */ |
135 | const unsigned long mmun_start = addr; | |
136 | const unsigned long mmun_end = addr + PAGE_SIZE; | |
2b144498 | 137 | |
194f8dcb | 138 | /* For try_to_free_swap() and munlock_vma_page() below */ |
9f92448c ON |
139 | lock_page(page); |
140 | ||
6bdb913f | 141 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
9f92448c | 142 | err = -EAGAIN; |
5323ce71 | 143 | ptep = page_check_address(page, mm, addr, &ptl, 0); |
2b144498 | 144 | if (!ptep) |
9f92448c | 145 | goto unlock; |
2b144498 SD |
146 | |
147 | get_page(kpage); | |
148 | page_add_new_anon_rmap(kpage, vma, addr); | |
149 | ||
7396fa81 SD |
150 | if (!PageAnon(page)) { |
151 | dec_mm_counter(mm, MM_FILEPAGES); | |
152 | inc_mm_counter(mm, MM_ANONPAGES); | |
153 | } | |
154 | ||
2b144498 SD |
155 | flush_cache_page(vma, addr, pte_pfn(*ptep)); |
156 | ptep_clear_flush(vma, addr, ptep); | |
157 | set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); | |
158 | ||
159 | page_remove_rmap(page); | |
160 | if (!page_mapped(page)) | |
161 | try_to_free_swap(page); | |
2b144498 | 162 | pte_unmap_unlock(ptep, ptl); |
2b144498 | 163 | |
194f8dcb ON |
164 | if (vma->vm_flags & VM_LOCKED) |
165 | munlock_vma_page(page); | |
166 | put_page(page); | |
167 | ||
9f92448c ON |
168 | err = 0; |
169 | unlock: | |
6bdb913f | 170 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
9f92448c ON |
171 | unlock_page(page); |
172 | return err; | |
2b144498 SD |
173 | } |
174 | ||
175 | /** | |
5cb4ac3a | 176 | * is_swbp_insn - check if the instruction is a breakpoint instruction.
2b144498 | 177 | * @insn: instruction to be checked.
5cb4ac3a | 178 | * Default implementation of is_swbp_insn()
2b144498 SD |
179 | * Returns true if @insn is a breakpoint instruction. |
180 | */ | |
5cb4ac3a | 181 | bool __weak is_swbp_insn(uprobe_opcode_t *insn) |
2b144498 | 182 | { |
5cb4ac3a | 183 | return *insn == UPROBE_SWBP_INSN; |
2b144498 SD |
184 | } |
185 | ||
0908ad6e AM |
186 | /** |
187 | * is_trap_insn - check if instruction is breakpoint instruction. | |
188 | * @insn: instruction to be checked. | |
189 | * Default implementation of is_trap_insn | |
190 | * Returns true if @insn is a breakpoint instruction. | |
191 | * | |
192 | * This function is needed for the case where an architecture has multiple | |
193 | * trap instructions (like powerpc). | |
194 | */ | |
195 | bool __weak is_trap_insn(uprobe_opcode_t *insn) | |
196 | { | |
197 | return is_swbp_insn(insn); | |
198 | } | |
199 | ||
ab0d805c | 200 | static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) |
cceb55aa ON |
201 | { |
202 | void *kaddr = kmap_atomic(page); | |
ab0d805c | 203 | memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len); |
cceb55aa ON |
204 | kunmap_atomic(kaddr); |
205 | } | |
206 | ||
5669ccee ON |
207 | static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) |
208 | { | |
209 | void *kaddr = kmap_atomic(page); | |
210 | memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); | |
211 | kunmap_atomic(kaddr); | |
212 | } | |
213 | ||
ed6f6a50 ON |
214 | static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) |
215 | { | |
216 | uprobe_opcode_t old_opcode; | |
217 | bool is_swbp; | |
218 | ||
0908ad6e AM |
219 | /* |
220 | * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here. | |
221 | * We do not check if it is any other 'trap variant' which could | |
222 | * be conditional trap instruction such as the one powerpc supports. | |
223 | * | |
224 | * The logic is that we do not care if the underlying instruction | |
225 | * is a trap variant; uprobes always wins over any other (gdb) | |
226 | * breakpoint. | |
227 | */ | |
ab0d805c | 228 | copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); |
ed6f6a50 ON |
229 | is_swbp = is_swbp_insn(&old_opcode); |
230 | ||
231 | if (is_swbp_insn(new_opcode)) { | |
232 | if (is_swbp) /* register: already installed? */ | |
233 | return 0; | |
234 | } else { | |
235 | if (!is_swbp) /* unregister: was it changed by us? */ | |
076a365b | 236 | return 0; |
ed6f6a50 ON |
237 | } |
238 | ||
239 | return 1; | |
240 | } | |
241 | ||
2b144498 SD |
242 | /* |
243 | * NOTE: | |
244 | * Expect the breakpoint instruction to be the smallest size instruction for | |
245 | * the architecture. If an arch has variable-length instructions and the
246 | * breakpoint instruction is not of the smallest length
0908ad6e | 247 | * supported by that architecture, then we need to modify is_trap_at_addr and
f72d41fa ON |
248 | * uprobe_write_opcode accordingly. This would never be a problem for archs |
249 | * that have fixed length instructions. | |
2b144498 SD |
250 | */ |
251 | ||
252 | /* | |
f72d41fa | 253 | * uprobe_write_opcode - write the opcode at a given virtual address. |
2b144498 | 254 | * @mm: the probed process address space. |
2b144498 SD |
255 | * @vaddr: the virtual address to store the opcode. |
256 | * @opcode: opcode to be written at @vaddr. | |
257 | * | |
258 | * Called with mm->mmap_sem held (for read and with a reference to | |
259 | * mm). | |
260 | * | |
261 | * For mm @mm, write the opcode at @vaddr. | |
262 | * Return 0 (success) or a negative errno. | |
263 | */ | |
f72d41fa | 264 | int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, |
cceb55aa | 265 | uprobe_opcode_t opcode) |
2b144498 SD |
266 | { |
267 | struct page *old_page, *new_page; | |
2b144498 | 268 | struct vm_area_struct *vma; |
2b144498 | 269 | int ret; |
f403072c | 270 | |
5323ce71 | 271 | retry: |
2b144498 | 272 | /* Read the page with vaddr into memory */ |
75ed82ea | 273 | ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); |
2b144498 SD |
274 | if (ret <= 0) |
275 | return ret; | |
7b2d81d4 | 276 | |
ed6f6a50 ON |
277 | ret = verify_opcode(old_page, vaddr, &opcode); |
278 | if (ret <= 0) | |
279 | goto put_old; | |
280 | ||
2b144498 SD |
281 | ret = -ENOMEM; |
282 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); | |
283 | if (!new_page) | |
9f92448c | 284 | goto put_old; |
2b144498 SD |
285 | |
286 | __SetPageUptodate(new_page); | |
287 | ||
3f47107c ON |
288 | copy_highpage(new_page, old_page); |
289 | copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); | |
2b144498 SD |
290 | |
291 | ret = anon_vma_prepare(vma); | |
292 | if (ret) | |
9f92448c | 293 | goto put_new; |
2b144498 | 294 | |
c517ee74 | 295 | ret = __replace_page(vma, vaddr, old_page, new_page); |
2b144498 | 296 | |
9f92448c | 297 | put_new: |
2b144498 | 298 | page_cache_release(new_page); |
9f92448c | 299 | put_old: |
7b2d81d4 IM |
300 | put_page(old_page); |
301 | ||
5323ce71 ON |
302 | if (unlikely(ret == -EAGAIN)) |
303 | goto retry; | |
2b144498 SD |
304 | return ret; |
305 | } | |
306 | ||
2b144498 | 307 | /** |
5cb4ac3a | 308 | * set_swbp - store breakpoint at a given address. |
e3343e6a | 309 | * @auprobe: arch specific probepoint information. |
2b144498 | 310 | * @mm: the probed process address space. |
2b144498 SD |
311 | * @vaddr: the virtual address to insert the opcode. |
312 | * | |
313 | * For mm @mm, store the breakpoint instruction at @vaddr. | |
314 | * Return 0 (success) or a negative errno. | |
315 | */ | |
5cb4ac3a | 316 | int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) |
2b144498 | 317 | { |
f72d41fa | 318 | return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN); |
2b144498 SD |
319 | } |
320 | ||
321 | /** | |
322 | * set_orig_insn - Restore the original instruction. | |
323 | * @mm: the probed process address space. | |
e3343e6a | 324 | * @auprobe: arch specific probepoint information. |
2b144498 | 325 | * @vaddr: the virtual address to insert the opcode. |
2b144498 SD |
326 | * |
327 | * For mm @mm, restore the original opcode (opcode) at @vaddr. | |
328 | * Return 0 (success) or a negative errno. | |
329 | */ | |
7b2d81d4 | 330 | int __weak |
ded86e7c | 331 | set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) |
2b144498 | 332 | { |
f72d41fa | 333 | return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn); |
2b144498 SD |
334 | } |
335 | ||
336 | static int match_uprobe(struct uprobe *l, struct uprobe *r) | |
337 | { | |
338 | if (l->inode < r->inode) | |
339 | return -1; | |
7b2d81d4 | 340 | |
2b144498 SD |
341 | if (l->inode > r->inode) |
342 | return 1; | |
2b144498 | 343 | |
7b2d81d4 IM |
344 | if (l->offset < r->offset) |
345 | return -1; | |
346 | ||
347 | if (l->offset > r->offset) | |
348 | return 1; | |
2b144498 SD |
349 | |
350 | return 0; | |
351 | } | |
352 | ||
353 | static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset) | |
354 | { | |
355 | struct uprobe u = { .inode = inode, .offset = offset }; | |
356 | struct rb_node *n = uprobes_tree.rb_node; | |
357 | struct uprobe *uprobe; | |
358 | int match; | |
359 | ||
360 | while (n) { | |
361 | uprobe = rb_entry(n, struct uprobe, rb_node); | |
362 | match = match_uprobe(&u, uprobe); | |
363 | if (!match) { | |
364 | atomic_inc(&uprobe->ref); | |
365 | return uprobe; | |
366 | } | |
7b2d81d4 | 367 | |
2b144498 SD |
368 | if (match < 0) |
369 | n = n->rb_left; | |
370 | else | |
371 | n = n->rb_right; | |
372 | } | |
373 | return NULL; | |
374 | } | |
375 | ||
376 | /* | |
377 | * Find a uprobe corresponding to a given inode:offset | |
378 | * Acquires uprobes_treelock | |
379 | */ | |
380 | static struct uprobe *find_uprobe(struct inode *inode, loff_t offset) | |
381 | { | |
382 | struct uprobe *uprobe; | |
2b144498 | 383 | |
6f47caa0 | 384 | spin_lock(&uprobes_treelock); |
2b144498 | 385 | uprobe = __find_uprobe(inode, offset); |
6f47caa0 | 386 | spin_unlock(&uprobes_treelock); |
7b2d81d4 | 387 | |
2b144498 SD |
388 | return uprobe; |
389 | } | |
390 | ||
391 | static struct uprobe *__insert_uprobe(struct uprobe *uprobe) | |
392 | { | |
393 | struct rb_node **p = &uprobes_tree.rb_node; | |
394 | struct rb_node *parent = NULL; | |
395 | struct uprobe *u; | |
396 | int match; | |
397 | ||
398 | while (*p) { | |
399 | parent = *p; | |
400 | u = rb_entry(parent, struct uprobe, rb_node); | |
401 | match = match_uprobe(uprobe, u); | |
402 | if (!match) { | |
403 | atomic_inc(&u->ref); | |
404 | return u; | |
405 | } | |
406 | ||
407 | if (match < 0) | |
408 | p = &parent->rb_left; | |
409 | else | |
410 | p = &parent->rb_right; | |
411 | ||
412 | } | |
7b2d81d4 | 413 | |
2b144498 SD |
414 | u = NULL; |
415 | rb_link_node(&uprobe->rb_node, parent, p); | |
416 | rb_insert_color(&uprobe->rb_node, &uprobes_tree); | |
417 | /* get access + creation ref */ | |
418 | atomic_set(&uprobe->ref, 2); | |
7b2d81d4 | 419 | |
2b144498 SD |
420 | return u; |
421 | } | |
422 | ||
423 | /* | |
7b2d81d4 | 424 | * Acquire uprobes_treelock. |
2b144498 SD |
425 | * If a matching uprobe already exists in the rbtree,
426 | * increment its (access) refcount and return the matching uprobe.
427 | *
428 | * If there is no matching uprobe, insert this uprobe into the rbtree,
429 | * take a double refcount (access + creation) and return NULL.
430 | */ | |
431 | static struct uprobe *insert_uprobe(struct uprobe *uprobe) | |
432 | { | |
2b144498 SD |
433 | struct uprobe *u; |
434 | ||
6f47caa0 | 435 | spin_lock(&uprobes_treelock); |
2b144498 | 436 | u = __insert_uprobe(uprobe); |
6f47caa0 | 437 | spin_unlock(&uprobes_treelock); |
7b2d81d4 | 438 | |
2b144498 SD |
439 | return u; |
440 | } | |
441 | ||
442 | static void put_uprobe(struct uprobe *uprobe) | |
443 | { | |
444 | if (atomic_dec_and_test(&uprobe->ref)) | |
445 | kfree(uprobe); | |
446 | } | |
447 | ||
448 | static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) | |
449 | { | |
450 | struct uprobe *uprobe, *cur_uprobe; | |
451 | ||
452 | uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); | |
453 | if (!uprobe) | |
454 | return NULL; | |
455 | ||
456 | uprobe->inode = igrab(inode); | |
457 | uprobe->offset = offset; | |
e591c8d7 | 458 | init_rwsem(&uprobe->register_rwsem); |
2b144498 | 459 | init_rwsem(&uprobe->consumer_rwsem); |
bbc33d05 ON |
460 | /* For now assume that the instruction need not be single-stepped */ |
461 | __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags); | |
2b144498 SD |
462 | |
463 | /* add to uprobes_tree, sorted on inode:offset */ | |
464 | cur_uprobe = insert_uprobe(uprobe); | |
465 | ||
466 | /* a uprobe exists for this inode:offset combination */ | |
467 | if (cur_uprobe) { | |
468 | kfree(uprobe); | |
469 | uprobe = cur_uprobe; | |
470 | iput(inode); | |
7b2d81d4 IM |
471 | } |
472 | ||
2b144498 SD |
473 | return uprobe; |
474 | } | |
475 | ||
9a98e03c | 476 | static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) |
2b144498 SD |
477 | { |
478 | down_write(&uprobe->consumer_rwsem); | |
e3343e6a SD |
479 | uc->next = uprobe->consumers; |
480 | uprobe->consumers = uc; | |
2b144498 | 481 | up_write(&uprobe->consumer_rwsem); |
2b144498 SD |
482 | } |
483 | ||
484 | /* | |
e3343e6a SD |
485 | * For uprobe @uprobe, delete the consumer @uc. |
486 | * Return true if @uc was deleted successfully,
2b144498 SD |
487 | * otherwise return false.
488 | */ | |
e3343e6a | 489 | static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) |
2b144498 SD |
490 | { |
491 | struct uprobe_consumer **con; | |
492 | bool ret = false; | |
493 | ||
494 | down_write(&uprobe->consumer_rwsem); | |
495 | for (con = &uprobe->consumers; *con; con = &(*con)->next) { | |
e3343e6a SD |
496 | if (*con == uc) { |
497 | *con = uc->next; | |
2b144498 SD |
498 | ret = true; |
499 | break; | |
500 | } | |
501 | } | |
502 | up_write(&uprobe->consumer_rwsem); | |
7b2d81d4 | 503 | |
2b144498 SD |
504 | return ret; |
505 | } | |
506 | ||
e3343e6a | 507 | static int |
d436615e | 508 | __copy_insn(struct address_space *mapping, struct file *filp, char *insn, |
593609a5 | 509 | unsigned long nbytes, loff_t offset) |
2b144498 | 510 | { |
2b144498 | 511 | struct page *page; |
2b144498 | 512 | |
cc359d18 ON |
513 | if (!mapping->a_ops->readpage) |
514 | return -EIO; | |
2b144498 SD |
515 | /* |
516 | * Ensure that the page that has the original instruction is | |
517 | * populated and in page-cache. | |
518 | */ | |
2edb7b55 | 519 | page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp); |
2b144498 SD |
520 | if (IS_ERR(page)) |
521 | return PTR_ERR(page); | |
522 | ||
2edb7b55 | 523 | copy_from_page(page, offset, insn, nbytes); |
2b144498 | 524 | page_cache_release(page); |
7b2d81d4 | 525 | |
2b144498 SD |
526 | return 0; |
527 | } | |
528 | ||
d436615e | 529 | static int copy_insn(struct uprobe *uprobe, struct file *filp) |
2b144498 SD |
530 | { |
531 | struct address_space *mapping; | |
2b144498 | 532 | unsigned long nbytes; |
7b2d81d4 | 533 | int bytes; |
2b144498 | 534 | |
d436615e | 535 | nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK); |
2b144498 SD |
536 | mapping = uprobe->inode->i_mapping; |
537 | ||
538 | /* Instruction at end of binary; copy only available bytes */ | |
539 | if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size) | |
540 | bytes = uprobe->inode->i_size - uprobe->offset; | |
541 | else | |
542 | bytes = MAX_UINSN_BYTES; | |
543 | ||
544 | /* Instruction at the page-boundary; copy bytes in second page */ | |
545 | if (nbytes < bytes) { | |
fc36f595 ON |
546 | int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes, |
547 | bytes - nbytes, uprobe->offset + nbytes); | |
548 | if (err) | |
549 | return err; | |
2b144498 SD |
550 | bytes = nbytes; |
551 | } | |
d436615e | 552 | return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset); |
2b144498 SD |
553 | } |
554 | ||
cb9a19fe ON |
555 | static int prepare_uprobe(struct uprobe *uprobe, struct file *file, |
556 | struct mm_struct *mm, unsigned long vaddr) | |
557 | { | |
558 | int ret = 0; | |
559 | ||
71434f2f | 560 | if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) |
cb9a19fe ON |
561 | return ret; |
562 | ||
d4d3ccc6 ON |
563 | /* TODO: move this into _register, until then we abuse this sem. */ |
564 | down_write(&uprobe->consumer_rwsem); | |
71434f2f | 565 | if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) |
4710f05f ON |
566 | goto out; |
567 | ||
cb9a19fe ON |
568 | ret = copy_insn(uprobe, file); |
569 | if (ret) | |
570 | goto out; | |
571 | ||
572 | ret = -ENOTSUPP; | |
0908ad6e | 573 | if (is_trap_insn((uprobe_opcode_t *)uprobe->arch.insn)) |
cb9a19fe ON |
574 | goto out; |
575 | ||
576 | ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); | |
577 | if (ret) | |
578 | goto out; | |
579 | ||
f72d41fa | 580 | /* uprobe_write_opcode() assumes we don't cross page boundary */ |
cb9a19fe ON |
581 | BUG_ON((uprobe->offset & ~PAGE_MASK) + |
582 | UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); | |
583 | ||
584 | smp_wmb(); /* pairs with rmb() in find_active_uprobe() */ | |
71434f2f | 585 | set_bit(UPROBE_COPY_INSN, &uprobe->flags); |
cb9a19fe ON |
586 | |
587 | out: | |
d4d3ccc6 | 588 | up_write(&uprobe->consumer_rwsem); |
4710f05f | 589 | |
cb9a19fe ON |
590 | return ret; |
591 | } | |
592 | ||
8a7f2fa0 ON |
593 | static inline bool consumer_filter(struct uprobe_consumer *uc, |
594 | enum uprobe_filter_ctx ctx, struct mm_struct *mm) | |
806a98bd | 595 | { |
8a7f2fa0 | 596 | return !uc->filter || uc->filter(uc, ctx, mm); |
806a98bd ON |
597 | } |
598 | ||
8a7f2fa0 ON |
599 | static bool filter_chain(struct uprobe *uprobe, |
600 | enum uprobe_filter_ctx ctx, struct mm_struct *mm) | |
63633cbf | 601 | { |
1ff6fee5 ON |
602 | struct uprobe_consumer *uc; |
603 | bool ret = false; | |
604 | ||
605 | down_read(&uprobe->consumer_rwsem); | |
606 | for (uc = uprobe->consumers; uc; uc = uc->next) { | |
8a7f2fa0 | 607 | ret = consumer_filter(uc, ctx, mm); |
1ff6fee5 ON |
608 | if (ret) |
609 | break; | |
610 | } | |
611 | up_read(&uprobe->consumer_rwsem); | |
612 | ||
613 | return ret; | |
63633cbf ON |
614 | } |
615 | ||
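For illustration, a consumer-side filter might look like the sketch below. It is not part of this file; it assumes the uprobe_consumer callback signature declared in include/linux/uprobes.h of this era, and my_target_mm is a hypothetical pointer the caller tracks:

```c
#include <linux/uprobes.h>
#include <linux/mm_types.h>

static struct mm_struct *my_target_mm;	/* hypothetical: set by the caller */

/* Only keep breakpoints installed in the one mm we care about. */
static bool my_filter(struct uprobe_consumer *self,
		      enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return mm == my_target_mm;
}
```

A consumer with such a .filter is consulted by consumer_filter()/filter_chain() above; a consumer whose .filter is NULL matches every mm.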
e3343e6a SD |
616 | static int |
617 | install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, | |
816c03fb | 618 | struct vm_area_struct *vma, unsigned long vaddr) |
2b144498 | 619 | { |
f8ac4ec9 | 620 | bool first_uprobe; |
2b144498 SD |
621 | int ret; |
622 | ||
cb9a19fe ON |
623 | ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); |
624 | if (ret) | |
625 | return ret; | |
682968e0 | 626 | |
f8ac4ec9 ON |
627 | /* |
628 | * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier();
629 | * the task can hit this breakpoint right after __replace_page(). | |
630 | */ | |
631 | first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags); | |
632 | if (first_uprobe) | |
633 | set_bit(MMF_HAS_UPROBES, &mm->flags); | |
634 | ||
816c03fb | 635 | ret = set_swbp(&uprobe->arch, mm, vaddr); |
9f68f672 ON |
636 | if (!ret) |
637 | clear_bit(MMF_RECALC_UPROBES, &mm->flags); | |
638 | else if (first_uprobe) | |
f8ac4ec9 | 639 | clear_bit(MMF_HAS_UPROBES, &mm->flags); |
2b144498 SD |
640 | |
641 | return ret; | |
642 | } | |
643 | ||
076a365b | 644 | static int |
816c03fb | 645 | remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) |
2b144498 | 646 | { |
9f68f672 | 647 | set_bit(MMF_RECALC_UPROBES, &mm->flags); |
076a365b | 648 | return set_orig_insn(&uprobe->arch, mm, vaddr); |
2b144498 SD |
649 | } |
650 | ||
06b7bcd8 ON |
651 | static inline bool uprobe_is_active(struct uprobe *uprobe) |
652 | { | |
653 | return !RB_EMPTY_NODE(&uprobe->rb_node); | |
654 | } | |
0326f5a9 | 655 | /* |
778b032d ON |
656 | * There could be threads that have already hit the breakpoint. They |
657 | * will recheck the current insn and restart if find_uprobe() fails. | |
658 | * See find_active_uprobe(). | |
0326f5a9 | 659 | */ |
2b144498 SD |
660 | static void delete_uprobe(struct uprobe *uprobe) |
661 | { | |
06b7bcd8 ON |
662 | if (WARN_ON(!uprobe_is_active(uprobe))) |
663 | return; | |
664 | ||
6f47caa0 | 665 | spin_lock(&uprobes_treelock); |
2b144498 | 666 | rb_erase(&uprobe->rb_node, &uprobes_tree); |
6f47caa0 | 667 | spin_unlock(&uprobes_treelock); |
06b7bcd8 | 668 | RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ |
2b144498 SD |
669 | iput(uprobe->inode); |
670 | put_uprobe(uprobe); | |
2b144498 SD |
671 | } |
672 | ||
26872090 ON |
673 | struct map_info { |
674 | struct map_info *next; | |
675 | struct mm_struct *mm; | |
816c03fb | 676 | unsigned long vaddr; |
26872090 ON |
677 | }; |
678 | ||
679 | static inline struct map_info *free_map_info(struct map_info *info) | |
2b144498 | 680 | { |
26872090 ON |
681 | struct map_info *next = info->next; |
682 | kfree(info); | |
683 | return next; | |
684 | } | |
685 | ||
686 | static struct map_info * | |
687 | build_map_info(struct address_space *mapping, loff_t offset, bool is_register) | |
688 | { | |
689 | unsigned long pgoff = offset >> PAGE_SHIFT; | |
2b144498 | 690 | struct vm_area_struct *vma; |
26872090 ON |
691 | struct map_info *curr = NULL; |
692 | struct map_info *prev = NULL; | |
693 | struct map_info *info; | |
694 | int more = 0; | |
2b144498 | 695 | |
26872090 ON |
696 | again: |
697 | mutex_lock(&mapping->i_mmap_mutex); | |
6b2dbba8 | 698 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
2b144498 SD |
699 | if (!valid_vma(vma, is_register)) |
700 | continue; | |
701 | ||
7a5bfb66 ON |
702 | if (!prev && !more) { |
703 | /* | |
704 | * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through | |
705 | * reclaim. This is optimistic, no harm done if it fails. | |
706 | */ | |
707 | prev = kmalloc(sizeof(struct map_info), | |
708 | GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); | |
709 | if (prev) | |
710 | prev->next = NULL; | |
711 | } | |
26872090 ON |
712 | if (!prev) { |
713 | more++; | |
714 | continue; | |
2b144498 | 715 | } |
2b144498 | 716 | |
26872090 ON |
717 | if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) |
718 | continue; | |
7b2d81d4 | 719 | |
26872090 ON |
720 | info = prev; |
721 | prev = prev->next; | |
722 | info->next = curr; | |
723 | curr = info; | |
2b144498 | 724 | |
26872090 | 725 | info->mm = vma->vm_mm; |
57683f72 | 726 | info->vaddr = offset_to_vaddr(vma, offset); |
26872090 | 727 | } |
2b144498 SD |
728 | mutex_unlock(&mapping->i_mmap_mutex); |
729 | ||
26872090 ON |
730 | if (!more) |
731 | goto out; | |
732 | ||
733 | prev = curr; | |
734 | while (curr) { | |
735 | mmput(curr->mm); | |
736 | curr = curr->next; | |
737 | } | |
7b2d81d4 | 738 | |
26872090 ON |
739 | do { |
740 | info = kmalloc(sizeof(struct map_info), GFP_KERNEL); | |
741 | if (!info) { | |
742 | curr = ERR_PTR(-ENOMEM); | |
743 | goto out; | |
744 | } | |
745 | info->next = prev; | |
746 | prev = info; | |
747 | } while (--more); | |
748 | ||
749 | goto again; | |
750 | out: | |
751 | while (prev) | |
752 | prev = free_map_info(prev); | |
753 | return curr; | |
2b144498 SD |
754 | } |
755 | ||
bdf8647c ON |
756 | static int |
757 | register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) | |
2b144498 | 758 | { |
bdf8647c | 759 | bool is_register = !!new; |
26872090 ON |
760 | struct map_info *info; |
761 | int err = 0; | |
2b144498 | 762 | |
32cdba1e | 763 | percpu_down_write(&dup_mmap_sem); |
26872090 ON |
764 | info = build_map_info(uprobe->inode->i_mapping, |
765 | uprobe->offset, is_register); | |
32cdba1e ON |
766 | if (IS_ERR(info)) { |
767 | err = PTR_ERR(info); | |
768 | goto out; | |
769 | } | |
7b2d81d4 | 770 | |
26872090 ON |
771 | while (info) { |
772 | struct mm_struct *mm = info->mm; | |
773 | struct vm_area_struct *vma; | |
7b2d81d4 | 774 | |
076a365b | 775 | if (err && is_register) |
26872090 | 776 | goto free; |
7b2d81d4 | 777 | |
77fc4af1 | 778 | down_write(&mm->mmap_sem); |
f4d6dfe5 ON |
779 | vma = find_vma(mm, info->vaddr); |
780 | if (!vma || !valid_vma(vma, is_register) || | |
f281769e | 781 | file_inode(vma->vm_file) != uprobe->inode) |
26872090 ON |
782 | goto unlock; |
783 | ||
f4d6dfe5 ON |
784 | if (vma->vm_start > info->vaddr || |
785 | vaddr_to_offset(vma, info->vaddr) != uprobe->offset) | |
26872090 | 786 | goto unlock; |
2b144498 | 787 | |
806a98bd ON |
788 | if (is_register) { |
789 | /* consult only the "caller", new consumer. */ | |
bdf8647c | 790 | if (consumer_filter(new, |
8a7f2fa0 | 791 | UPROBE_FILTER_REGISTER, mm)) |
806a98bd ON |
792 | err = install_breakpoint(uprobe, mm, vma, info->vaddr); |
793 | } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) { | |
8a7f2fa0 ON |
794 | if (!filter_chain(uprobe, |
795 | UPROBE_FILTER_UNREGISTER, mm)) | |
806a98bd ON |
796 | err |= remove_breakpoint(uprobe, mm, info->vaddr); |
797 | } | |
78f74116 | 798 | |
26872090 ON |
799 | unlock: |
800 | up_write(&mm->mmap_sem); | |
801 | free: | |
802 | mmput(mm); | |
803 | info = free_map_info(info); | |
2b144498 | 804 | } |
32cdba1e ON |
805 | out: |
806 | percpu_up_write(&dup_mmap_sem); | |
26872090 | 807 | return err; |
2b144498 SD |
808 | } |
809 | ||
9a98e03c | 810 | static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc) |
2b144498 | 811 | { |
9a98e03c | 812 | consumer_add(uprobe, uc); |
bdf8647c | 813 | return register_for_each_vma(uprobe, uc); |
2b144498 SD |
814 | } |
815 | ||
04aab9b2 | 816 | static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) |
2b144498 | 817 | { |
04aab9b2 ON |
818 | int err; |
819 | ||
820 | if (!consumer_del(uprobe, uc)) /* WARN? */ | |
821 | return; | |
2b144498 | 822 | |
bdf8647c | 823 | err = register_for_each_vma(uprobe, NULL); |
bb929284 ON |
824 | /* TODO: can't unregister? schedule a worker thread */
825 | if (!uprobe->consumers && !err) | |
826 | delete_uprobe(uprobe); | |
2b144498 SD |
827 | } |
828 | ||
829 | /* | |
7b2d81d4 | 830 | * uprobe_register - register a probe |
2b144498 SD |
831 | * @inode: the file in which the probe has to be placed. |
832 | * @offset: offset from the start of the file. | |
e3343e6a | 833 | * @uc: information on howto handle the probe.. |
2b144498 | 834 | * |
7b2d81d4 | 835 | * Apart from the access refcount, uprobe_register() takes a creation |
2b144498 SD |
836 | * refcount (thro alloc_uprobe) if and only if this @uprobe is getting |
837 | * inserted into the rbtree (i.e first consumer for a @inode:@offset | |
7b2d81d4 | 838 | * tuple). Creation refcount stops uprobe_unregister from freeing the |
2b144498 | 839 | * @uprobe even before the register operation is complete. Creation |
e3343e6a | 840 | * refcount is released when the last @uc for the @uprobe |
2b144498 SD |
841 | * unregisters. |
842 | * | |
843 | * Return errno if it cannot successfully install probes
844 | * else return 0 (success) | |
845 | */ | |
e3343e6a | 846 | int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) |
2b144498 SD |
847 | { |
848 | struct uprobe *uprobe; | |
7b2d81d4 | 849 | int ret; |
2b144498 | 850 | |
ea024870 AA |
851 | /* Uprobe must have at least one set consumer */ |
852 | if (!uc->handler && !uc->ret_handler) | |
853 | return -EINVAL; | |
854 | ||
f0744af7 | 855 | /* Racy, just to catch the obvious mistakes */ |
2b144498 | 856 | if (offset > i_size_read(inode)) |
7b2d81d4 | 857 | return -EINVAL; |
2b144498 | 858 | |
66d06dff | 859 | retry: |
2b144498 | 860 | uprobe = alloc_uprobe(inode, offset); |
66d06dff ON |
861 | if (!uprobe) |
862 | return -ENOMEM; | |
863 | /* | |
864 | * We can race with uprobe_unregister()->delete_uprobe(). | |
865 | * Check uprobe_is_active() and retry if it is false. | |
866 | */ | |
867 | down_write(&uprobe->register_rwsem); | |
868 | ret = -EAGAIN; | |
869 | if (likely(uprobe_is_active(uprobe))) { | |
9a98e03c ON |
870 | ret = __uprobe_register(uprobe, uc); |
871 | if (ret) | |
04aab9b2 | 872 | __uprobe_unregister(uprobe, uc); |
2b144498 | 873 | } |
66d06dff ON |
874 | up_write(&uprobe->register_rwsem); |
875 | put_uprobe(uprobe); | |
2b144498 | 876 | |
66d06dff ON |
877 | if (unlikely(ret == -EAGAIN)) |
878 | goto retry; | |
2b144498 SD |
879 | return ret; |
880 | } | |
e8440c14 | 881 | EXPORT_SYMBOL_GPL(uprobe_register); |
2b144498 | 882 | |
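As a rough usage sketch (not part of this file), an in-kernel consumer registers against an inode:offset pair roughly as follows; the handler signature matches struct uprobe_consumer in include/linux/uprobes.h of this era:

```c
#include <linux/fs.h>
#include <linux/uprobes.h>

/* Called in task context each time the probed instruction is hit. */
static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	return 0;
}

static struct uprobe_consumer my_consumer = {
	.handler = my_handler,
};

/* @inode/@offset identify the instruction in the mapped file;
 * tear down later with uprobe_unregister(inode, offset, &my_consumer).
 */
static int my_attach(struct inode *inode, loff_t offset)
{
	return uprobe_register(inode, offset, &my_consumer);
}
```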
bdf8647c ON |
883 | /* |
884 | * uprobe_apply - add or remove the breakpoints for an already registered probe.
885 | * @inode: the file in which the probe is placed.
886 | * @offset: offset from the start of the file. | |
887 | * @uc: consumer which wants to add more or remove some breakpoints | |
888 | * @add: add or remove the breakpoints | |
889 | */ | |
890 | int uprobe_apply(struct inode *inode, loff_t offset, | |
891 | struct uprobe_consumer *uc, bool add) | |
892 | { | |
893 | struct uprobe *uprobe; | |
894 | struct uprobe_consumer *con; | |
895 | int ret = -ENOENT; | |
896 | ||
897 | uprobe = find_uprobe(inode, offset); | |
898 | if (!uprobe) | |
899 | return ret; | |
900 | ||
901 | down_write(&uprobe->register_rwsem); | |
902 | for (con = uprobe->consumers; con && con != uc ; con = con->next) | |
903 | ; | |
904 | if (con) | |
905 | ret = register_for_each_vma(uprobe, add ? uc : NULL); | |
906 | up_write(&uprobe->register_rwsem); | |
907 | put_uprobe(uprobe); | |
908 | ||
909 | return ret; | |
910 | } | |
911 | ||
2b144498 | 912 | /* |
7b2d81d4 | 913 | * uprobe_unregister - unregister an already registered probe.
2b144498 SD |
914 | * @inode: the file in which the probe has to be removed. |
915 | * @offset: offset from the start of the file. | |
e3343e6a | 916 | * @uc: identify which probe if multiple probes are colocated. |
2b144498 | 917 | */ |
e3343e6a | 918 | void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) |
2b144498 | 919 | { |
7b2d81d4 | 920 | struct uprobe *uprobe; |
2b144498 | 921 | |
2b144498 SD |
922 | uprobe = find_uprobe(inode, offset); |
923 | if (!uprobe) | |
924 | return; | |
925 | ||
e591c8d7 | 926 | down_write(&uprobe->register_rwsem); |
04aab9b2 | 927 | __uprobe_unregister(uprobe, uc); |
e591c8d7 | 928 | up_write(&uprobe->register_rwsem); |
c91368c4 | 929 | put_uprobe(uprobe); |
2b144498 | 930 | } |
e8440c14 | 931 | EXPORT_SYMBOL_GPL(uprobe_unregister); |
2b144498 | 932 | |
da1816b1 ON |
933 | static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) |
934 | { | |
935 | struct vm_area_struct *vma; | |
936 | int err = 0; | |
937 | ||
938 | down_read(&mm->mmap_sem); | |
939 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | |
940 | unsigned long vaddr; | |
941 | loff_t offset; | |
942 | ||
943 | if (!valid_vma(vma, false) || | |
f281769e | 944 | file_inode(vma->vm_file) != uprobe->inode) |
da1816b1 ON |
945 | continue; |
946 | ||
947 | offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; | |
948 | if (uprobe->offset < offset || | |
949 | uprobe->offset >= offset + vma->vm_end - vma->vm_start) | |
950 | continue; | |
951 | ||
952 | vaddr = offset_to_vaddr(vma, uprobe->offset); | |
953 | err |= remove_breakpoint(uprobe, mm, vaddr); | |
954 | } | |
955 | up_read(&mm->mmap_sem); | |
956 | ||
957 | return err; | |
958 | } | |
959 | ||
891c3970 ON |
960 | static struct rb_node * |
961 | find_node_in_range(struct inode *inode, loff_t min, loff_t max) | |
2b144498 | 962 | { |
2b144498 | 963 | struct rb_node *n = uprobes_tree.rb_node; |
2b144498 SD |
964 | |
965 | while (n) { | |
891c3970 | 966 | struct uprobe *u = rb_entry(n, struct uprobe, rb_node); |
2b144498 | 967 | |
891c3970 | 968 | if (inode < u->inode) { |
2b144498 | 969 | n = n->rb_left; |
891c3970 | 970 | } else if (inode > u->inode) { |
2b144498 | 971 | n = n->rb_right; |
891c3970 ON |
972 | } else { |
973 | if (max < u->offset) | |
974 | n = n->rb_left; | |
975 | else if (min > u->offset) | |
976 | n = n->rb_right; | |
977 | else | |
978 | break; | |
979 | } | |
2b144498 | 980 | } |
7b2d81d4 | 981 | |
891c3970 | 982 | return n; |
2b144498 SD |
983 | } |
984 | ||
985 | /* | |
891c3970 | 986 | * For a given range in vma, build a list of probes that need to be inserted. |
2b144498 | 987 | */ |
891c3970 ON |
988 | static void build_probe_list(struct inode *inode, |
989 | struct vm_area_struct *vma, | |
990 | unsigned long start, unsigned long end, | |
991 | struct list_head *head) | |
2b144498 | 992 | { |
891c3970 | 993 | loff_t min, max; |
891c3970 ON |
994 | struct rb_node *n, *t; |
995 | struct uprobe *u; | |
7b2d81d4 | 996 | |
891c3970 | 997 | INIT_LIST_HEAD(head); |
cb113b47 | 998 | min = vaddr_to_offset(vma, start); |
891c3970 | 999 | max = min + (end - start) - 1; |
2b144498 | 1000 | |
6f47caa0 | 1001 | spin_lock(&uprobes_treelock); |
891c3970 ON |
1002 | n = find_node_in_range(inode, min, max); |
1003 | if (n) { | |
1004 | for (t = n; t; t = rb_prev(t)) { | |
1005 | u = rb_entry(t, struct uprobe, rb_node); | |
1006 | if (u->inode != inode || u->offset < min) | |
1007 | break; | |
1008 | list_add(&u->pending_list, head); | |
1009 | atomic_inc(&u->ref); | |
1010 | } | |
1011 | for (t = n; (t = rb_next(t)); ) { | |
1012 | u = rb_entry(t, struct uprobe, rb_node); | |
1013 | if (u->inode != inode || u->offset > max) | |
1014 | break; | |
1015 | list_add(&u->pending_list, head); | |
1016 | atomic_inc(&u->ref); | |
1017 | } | |
2b144498 | 1018 | } |
6f47caa0 | 1019 | spin_unlock(&uprobes_treelock); |
2b144498 SD |
1020 | } |
1021 | ||
1022 | /* | |
5e5be71a | 1023 | * Called from mmap_region/vma_adjust with mm->mmap_sem acquired. |
2b144498 | 1024 | * |
5e5be71a ON |
1025 | * Currently we ignore all errors and always return 0, the callers |
1026 | * can't handle the failure anyway. | |
2b144498 | 1027 | */ |
7b2d81d4 | 1028 | int uprobe_mmap(struct vm_area_struct *vma) |
2b144498 SD |
1029 | { |
1030 | struct list_head tmp_list; | |
665605a2 | 1031 | struct uprobe *uprobe, *u; |
2b144498 | 1032 | struct inode *inode; |
2b144498 | 1033 | |
441f1eb7 | 1034 | if (no_uprobe_events() || !valid_vma(vma, true)) |
7b2d81d4 | 1035 | return 0; |
2b144498 | 1036 | |
f281769e | 1037 | inode = file_inode(vma->vm_file); |
2b144498 | 1038 | if (!inode) |
7b2d81d4 | 1039 | return 0; |
2b144498 | 1040 | |
2b144498 | 1041 | mutex_lock(uprobes_mmap_hash(inode)); |
891c3970 | 1042 | build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); |
806a98bd ON |
1043 | /* |
1044 | * We can race with uprobe_unregister(); this uprobe can already be
1045 | * removed. But in this case filter_chain() must return false, as all
1046 | * consumers have gone away. | |
1047 | */ | |
665605a2 | 1048 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { |
806a98bd | 1049 | if (!fatal_signal_pending(current) && |
8a7f2fa0 | 1050 | filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { |
57683f72 | 1051 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); |
5e5be71a | 1052 | install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); |
2b144498 SD |
1053 | } |
1054 | put_uprobe(uprobe); | |
1055 | } | |
2b144498 SD |
1056 | mutex_unlock(uprobes_mmap_hash(inode)); |
1057 | ||
5e5be71a | 1058 | return 0; |
2b144498 SD |
1059 | } |
1060 | ||
9f68f672 ON |
1061 | static bool |
1062 | vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) | |
1063 | { | |
1064 | loff_t min, max; | |
1065 | struct inode *inode; | |
1066 | struct rb_node *n; | |
1067 | ||
f281769e | 1068 | inode = file_inode(vma->vm_file); |
9f68f672 ON |
1069 | |
1070 | min = vaddr_to_offset(vma, start); | |
1071 | max = min + (end - start) - 1; | |
1072 | ||
1073 | spin_lock(&uprobes_treelock); | |
1074 | n = find_node_in_range(inode, min, max); | |
1075 | spin_unlock(&uprobes_treelock); | |
1076 | ||
1077 | return !!n; | |
1078 | } | |
1079 | ||
682968e0 SD |
1080 | /* |
1081 | * Called in context of a munmap of a vma. | |
1082 | */ | |
cbc91f71 | 1083 | void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
682968e0 | 1084 | { |
441f1eb7 | 1085 | if (no_uprobe_events() || !valid_vma(vma, false)) |
682968e0 SD |
1086 | return; |
1087 | ||
2fd611a9 ON |
1088 | if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ |
1089 | return; | |
1090 | ||
9f68f672 ON |
1091 | if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || |
1092 | test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) | |
f8ac4ec9 ON |
1093 | return; |
1094 | ||
9f68f672 ON |
1095 | if (vma_has_uprobes(vma, start, end)) |
1096 | set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); | |
682968e0 SD |
1097 | } |
1098 | ||
d4b3b638 | 1099 | /* Slot allocation for XOL */ |
6441ec8b | 1100 | static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) |
d4b3b638 | 1101 | { |
c8a82538 | 1102 | int ret = -EALREADY; |
d4b3b638 SD |
1103 | |
1104 | down_write(&mm->mmap_sem); | |
1105 | if (mm->uprobes_state.xol_area) | |
1106 | goto fail; | |
1107 | ||
af0d95af ON |
1108 | if (!area->vaddr) { |
1109 | /* Try to map as high as possible, this is only a hint. */ | |
1110 | area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, | |
1111 | PAGE_SIZE, 0, 0); | |
1112 | if (area->vaddr & ~PAGE_MASK) { | |
1113 | ret = area->vaddr; | |
1114 | goto fail; | |
1115 | } | |
d4b3b638 SD |
1116 | } |
1117 | ||
1118 | ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE, | |
1119 | VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page); | |
1120 | if (ret) | |
1121 | goto fail; | |
1122 | ||
1123 | smp_wmb(); /* pairs with get_xol_area() */ | |
1124 | mm->uprobes_state.xol_area = area; | |
c8a82538 | 1125 | fail: |
d4b3b638 | 1126 | up_write(&mm->mmap_sem); |
d4b3b638 SD |
1127 | |
1128 | return ret; | |
1129 | } | |
1130 | ||
af0d95af | 1131 | static struct xol_area *__create_xol_area(unsigned long vaddr) |
d4b3b638 | 1132 | { |
9b545df8 | 1133 | struct mm_struct *mm = current->mm; |
e78aebfd | 1134 | uprobe_opcode_t insn = UPROBE_SWBP_INSN; |
6441ec8b | 1135 | struct xol_area *area; |
9b545df8 | 1136 | |
af0d95af | 1137 | area = kmalloc(sizeof(*area), GFP_KERNEL); |
d4b3b638 | 1138 | if (unlikely(!area)) |
c8a82538 | 1139 | goto out; |
d4b3b638 SD |
1140 | |
1141 | area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL); | |
d4b3b638 | 1142 | if (!area->bitmap) |
c8a82538 ON |
1143 | goto free_area; |
1144 | ||
1145 | area->page = alloc_page(GFP_HIGHUSER); | |
1146 | if (!area->page) | |
1147 | goto free_bitmap; | |
d4b3b638 | 1148 | |
af0d95af | 1149 | area->vaddr = vaddr; |
6441ec8b ON |
1150 | init_waitqueue_head(&area->wq); |
1151 | /* Reserve the 1st slot for get_trampoline_vaddr() */ | |
e78aebfd | 1152 | set_bit(0, area->bitmap); |
e78aebfd | 1153 | atomic_set(&area->slot_count, 1); |
6441ec8b | 1154 | copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE); |
e78aebfd | 1155 | |
6441ec8b | 1156 | if (!xol_add_vma(mm, area)) |
d4b3b638 SD |
1157 | return area; |
1158 | ||
c8a82538 ON |
1159 | __free_page(area->page); |
1160 | free_bitmap: | |
d4b3b638 | 1161 | kfree(area->bitmap); |
c8a82538 | 1162 | free_area: |
d4b3b638 | 1163 | kfree(area); |
c8a82538 | 1164 | out: |
6441ec8b ON |
1165 | return NULL; |
1166 | } | |
1167 | ||
1168 | /* | |
1169 | * get_xol_area - Allocate process's xol_area if necessary. | |
1170 | * This area will be used for storing instructions for execution out of line. | |
1171 | * | |
1172 | * Returns the allocated area or NULL. | |
1173 | */ | |
1174 | static struct xol_area *get_xol_area(void) | |
1175 | { | |
1176 | struct mm_struct *mm = current->mm; | |
1177 | struct xol_area *area; | |
1178 | ||
1179 | if (!mm->uprobes_state.xol_area) | |
af0d95af | 1180 | __create_xol_area(0); |
6441ec8b | 1181 | |
9b545df8 | 1182 | area = mm->uprobes_state.xol_area; |
6441ec8b | 1183 | smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */ |
9b545df8 | 1184 | return area; |
d4b3b638 SD |
1185 | } |
1186 | ||
1187 | /* | |
1188 | * uprobe_clear_state - Free the area allocated for slots. | |
1189 | */ | |
1190 | void uprobe_clear_state(struct mm_struct *mm) | |
1191 | { | |
1192 | struct xol_area *area = mm->uprobes_state.xol_area; | |
1193 | ||
1194 | if (!area) | |
1195 | return; | |
1196 | ||
1197 | put_page(area->page); | |
1198 | kfree(area->bitmap); | |
1199 | kfree(area); | |
1200 | } | |
1201 | ||
32cdba1e ON |
1202 | void uprobe_start_dup_mmap(void) |
1203 | { | |
1204 | percpu_down_read(&dup_mmap_sem); | |
1205 | } | |
1206 | ||
1207 | void uprobe_end_dup_mmap(void) | |
1208 | { | |
1209 | percpu_up_read(&dup_mmap_sem); | |
1210 | } | |
1211 | ||
f8ac4ec9 ON |
1212 | void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) |
1213 | { | |
61559a81 ON |
1214 | newmm->uprobes_state.xol_area = NULL; |
1215 | ||
9f68f672 | 1216 | if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { |
f8ac4ec9 | 1217 | set_bit(MMF_HAS_UPROBES, &newmm->flags); |
9f68f672 ON |
1218 | /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ |
1219 | set_bit(MMF_RECALC_UPROBES, &newmm->flags); | |
1220 | } | |
f8ac4ec9 ON |
1221 | } |
1222 | ||
d4b3b638 SD |
1223 | /* |
1224 | * - search for a free slot. | |
1225 | */ | |
1226 | static unsigned long xol_take_insn_slot(struct xol_area *area) | |
1227 | { | |
1228 | unsigned long slot_addr; | |
1229 | int slot_nr; | |
1230 | ||
1231 | do { | |
1232 | slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); | |
1233 | if (slot_nr < UINSNS_PER_PAGE) { | |
1234 | if (!test_and_set_bit(slot_nr, area->bitmap)) | |
1235 | break; | |
1236 | ||
1237 | slot_nr = UINSNS_PER_PAGE; | |
1238 | continue; | |
1239 | } | |
1240 | wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); | |
1241 | } while (slot_nr >= UINSNS_PER_PAGE); | |
1242 | ||
1243 | slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); | |
1244 | atomic_inc(&area->slot_count); | |
1245 | ||
1246 | return slot_addr; | |
1247 | } | |
1248 | ||
1249 | /* | |
a6cb3f6d | 1250 | * xol_get_insn_slot - allocate a slot for xol. |
d4b3b638 SD |
1251 | * Returns the allocated slot address or 0. |
1252 | */ | |
a6cb3f6d | 1253 | static unsigned long xol_get_insn_slot(struct uprobe *uprobe) |
d4b3b638 SD |
1254 | { |
1255 | struct xol_area *area; | |
a6cb3f6d | 1256 | unsigned long xol_vaddr; |
d4b3b638 | 1257 | |
9b545df8 ON |
1258 | area = get_xol_area(); |
1259 | if (!area) | |
1260 | return 0; | |
d4b3b638 | 1261 | |
a6cb3f6d ON |
1262 | xol_vaddr = xol_take_insn_slot(area); |
1263 | if (unlikely(!xol_vaddr)) | |
d4b3b638 SD |
1264 | return 0; |
1265 | ||
a6cb3f6d | 1266 | /* Initialize the slot */ |
8a8de66c ON |
1267 | copy_to_page(area->page, xol_vaddr, |
1268 | uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); | |
65b6ecc0 RV |
1269 | /* |
1270 | * We probably need flush_icache_user_range() but it needs vma. | |
1271 | * This should work on supported architectures too. | |
1272 | */ | |
1273 | flush_dcache_page(area->page); | |
d4b3b638 | 1274 | |
a6cb3f6d | 1275 | return xol_vaddr; |
d4b3b638 SD |
1276 | } |
1277 | ||
1278 | /* | |
1279 | * xol_free_insn_slot - If slot was earlier allocated by | |
1280 | * @xol_get_insn_slot(), make the slot available for | |
1281 | * subsequent requests. | |
1282 | */ | |
1283 | static void xol_free_insn_slot(struct task_struct *tsk) | |
1284 | { | |
1285 | struct xol_area *area; | |
1286 | unsigned long vma_end; | |
1287 | unsigned long slot_addr; | |
1288 | ||
1289 | if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) | |
1290 | return; | |
1291 | ||
1292 | slot_addr = tsk->utask->xol_vaddr; | |
af4355e9 | 1293 | if (unlikely(!slot_addr)) |
d4b3b638 SD |
1294 | return; |
1295 | ||
1296 | area = tsk->mm->uprobes_state.xol_area; | |
1297 | vma_end = area->vaddr + PAGE_SIZE; | |
1298 | if (area->vaddr <= slot_addr && slot_addr < vma_end) { | |
1299 | unsigned long offset; | |
1300 | int slot_nr; | |
1301 | ||
1302 | offset = slot_addr - area->vaddr; | |
1303 | slot_nr = offset / UPROBE_XOL_SLOT_BYTES; | |
1304 | if (slot_nr >= UINSNS_PER_PAGE) | |
1305 | return; | |
1306 | ||
1307 | clear_bit(slot_nr, area->bitmap); | |
1308 | atomic_dec(&area->slot_count); | |
1309 | if (waitqueue_active(&area->wq)) | |
1310 | wake_up(&area->wq); | |
1311 | ||
1312 | tsk->utask->xol_vaddr = 0; | |
1313 | } | |
1314 | } | |
1315 | ||
0326f5a9 SD |
1316 | /** |
1317 | * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs | |
1318 | * @regs: Reflects the saved state of the task after it has hit a breakpoint | |
1319 | * instruction. | |
1320 | * Return the address of the breakpoint instruction. | |
1321 | */ | |
1322 | unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) | |
1323 | { | |
1324 | return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; | |
1325 | } | |
1326 | ||
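A concrete example of the generic calculation above, assuming the x86 breakpoint encoding (a one-byte int3):

```c
/*
 * On x86, UPROBE_SWBP_INSN is the one-byte int3 (0xcc), so after the trap
 * the saved instruction pointer is one byte past the probed address and
 *
 *   uprobe_get_swbp_addr(regs) == instruction_pointer(regs) - 1
 *
 * Architectures with different breakpoint semantics override this __weak helper.
 */
```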
1327 | /* | |
1328 | * Called with no locks held. | |
1329 | * Called in context of an exiting or an exec-ing thread.
1330 | */ | |
1331 | void uprobe_free_utask(struct task_struct *t) | |
1332 | { | |
1333 | struct uprobe_task *utask = t->utask; | |
0dfd0eb8 | 1334 | struct return_instance *ri, *tmp; |
0326f5a9 | 1335 | |
0326f5a9 SD |
1336 | if (!utask) |
1337 | return; | |
1338 | ||
1339 | if (utask->active_uprobe) | |
1340 | put_uprobe(utask->active_uprobe); | |
1341 | ||
0dfd0eb8 AA |
1342 | ri = utask->return_instances; |
1343 | while (ri) { | |
1344 | tmp = ri; | |
1345 | ri = ri->next; | |
1346 | ||
1347 | put_uprobe(tmp->uprobe); | |
1348 | kfree(tmp); | |
1349 | } | |
1350 | ||
d4b3b638 | 1351 | xol_free_insn_slot(t); |
0326f5a9 SD |
1352 | kfree(utask); |
1353 | t->utask = NULL; | |
1354 | } | |
1355 | ||
0326f5a9 | 1356 | /* |
5a2df662 ON |
1357 | * Allocate a uprobe_task object for the task if necessary.
1358 | * Called when the thread hits a breakpoint. | |
0326f5a9 SD |
1359 | * |
1360 | * Returns: | |
1361 | * - pointer to new uprobe_task on success | |
1362 | * - NULL otherwise | |
1363 | */ | |
5a2df662 | 1364 | static struct uprobe_task *get_utask(void) |
0326f5a9 | 1365 | { |
5a2df662 ON |
1366 | if (!current->utask) |
1367 | current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); | |
1368 | return current->utask; | |
0326f5a9 SD |
1369 | } |
1370 | ||
248d3a7b ON |
1371 | static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) |
1372 | { | |
1373 | struct uprobe_task *n_utask; | |
1374 | struct return_instance **p, *o, *n; | |
1375 | ||
1376 | n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); | |
1377 | if (!n_utask) | |
1378 | return -ENOMEM; | |
1379 | t->utask = n_utask; | |
1380 | ||
1381 | p = &n_utask->return_instances; | |
1382 | for (o = o_utask->return_instances; o; o = o->next) { | |
1383 | n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); | |
1384 | if (!n) | |
1385 | return -ENOMEM; | |
1386 | ||
1387 | *n = *o; | |
1388 | atomic_inc(&n->uprobe->ref); | |
1389 | n->next = NULL; | |
1390 | ||
1391 | *p = n; | |
1392 | p = &n->next; | |
1393 | n_utask->depth++; | |
1394 | } | |
1395 | ||
1396 | return 0; | |
1397 | } | |
1398 | ||
1399 | static void uprobe_warn(struct task_struct *t, const char *msg) | |
1400 | { | |
1401 | pr_warn("uprobe: %s:%d failed to %s\n", | |
1402 | current->comm, current->pid, msg); | |
1403 | } | |
1404 | ||
aa59c53f ON |
1405 | static void dup_xol_work(struct callback_head *work) |
1406 | { | |
1407 | kfree(work); | |
1408 | ||
1409 | if (current->flags & PF_EXITING) | |
1410 | return; | |
1411 | ||
1412 | if (!__create_xol_area(current->utask->vaddr)) | |
1413 | uprobe_warn(current, "dup xol area"); | |
1414 | } | |
1415 | ||
b68e0749 ON |
1416 | /* |
1417 | * Called in context of a new clone/fork from copy_process. | |
1418 | */ | |
3ab67966 | 1419 | void uprobe_copy_process(struct task_struct *t, unsigned long flags) |
b68e0749 | 1420 | { |
248d3a7b ON |
1421 | struct uprobe_task *utask = current->utask; |
1422 | struct mm_struct *mm = current->mm; | |
aa59c53f ON |
1423 | struct callback_head *work; |
1424 | struct xol_area *area; | |
248d3a7b | 1425 | |
b68e0749 | 1426 | t->utask = NULL; |
248d3a7b | 1427 | |
3ab67966 ON |
1428 | if (!utask || !utask->return_instances) |
1429 | return; | |
1430 | ||
1431 | if (mm == t->mm && !(flags & CLONE_VFORK)) | |
248d3a7b ON |
1432 | return; |
1433 | ||
1434 | if (dup_utask(t, utask)) | |
1435 | return uprobe_warn(t, "dup ret instances"); | |
aa59c53f ON |
1436 | |
1437 | /* The task can fork() after dup_xol_work() fails */ | |
1438 | area = mm->uprobes_state.xol_area; | |
1439 | if (!area) | |
1440 | return uprobe_warn(t, "dup xol area"); | |
1441 | ||
3ab67966 ON |
1442 | if (mm == t->mm) |
1443 | return; | |
1444 | ||
aa59c53f ON |
1445 | /* TODO: move it into the union in uprobe_task */ |
1446 | work = kmalloc(sizeof(*work), GFP_KERNEL); | |
1447 | if (!work) | |
1448 | return uprobe_warn(t, "dup xol area"); | |
1449 | ||
70d7f987 | 1450 | t->utask->vaddr = area->vaddr; |
aa59c53f ON |
1451 | init_task_work(work, dup_xol_work); |
1452 | task_work_add(t, work, true); | |
b68e0749 ON |
1453 | } |
1454 | ||
e78aebfd AA |
1455 | /* |
1456 | * The current area->vaddr notion assumes the trampoline address is always
1457 | * equal to area->vaddr.
1458 | * | |
1459 | * Returns -1 in case the xol_area is not allocated. | |
1460 | */ | |
1461 | static unsigned long get_trampoline_vaddr(void) | |
1462 | { | |
1463 | struct xol_area *area; | |
1464 | unsigned long trampoline_vaddr = -1; | |
1465 | ||
1466 | area = current->mm->uprobes_state.xol_area; | |
1467 | smp_read_barrier_depends(); | |
1468 | if (area) | |
1469 | trampoline_vaddr = area->vaddr; | |
1470 | ||
1471 | return trampoline_vaddr; | |
1472 | } | |
1473 | ||
0dfd0eb8 AA |
1474 | static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) |
1475 | { | |
1476 | struct return_instance *ri; | |
1477 | struct uprobe_task *utask; | |
1478 | unsigned long orig_ret_vaddr, trampoline_vaddr; | |
1479 | bool chained = false; | |
1480 | ||
1481 | if (!get_xol_area()) | |
1482 | return; | |
1483 | ||
1484 | utask = get_utask(); | |
1485 | if (!utask) | |
1486 | return; | |
1487 | ||
ded49c55 AA |
1488 | if (utask->depth >= MAX_URETPROBE_DEPTH) { |
1489 | printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" | |
1490 | " nestedness limit pid/tgid=%d/%d\n", | |
1491 | current->pid, current->tgid); | |
1492 | return; | |
1493 | } | |
1494 | ||
0dfd0eb8 AA |
1495 | ri = kzalloc(sizeof(struct return_instance), GFP_KERNEL); |
1496 | if (!ri) | |
1497 | goto fail; | |
1498 | ||
1499 | trampoline_vaddr = get_trampoline_vaddr(); | |
1500 | orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); | |
1501 | if (orig_ret_vaddr == -1) | |
1502 | goto fail; | |
1503 | ||
1504 | /* | |
1505 | * We don't want to keep the trampoline address on the stack; rather, keep the
1506 | * original return address of the first caller through all the subsequent
1507 | * instances. This also makes breakpoint unwrapping easier. | |
1508 | */ | |
1509 | if (orig_ret_vaddr == trampoline_vaddr) { | |
1510 | if (!utask->return_instances) { | |
1511 | /* | |
1512 | * This situation is not possible. Likely we have an | |
1513 | * attack from user-space. | |
1514 | */ | |
1515 | pr_warn("uprobe: unable to set uretprobe pid/tgid=%d/%d\n", | |
1516 | current->pid, current->tgid); | |
1517 | goto fail; | |
1518 | } | |
1519 | ||
1520 | chained = true; | |
1521 | orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; | |
1522 | } | |
1523 | ||
1524 | atomic_inc(&uprobe->ref); | |
1525 | ri->uprobe = uprobe; | |
1526 | ri->func = instruction_pointer(regs); | |
1527 | ri->orig_ret_vaddr = orig_ret_vaddr; | |
1528 | ri->chained = chained; | |
1529 | ||
ded49c55 AA |
1530 | utask->depth++; |
1531 | ||
0dfd0eb8 AA |
1532 | /* add instance to the stack */ |
1533 | ri->next = utask->return_instances; | |
1534 | utask->return_instances = ri; | |
1535 | ||
1536 | return; | |
1537 | ||
1538 | fail: | |
1539 | kfree(ri); | |
1540 | } | |
1541 | ||
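/*
 * Illustrative sketch, not part of this file: a hypothetical helper that
 * walks the per-task return_instance stack built by prepare_uretprobe()
 * above.  Only the fields populated above (func, orig_ret_vaddr, chained,
 * next) are read; the helper name and the __maybe_unused marker are
 * assumptions added for illustration.
 */
static void __maybe_unused dump_return_instances(struct uprobe_task *utask)
{
	struct return_instance *ri;
	int depth = 0;

	/* newest (deepest) instance first, the first caller's frame last */
	for (ri = utask->return_instances; ri; ri = ri->next)
		pr_info("uretprobe[%d]: func=0x%lx orig_ret=0x%lx chained=%d\n",
			depth++, ri->func, ri->orig_ret_vaddr, ri->chained);
}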
0326f5a9 SD |
1542 | /* Prepare to single-step probed instruction out of line. */ |
1543 | static int | |
a6cb3f6d | 1544 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) |
0326f5a9 | 1545 | { |
a6cb3f6d ON |
1546 | struct uprobe_task *utask; |
1547 | unsigned long xol_vaddr; | |
aba51024 | 1548 | int err; |
a6cb3f6d | 1549 | |
608e7427 ON |
1550 | utask = get_utask(); |
1551 | if (!utask) | |
1552 | return -ENOMEM; | |
a6cb3f6d ON |
1553 | |
1554 | xol_vaddr = xol_get_insn_slot(uprobe); | |
1555 | if (!xol_vaddr) | |
1556 | return -ENOMEM; | |
1557 | ||
1558 | utask->xol_vaddr = xol_vaddr; | |
1559 | utask->vaddr = bp_vaddr; | |
d4b3b638 | 1560 | |
aba51024 ON |
1561 | err = arch_uprobe_pre_xol(&uprobe->arch, regs); |
1562 | if (unlikely(err)) { | |
1563 | xol_free_insn_slot(current); | |
1564 | return err; | |
1565 | } | |
1566 | ||
608e7427 ON |
1567 | utask->active_uprobe = uprobe; |
1568 | utask->state = UTASK_SSTEP; | |
aba51024 | 1569 | return 0; |
0326f5a9 SD |
1570 | } |
1571 | ||
1572 | /* | |
1573 | * If we are singlestepping, then ensure that this thread is not delivered |
1574 | * any non-fatal signal until the singlestep completes. When the xol insn |
1575 | * itself triggers a signal, restart the original insn even if the task is |
1576 | * already SIGKILL'ed (since the coredump should report the correct ip). This |
1577 | * is even more important if the task has a handler for SIGSEGV/etc.: the |
1578 | * _same_ instruction must be repeated again after return from the signal |
1579 | * handler, and the SSTEP can never finish in that case. |
1580 | */ | |
1581 | bool uprobe_deny_signal(void) | |
1582 | { | |
1583 | struct task_struct *t = current; | |
1584 | struct uprobe_task *utask = t->utask; | |
1585 | ||
1586 | if (likely(!utask || !utask->active_uprobe)) | |
1587 | return false; | |
1588 | ||
1589 | WARN_ON_ONCE(utask->state != UTASK_SSTEP); | |
1590 | ||
1591 | if (signal_pending(t)) { | |
1592 | spin_lock_irq(&t->sighand->siglock); | |
1593 | clear_tsk_thread_flag(t, TIF_SIGPENDING); | |
1594 | spin_unlock_irq(&t->sighand->siglock); | |
1595 | ||
1596 | if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { | |
1597 | utask->state = UTASK_SSTEP_TRAPPED; | |
1598 | set_tsk_thread_flag(t, TIF_UPROBE); | |
1599 | set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); | |
1600 | } | |
1601 | } | |
1602 | ||
1603 | return true; | |
1604 | } | |
1605 | ||
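/*
 * Illustrative sketch, not part of this file: how a signal-delivery path is
 * expected to consult uprobe_deny_signal(), per the comment above.  The
 * wrapper function and its return values are hypothetical; only the
 * "return early when the helper says true" pattern is the point.
 */
static int __maybe_unused example_signal_entry(void)
{
	/*
	 * uprobe_deny_signal() returns true only while the current task has
	 * an active uprobe in UTASK_SSTEP.  It clears TIF_SIGPENDING so the
	 * step can complete, and re-arms TIF_UPROBE/TIF_NOTIFY_RESUME if the
	 * xol insn trapped or a fatal signal is pending.
	 */
	if (unlikely(uprobe_deny_signal()))
		return 0;	/* pretend there is no signal for now */

	return 1;		/* placeholder for normal signal processing */
}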
1606 | /* | |
1607 | * Avoid singlestepping the original instruction if the original instruction | |
1608 | * is a NOP or can be emulated. | |
1609 | */ | |
1610 | static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs) | |
1611 | { | |
71434f2f | 1612 | if (test_bit(UPROBE_SKIP_SSTEP, &uprobe->flags)) { |
0578a970 ON |
1613 | if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) |
1614 | return true; | |
71434f2f | 1615 | clear_bit(UPROBE_SKIP_SSTEP, &uprobe->flags); |
0578a970 | 1616 | } |
0326f5a9 SD |
1617 | return false; |
1618 | } | |
1619 | ||
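/*
 * Illustrative sketch, not part of this file: my understanding is that the
 * generic fallback of arch_uprobe_skip_sstep() simply reports "cannot skip",
 * roughly as below, so only architectures able to emulate the instruction
 * (e.g. a NOP) return true and avoid the out-of-line single-step.  Treat
 * this as an assumption, not a quote of the kernel; the name below is
 * hypothetical.
 */
static bool __maybe_unused example_skip_sstep(struct arch_uprobe *auprobe,
					      struct pt_regs *regs)
{
	return false;	/* default: always single-step out of line */
}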
499a4f3e ON |
1620 | static void mmf_recalc_uprobes(struct mm_struct *mm) |
1621 | { | |
1622 | struct vm_area_struct *vma; | |
1623 | ||
1624 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | |
1625 | if (!valid_vma(vma, false)) | |
1626 | continue; | |
1627 | /* | |
1628 | * This is not strictly accurate: we can race with |
1629 | * uprobe_unregister() and see an already-removed |
1630 | * uprobe if delete_uprobe() was not yet called. |
63633cbf | 1631 | * Or this uprobe can be filtered out. |
499a4f3e ON |
1632 | */ |
1633 | if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) | |
1634 | return; | |
1635 | } | |
1636 | ||
1637 | clear_bit(MMF_HAS_UPROBES, &mm->flags); | |
1638 | } | |
1639 | ||
0908ad6e | 1640 | static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) |
ec75fba9 ON |
1641 | { |
1642 | struct page *page; | |
1643 | uprobe_opcode_t opcode; | |
1644 | int result; | |
1645 | ||
1646 | pagefault_disable(); | |
1647 | result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr, | |
1648 | sizeof(opcode)); | |
1649 | pagefault_enable(); | |
1650 | ||
1651 | if (likely(result == 0)) | |
1652 | goto out; | |
1653 | ||
1654 | result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL); | |
1655 | if (result < 0) | |
1656 | return result; | |
1657 | ||
ab0d805c | 1658 | copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); |
ec75fba9 ON |
1659 | put_page(page); |
1660 | out: | |
0908ad6e AM |
1661 | /* This needs to return true for any variant of the trap insn */ |
1662 | return is_trap_insn(&opcode); | |
ec75fba9 ON |
1663 | } |
1664 | ||
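/*
 * Illustrative sketch, not part of this file: is_trap_insn() used above is
 * arch-overridable; to my understanding its generic behaviour is simply to
 * defer to is_swbp_insn(), roughly as below, so architectures with several
 * breakpoint encodings (each a "variant of the trap insn") must override it.
 * The wrapper name is hypothetical and this is an assumption, not a quote.
 */
static bool __maybe_unused example_is_trap(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}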
d790d346 | 1665 | static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) |
0326f5a9 | 1666 | { |
3a9ea052 ON |
1667 | struct mm_struct *mm = current->mm; |
1668 | struct uprobe *uprobe = NULL; | |
0326f5a9 | 1669 | struct vm_area_struct *vma; |
0326f5a9 | 1670 | |
0326f5a9 SD |
1671 | down_read(&mm->mmap_sem); |
1672 | vma = find_vma(mm, bp_vaddr); | |
3a9ea052 ON |
1673 | if (vma && vma->vm_start <= bp_vaddr) { |
1674 | if (valid_vma(vma, false)) { | |
f281769e | 1675 | struct inode *inode = file_inode(vma->vm_file); |
cb113b47 | 1676 | loff_t offset = vaddr_to_offset(vma, bp_vaddr); |
0326f5a9 | 1677 | |
3a9ea052 ON |
1678 | uprobe = find_uprobe(inode, offset); |
1679 | } | |
d790d346 ON |
1680 | |
1681 | if (!uprobe) | |
0908ad6e | 1682 | *is_swbp = is_trap_at_addr(mm, bp_vaddr); |
d790d346 ON |
1683 | } else { |
1684 | *is_swbp = -EFAULT; | |
0326f5a9 | 1685 | } |
499a4f3e ON |
1686 | |
1687 | if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) | |
1688 | mmf_recalc_uprobes(mm); | |
0326f5a9 SD |
1689 | up_read(&mm->mmap_sem); |
1690 | ||
3a9ea052 ON |
1691 | return uprobe; |
1692 | } | |
1693 | ||
da1816b1 ON |
1694 | static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) |
1695 | { | |
1696 | struct uprobe_consumer *uc; | |
1697 | int remove = UPROBE_HANDLER_REMOVE; | |
0dfd0eb8 | 1698 | bool need_prep = false; /* prepare return uprobe, when needed */ |
da1816b1 ON |
1699 | |
1700 | down_read(&uprobe->register_rwsem); | |
1701 | for (uc = uprobe->consumers; uc; uc = uc->next) { | |
ea024870 | 1702 | int rc = 0; |
da1816b1 | 1703 | |
ea024870 AA |
1704 | if (uc->handler) { |
1705 | rc = uc->handler(uc, regs); | |
1706 | WARN(rc & ~UPROBE_HANDLER_MASK, | |
1707 | "bad rc=0x%x from %pf()\n", rc, uc->handler); | |
1708 | } | |
0dfd0eb8 AA |
1709 | |
1710 | if (uc->ret_handler) | |
1711 | need_prep = true; | |
1712 | ||
da1816b1 ON |
1713 | remove &= rc; |
1714 | } | |
1715 | ||
0dfd0eb8 AA |
1716 | if (need_prep && !remove) |
1717 | prepare_uretprobe(uprobe, regs); /* put bp at return */ | |
1718 | ||
da1816b1 ON |
1719 | if (remove && uprobe->consumers) { |
1720 | WARN_ON(!uprobe_is_active(uprobe)); | |
1721 | unapply_uprobe(uprobe, current->mm); | |
1722 | } | |
1723 | up_read(&uprobe->register_rwsem); | |
1724 | } | |
1725 | ||
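/*
 * Illustrative sketch, not part of this file: a minimal consumer as seen by
 * handler_chain() above.  The names are hypothetical; the signatures mirror
 * the calls made in this file (uc->handler(uc, regs) and
 * uc->ret_handler(uc, ri->func, regs)).  Returning UPROBE_HANDLER_REMOVE
 * from every consumer makes handler_chain() unapply the uprobe from this mm,
 * and a non-NULL ->ret_handler is what makes it call prepare_uretprobe().
 */
static int example_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/* runs when the breakpoint at the probed instruction is hit */
	return 0;			/* 0: keep the probe installed */
}

static int example_ret_handler(struct uprobe_consumer *self,
			       unsigned long func, struct pt_regs *regs)
{
	/* runs when the probed function returns through the trampoline */
	return 0;
}

static struct uprobe_consumer __maybe_unused example_consumer = {
	.handler	= example_handler,
	.ret_handler	= example_ret_handler,
};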
fec8898d AA |
1726 | static void |
1727 | handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) | |
1728 | { | |
1729 | struct uprobe *uprobe = ri->uprobe; | |
1730 | struct uprobe_consumer *uc; | |
1731 | ||
1732 | down_read(&uprobe->register_rwsem); | |
1733 | for (uc = uprobe->consumers; uc; uc = uc->next) { | |
1734 | if (uc->ret_handler) | |
1735 | uc->ret_handler(uc, ri->func, regs); | |
1736 | } | |
1737 | up_read(&uprobe->register_rwsem); | |
1738 | } | |
1739 | ||
1740 | static bool handle_trampoline(struct pt_regs *regs) | |
1741 | { | |
1742 | struct uprobe_task *utask; | |
1743 | struct return_instance *ri, *tmp; | |
1744 | bool chained; | |
1745 | ||
1746 | utask = current->utask; | |
1747 | if (!utask) | |
1748 | return false; | |
1749 | ||
1750 | ri = utask->return_instances; | |
1751 | if (!ri) | |
1752 | return false; | |
1753 | ||
1754 | /* | |
1755 | * TODO: we should throw out return_instances invalidated by |
1756 | * longjmp(); currently we assume that the probed function always |
1757 | * returns. | |
1758 | */ | |
1759 | instruction_pointer_set(regs, ri->orig_ret_vaddr); | |
1760 | ||
1761 | for (;;) { | |
1762 | handle_uretprobe_chain(ri, regs); | |
1763 | ||
1764 | chained = ri->chained; | |
1765 | put_uprobe(ri->uprobe); | |
1766 | ||
1767 | tmp = ri; | |
1768 | ri = ri->next; | |
1769 | kfree(tmp); | |
878b5a6e | 1770 | utask->depth--; |
fec8898d AA |
1771 | |
1772 | if (!chained) | |
1773 | break; | |
fec8898d AA |
1774 | BUG_ON(!ri); |
1775 | } | |
1776 | ||
1777 | utask->return_instances = ri; | |
1778 | ||
1779 | return true; | |
1780 | } | |
1781 | ||
3a9ea052 ON |
1782 | /* |
1783 | * Run handler and ask thread to singlestep. | |
1784 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. | |
1785 | */ | |
1786 | static void handle_swbp(struct pt_regs *regs) | |
1787 | { | |
3a9ea052 ON |
1788 | struct uprobe *uprobe; |
1789 | unsigned long bp_vaddr; | |
56bb4cf6 | 1790 | int uninitialized_var(is_swbp); |
3a9ea052 ON |
1791 | |
1792 | bp_vaddr = uprobe_get_swbp_addr(regs); | |
fec8898d AA |
1793 | if (bp_vaddr == get_trampoline_vaddr()) { |
1794 | if (handle_trampoline(regs)) | |
1795 | return; | |
3a9ea052 | 1796 | |
fec8898d AA |
1797 | pr_warn("uprobe: unable to handle uretprobe pid/tgid=%d/%d\n", |
1798 | current->pid, current->tgid); | |
1799 | } | |
1800 | ||
1801 | uprobe = find_active_uprobe(bp_vaddr, &is_swbp); | |
0326f5a9 | 1802 | if (!uprobe) { |
56bb4cf6 ON |
1803 | if (is_swbp > 0) { |
1804 | /* No matching uprobe; signal SIGTRAP. */ | |
1805 | send_sig(SIGTRAP, current, 0); | |
1806 | } else { | |
1807 | /* | |
1808 | * Either we raced with uprobe_unregister() or we can't | |
1809 | * access this memory. The latter is only possible if | |
1810 | * another thread plays with our ->mm. In both cases | |
1811 | * we can simply restart. If this vma was unmapped we | |
1812 | * can pretend this insn was not executed yet and get | |
1813 | * the (correct) SIGSEGV after restart. | |
1814 | */ | |
1815 | instruction_pointer_set(regs, bp_vaddr); | |
1816 | } | |
0326f5a9 SD |
1817 | return; |
1818 | } | |
74e59dfc ON |
1819 | |
1820 | /* change it in advance for ->handler() and restart */ | |
1821 | instruction_pointer_set(regs, bp_vaddr); | |
1822 | ||
142b18dd ON |
1823 | /* |
1824 | * TODO: move copy_insn/etc into _register and remove this hack. | |
1825 | * After we hit the bp, _unregister + _register can install the | |
1826 | * new and not-yet-analyzed uprobe at the same address; restart. |
1827 | */ | |
1828 | smp_rmb(); /* pairs with wmb() in install_breakpoint() */ | |
71434f2f | 1829 | if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) |
74e59dfc | 1830 | goto out; |
0326f5a9 | 1831 | |
0326f5a9 | 1832 | handler_chain(uprobe, regs); |
0578a970 ON |
1833 | if (can_skip_sstep(uprobe, regs)) |
1834 | goto out; | |
0326f5a9 | 1835 | |
608e7427 | 1836 | if (!pre_ssout(uprobe, regs, bp_vaddr)) |
0326f5a9 | 1837 | return; |
0326f5a9 | 1838 | |
74e59dfc | 1839 | /* can_skip_sstep() succeeded, or restart if can't singlestep */ |
0578a970 | 1840 | out: |
8bd87445 | 1841 | put_uprobe(uprobe); |
0326f5a9 SD |
1842 | } |
1843 | ||
1844 | /* | |
1845 | * Perform required fix-ups and disable singlestep. | |
1846 | * Allow pending signals to take effect. | |
1847 | */ | |
1848 | static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) | |
1849 | { | |
1850 | struct uprobe *uprobe; | |
1851 | ||
1852 | uprobe = utask->active_uprobe; | |
1853 | if (utask->state == UTASK_SSTEP_ACK) | |
1854 | arch_uprobe_post_xol(&uprobe->arch, regs); | |
1855 | else if (utask->state == UTASK_SSTEP_TRAPPED) | |
1856 | arch_uprobe_abort_xol(&uprobe->arch, regs); | |
1857 | else | |
1858 | WARN_ON_ONCE(1); | |
1859 | ||
1860 | put_uprobe(uprobe); | |
1861 | utask->active_uprobe = NULL; | |
1862 | utask->state = UTASK_RUNNING; | |
d4b3b638 | 1863 | xol_free_insn_slot(current); |
0326f5a9 SD |
1864 | |
1865 | spin_lock_irq(¤t->sighand->siglock); | |
1866 | recalc_sigpending(); /* see uprobe_deny_signal() */ | |
1867 | spin_unlock_irq(¤t->sighand->siglock); | |
1868 | } | |
1869 | ||
1870 | /* | |
1b08e907 ON |
1871 | * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and |
1872 | * allows the thread to return from interrupt. After that handle_swbp() | |
1873 | * sets utask->active_uprobe. | |
0326f5a9 | 1874 | * |
1b08e907 ON |
1875 | * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag |
1876 | * and allows the thread to return from interrupt. | |
0326f5a9 SD |
1877 | * |
1878 | * While returning to userspace, thread notices the TIF_UPROBE flag and calls | |
1879 | * uprobe_notify_resume(). | |
1880 | */ | |
1881 | void uprobe_notify_resume(struct pt_regs *regs) | |
1882 | { | |
1883 | struct uprobe_task *utask; | |
1884 | ||
db023ea5 ON |
1885 | clear_thread_flag(TIF_UPROBE); |
1886 | ||
0326f5a9 | 1887 | utask = current->utask; |
1b08e907 | 1888 | if (utask && utask->active_uprobe) |
0326f5a9 | 1889 | handle_singlestep(utask, regs); |
1b08e907 ON |
1890 | else |
1891 | handle_swbp(regs); | |
0326f5a9 SD |
1892 | } |
1893 | ||
1894 | /* | |
1895 | * uprobe_pre_sstep_notifier gets called from interrupt context as part of | |
1896 | * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit. | |
1897 | */ | |
1898 | int uprobe_pre_sstep_notifier(struct pt_regs *regs) | |
1899 | { | |
0dfd0eb8 AA |
1900 | if (!current->mm) |
1901 | return 0; | |
1902 | ||
1903 | if (!test_bit(MMF_HAS_UPROBES, ¤t->mm->flags) && | |
1904 | (!current->utask || !current->utask->return_instances)) | |
0326f5a9 SD |
1905 | return 0; |
1906 | ||
0326f5a9 | 1907 | set_thread_flag(TIF_UPROBE); |
0326f5a9 SD |
1908 | return 1; |
1909 | } | |
1910 | ||
1911 | /* | |
1912 | * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier | |
1913 | * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep. | |
1914 | */ | |
1915 | int uprobe_post_sstep_notifier(struct pt_regs *regs) | |
1916 | { | |
1917 | struct uprobe_task *utask = current->utask; | |
1918 | ||
1919 | if (!current->mm || !utask || !utask->active_uprobe) | |
1920 | /* task is currently not uprobed */ | |
1921 | return 0; | |
1922 | ||
1923 | utask->state = UTASK_SSTEP_ACK; | |
1924 | set_thread_flag(TIF_UPROBE); | |
1925 | return 1; | |
1926 | } | |
1927 | ||
1928 | static struct notifier_block uprobe_exception_nb = { | |
1929 | .notifier_call = arch_uprobe_exception_notify, | |
1930 | .priority = INT_MAX-1, /* notified after kprobes, kgdb */ | |
1931 | }; | |
1932 | ||
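/*
 * Illustrative sketch, not part of this file: how an architecture is
 * expected to feed the two notifiers above from its die notifier chain,
 * loosely modeled on x86.  The function name is hypothetical, and the
 * DIE_INT3/DIE_DEBUG values plus the user_mode() check are assumptions; the
 * point is that a breakpoint trap routes to uprobe_pre_sstep_notifier() and
 * a single-step trap to uprobe_post_sstep_notifier(), each setting
 * TIF_UPROBE so that uprobe_notify_resume() runs on return to userspace.
 */
static int __maybe_unused example_exception_notify(struct notifier_block *self,
						   unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	/* only traps raised from user space are interesting */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:				/* breakpoint hit */
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_DEBUG:				/* single-step completed */
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	}
	return NOTIFY_DONE;
}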
2b144498 SD |
1933 | static int __init init_uprobes(void) |
1934 | { | |
1935 | int i; | |
1936 | ||
66d06dff | 1937 | for (i = 0; i < UPROBES_HASH_SZ; i++) |
2b144498 | 1938 | mutex_init(&uprobes_mmap_mutex[i]); |
0326f5a9 | 1939 | |
32cdba1e ON |
1940 | if (percpu_init_rwsem(&dup_mmap_sem)) |
1941 | return -ENOMEM; | |
1942 | ||
0326f5a9 | 1943 | return register_die_notifier(&uprobe_exception_nb); |
2b144498 | 1944 | } |
736e89d9 | 1945 | __initcall(init_uprobes); |