Commit | Line | Data |
---|---|---|
24ba613c AS |
1 | /* |
2 | * arch/arm/kernel/kprobes.c | |
3 | * | |
4 | * Kprobes on ARM | |
5 | * | |
6 | * Abhishek Sagar <sagar.abhishek@gmail.com> | |
7 | * Copyright (C) 2006, 2007 Motorola Inc. | |
8 | * | |
9 | * Nicolas Pitre <nico@marvell.com> | |
10 | * Copyright (C) 2007 Marvell Ltd. | |
11 | * | |
12 | * This program is free software; you can redistribute it and/or modify | |
13 | * it under the terms of the GNU General Public License version 2 as | |
14 | * published by the Free Software Foundation. | |
15 | * | |
16 | * This program is distributed in the hope that it will be useful, | |
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
19 | * General Public License for more details. | |
20 | */ | |
21 | ||
22 | #include <linux/kernel.h> | |
23 | #include <linux/kprobes.h> | |
24 | #include <linux/module.h> | |
5a0e3ad6 | 25 | #include <linux/slab.h> |
2003b7af | 26 | #include <linux/stop_machine.h> |
24ba613c AS |
27 | #include <linux/stringify.h> |
28 | #include <asm/traps.h> | |
29 | #include <asm/cacheflush.h> | |
30 | ||
221bf15f JM |
31 | #include "kprobes.h" |
32 | ||
/*
 * Amount of usable stack below 'addr', capped at MAX_STACK_SIZE. Used to
 * bound how much of the kernel stack is saved/restored around a jprobe.
 */
#define MIN_STACK_SIZE(addr) 				\
	min((unsigned long)MAX_STACK_SIZE,		\
	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

/* Synchronise the I-cache with patched instruction memory at 'addr'. */
#define flush_insns(addr, size)				\
	flush_icache_range((unsigned long)(addr),	\
			   (unsigned long)(addr) +	\
			   (size))

/* Used as a marker in ARM_pc to note when we're in a jprobe. */
#define JPROBE_MAGIC_ADDR		0xffffffff

/* Per-CPU kprobe state: the probe currently being processed, if any. */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

48 | ||
/*
 * Decode the instruction at the probe address and prepare the per-probe
 * state: save the original opcode and, when the decoder asks for one,
 * copy the (possibly modified) instruction into an out-of-line slot.
 *
 * Returns 0 on success, -EINVAL for unprobeable addresses/instructions,
 * -ENOMEM if no instruction slot is available.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t insn;
	kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
	unsigned long addr = (unsigned long)p->addr;
	kprobe_decode_insn_t *decode_insn;
	int is;

	/* Refuse to probe the exception handling code itself. */
	if (in_exception_text(addr))
		return -EINVAL;

#ifdef CONFIG_THUMB2_KERNEL
	addr &= ~1;	/* Bit 0 would normally be set to indicate Thumb code */
	insn = ((u16 *)addr)[0];
	if (is_wide_instruction(insn)) {
		/* 32-bit Thumb instruction: combine its two halfwords. */
		insn <<= 16;
		insn |= ((u16 *)addr)[1];
		decode_insn = thumb32_kprobe_decode_insn;
	} else
		decode_insn = thumb16_kprobe_decode_insn;
#else /* !CONFIG_THUMB2_KERNEL */
	/* ARM instructions must be word aligned. */
	if (addr & 0x3)
		return -EINVAL;
	insn = *p->addr;
	decode_insn = arm_kprobe_decode_insn;
#endif

	p->opcode = insn;
	/* Decode into a temporary slot first; it may be discarded below. */
	p->ainsn.insn = tmp_insn;

	switch ((*decode_insn)(insn, &p->ainsn)) {
	case INSN_REJECTED:	/* not supported */
		return -EINVAL;

	case INSN_GOOD:		/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		for (is = 0; is < MAX_INSN_SIZE; ++is)
			p->ainsn.insn[is] = tmp_insn[is];
		/* The slot will be executed: make the I-cache see it. */
		flush_insns(p->ainsn.insn,
				sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
		break;

	case INSN_GOOD_NO_SLOT:	/* instruction doesn't need insn slot */
		p->ainsn.insn = NULL;
		break;
	}

	return 0;
}
100 | ||
#ifdef CONFIG_THUMB2_KERNEL

/*
 * For a 32-bit Thumb breakpoint spanning two memory words we need to take
 * special precautions to insert the breakpoint atomically, especially on SMP
 * systems. This is achieved by calling this arming function using stop_machine.
 */
static int __kprobes set_t32_breakpoint(void *addr)
{
	((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
	((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
	flush_insns(addr, 2*sizeof(u16));
	return 0;	/* success status required by stop_machine() */
}
115 | ||
/*
 * Arm the probe by writing a breakpoint over the probed instruction.
 * Three Thumb cases: 16-bit instruction (single halfword store), 32-bit
 * instruction straddling a word boundary (patched under stop_machine for
 * atomicity), and word-aligned 32-bit instruction (single word store).
 */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */

	if (!is_wide_instruction(p->opcode)) {
		*(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
		flush_insns(addr, sizeof(u16));
	} else if (addr & 2) {
		/* A 32-bit instruction spanning two words needs special care */
		stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
	} else {
		/* Word aligned 32-bit instruction can be written atomically */
		u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
#ifndef __ARMEB__ /* Swap halfwords for little-endian */
		bkp = (bkp >> 16) | (bkp << 16);
#endif
		*(u32 *)addr = bkp;
		flush_insns(addr, sizeof(u32));
	}
}

136 | ||
aceb487a JM |
137 | #else /* !CONFIG_THUMB2_KERNEL */ |
138 | ||
139 | void __kprobes arch_arm_kprobe(struct kprobe *p) | |
140 | { | |
3b269455 JM |
141 | kprobe_opcode_t insn = p->opcode; |
142 | kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; | |
143 | if (insn >= 0xe0000000) | |
144 | brkp |= 0xe0000000; /* Unconditional instruction */ | |
145 | else | |
146 | brkp |= insn & 0xf0000000; /* Copy condition from insn */ | |
147 | *p->addr = brkp; | |
aceb487a JM |
148 | flush_insns(p->addr, sizeof(p->addr[0])); |
149 | } | |
150 | ||
151 | #endif /* !CONFIG_THUMB2_KERNEL */ | |
152 | ||
/*
 * The actual disarming is done here on each CPU and synchronized using
 * stop_machine. This synchronization is necessary on SMP to avoid removing
 * a probe between the moment the 'Undefined Instruction' exception is raised
 * and the moment the exception handler reads the faulting instruction from
 * memory. It is also needed to atomically set the two half-words of a 32-bit
 * Thumb breakpoint.
 */
int __kprobes __arch_disarm_kprobe(void *p)
{
	struct kprobe *kp = p;
#ifdef CONFIG_THUMB2_KERNEL
	/* Strip the Thumb flag (bit 0) to get the real address. */
	u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
	kprobe_opcode_t insn = kp->opcode;
	unsigned int len;

	if (is_wide_instruction(insn)) {
		/* Restore both halfwords of a 32-bit Thumb instruction. */
		((u16 *)addr)[0] = insn>>16;
		((u16 *)addr)[1] = insn;
		len = 2*sizeof(u16);
	} else {
		((u16 *)addr)[0] = insn;
		len = sizeof(u16);
	}
	flush_insns(addr, len);

#else /* !CONFIG_THUMB2_KERNEL */
	/* ARM: restore the original word and resync the I-cache. */
	*kp->addr = kp->opcode;
	flush_insns(kp->addr, sizeof(kp->addr[0]));
#endif
	return 0;
}

185 | ||
/* Disarm by restoring the original instruction, synchronized on all CPUs. */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
}

190 | ||
191 | void __kprobes arch_remove_kprobe(struct kprobe *p) | |
192 | { | |
193 | if (p->ainsn.insn) { | |
24ba613c | 194 | free_insn_slot(p->ainsn.insn, 0); |
24ba613c AS |
195 | p->ainsn.insn = NULL; |
196 | } | |
197 | } | |
198 | ||
/* Stash the currently-active kprobe so a re-entrant hit can be handled. */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

204 | ||
/* Undo save_previous_kprobe(): reinstate the outer probe's state. */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

210 | ||
/* Record 'p' as the kprobe being processed on this CPU. */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__get_cpu_var(current_kprobe) = p;
}

215 | ||
/*
 * Skip over the probed instruction without executing it (used when its
 * condition code check fails): advance the PC by the instruction's size
 * and, on Thumb-2, advance the IT-block state in the CPSR as well.
 */
static void __kprobes
singlestep_skip(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_THUMB2_KERNEL
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
	if (is_wide_instruction(p->opcode))
		regs->ARM_pc += 4;
	else
		regs->ARM_pc += 2;
#else
	regs->ARM_pc += 4;
#endif
}

229 | ||
/*
 * Emulate/execute the probed instruction out of line. The PC is advanced
 * first, then the instruction's handler runs if its condition passes.
 *
 * NOTE(review): the PC is always advanced by 4 here, even though Thumb
 * instructions may be 2 bytes; presumably the per-instruction handlers
 * account for this - confirm against the decoder tables.
 */
static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs,
				 struct kprobe_ctlblk *kcb)
{
	regs->ARM_pc += 4;
	if (p->ainsn.insn_check_cc(regs->ARM_cpsr))
		p->ainsn.insn_handler(p, regs);
}

237 | ||
/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete. The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * First look for a probe which was registered using an address with
	 * bit 0 set, this is the usual situation for pointers to Thumb code.
	 * If not found, fallback to looking for one with bit 0 clear.
	 */
	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
	if (!p)
		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

	if (p) {
		if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			default:
				/* impossible cases */
				BUG();
			}
		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
			/* Probe hit and conditional execution check ok. */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
				reset_current_kprobe();
			}
		} else {
			/*
			 * Probe hit but conditional execution check failed,
			 * so just skip the instruction and continue as if
			 * nothing had happened.
			 */
			singlestep_skip(p, regs);
		}
	} else if (cur) {
		/* We probably hit a jprobe.  Call its break handler. */
		if (cur->break_handler && cur->break_handler(cur, regs)) {
			kcb->kprobe_status = KPROBE_HIT_SS;
			singlestep(cur, regs, kcb);
			if (cur->post_handler) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				cur->post_handler(cur, regs, 0);
			}
		}
		reset_current_kprobe();
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it.  Let's restart
		 * the instruction.  By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}

334 | ||
/*
 * Undef-instruction hook entry point. Runs kprobe_handler() with IRQs
 * disabled for the whole duration, as kprobe_handler() requires.
 * Always returns 0 (instruction handled).
 */
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned long flags;
	local_irq_save(flags);
	kprobe_handler(regs);
	local_irq_restore(flags);
	return 0;
}

343 | ||
/*
 * Fault fixup while a kprobe is active. Returns 1 if the fault was
 * handled by a user-supplied fault_handler, 0 to let normal page fault
 * processing continue.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the PC to point back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ARM_pc = (long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			reset_current_kprobe();
		}
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;
		break;

	default:
		break;
	}

	return 0;
}

393 | ||
394 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |
395 | unsigned long val, void *data) | |
396 | { | |
397 | /* | |
398 | * notify_die() is currently never called on ARM, | |
399 | * so this callback is currently empty. | |
400 | */ | |
401 | return NOTIFY_DONE; | |
402 | } | |
403 | ||
/*
 * When a retprobed function returns, trampoline_handler() is called,
 * calling the kretprobe's handler. We construct a struct pt_regs to
 * give a view of registers r0-r11 to the user return-handler.  This is
 * not a complete pt_regs structure, but that should be plenty sufficient
 * for kretprobe handlers which should normally be interested in r0 only
 * anyway.
 */
void __naked __kprobes kretprobe_trampoline(void)
{
	__asm__ __volatile__ (
		/* Save r0-r11 as the partial pt_regs handed to handlers. */
		"stmdb	sp!, {r0 - r11}		\n\t"
		"mov	r0, sp			\n\t"
		"bl	trampoline_handler	\n\t"
		/* trampoline_handler returns the real return address. */
		"mov	lr, r0			\n\t"
		"ldmia	sp!, {r0 - r11}		\n\t"
#ifdef CONFIG_THUMB2_KERNEL
		"bx	lr			\n\t"
#else
		"mov	pc, lr			\n\t"
#endif
		: : : "memory");
}

427 | ||
/*
 * Called from kretprobe_trampoline. Walks this task's kretprobe
 * instances, runs their handlers, and returns the real return address
 * so the trampoline can resume the caller.
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			/* Make the handler see an active kprobe context. */
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	kretprobe_hash_unlock(current, &flags);

	/* Free instances recycled above, outside the hash lock. */
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}

487 | ||
/*
 * Hijack the function return: remember the real return address in the
 * kretprobe instance and point LR at the trampoline instead.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;

	/* Replace the return addr with trampoline addr. */
	regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
}

496 | ||
/*
 * jprobe pre-handler: save the register state and the top of the stack,
 * then redirect execution to the jprobe's entry function with IRQs
 * masked. Returns 1 so kprobe_handler() skips normal single-stepping.
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long sp_addr = regs->ARM_sp;
	long cpsr;

	kcb->jprobe_saved_regs = *regs;
	/* Save the stack the entry function may scribble on. */
	memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
	regs->ARM_pc = (long)jp->entry;

	/* Keep IRQs disabled while running the jprobe entry. */
	cpsr = regs->ARM_cpsr | PSR_I_BIT;
#ifdef CONFIG_THUMB2_KERNEL
	/* Set correct Thumb state in cpsr */
	if (regs->ARM_pc & 1)
		cpsr |= PSR_T_BIT;
	else
		cpsr &= ~PSR_T_BIT;
#endif
	regs->ARM_cpsr = cpsr;

	preempt_disable();
	return 1;
}

521 | ||
/*
 * Called by the jprobe entry function to end the probe: fabricates a
 * pt_regs frame marked with JPROBE_MAGIC_ADDR and re-enters
 * kprobe_handler(), whose break_handler path (longjmp_break_handler)
 * restores the context saved by setjmp_pre_handler. Never returns
 * normally to its caller.
 */
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	__asm__ __volatile__ (
		/*
		 * Setup an empty pt_regs. Fill SP and PC fields as
		 * they're needed by longjmp_break_handler.
		 *
		 * We allocate some slack between the original SP and start of
		 * our fabricated regs. To be precise we want to have worst case
		 * covered which is STMFD with all 16 regs so we allocate 2 *
		 * sizeof(struct_pt_regs)).
		 *
		 * This is to prevent any simulated instruction from writing
		 * over the regs when they are accessing the stack.
		 */
#ifdef CONFIG_THUMB2_KERNEL
		"sub    r0, %0, %1		\n\t"
		"mov    sp, r0			\n\t"
#else
		"sub    sp, %0, %1		\n\t"
#endif
		"ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
		"str    %0, [sp, %2]		\n\t"
		"str    r0, [sp, %3]		\n\t"
		"mov    r0, sp			\n\t"
		"bl     kprobe_handler		\n\t"

		/*
		 * Return to the context saved by setjmp_pre_handler
		 * and restored by longjmp_break_handler.
		 */
#ifdef CONFIG_THUMB2_KERNEL
		"ldr	lr, [sp, %2]		\n\t" /* lr = saved sp */
		"ldrd	r0, r1, [sp, %5]	\n\t" /* r0,r1 = saved lr,pc */
		"ldr	r2, [sp, %4]		\n\t" /* r2 = saved psr */
		"stmdb	lr!, {r0, r1, r2}	\n\t" /* push saved lr and */
						      /* rfe context */
		"ldmia	sp, {r0 - r12}		\n\t"
		"mov	sp, lr			\n\t"
		"ldr	lr, [sp], #4		\n\t"
		"rfeia	sp!			\n\t"
#else
		"ldr    r0, [sp, %4]		\n\t"
		"msr    cpsr_cxsf, r0		\n\t"
		"ldmia  sp, {r0 - pc}		\n\t"
#endif
		:
		: "r" (kcb->jprobe_saved_regs.ARM_sp),
		  "I" (sizeof(struct pt_regs) * 2),
		  "J" (offsetof(struct pt_regs, ARM_sp)),
		  "J" (offsetof(struct pt_regs, ARM_pc)),
		  "J" (offsetof(struct pt_regs, ARM_cpsr)),
		  "J" (offsetof(struct pt_regs, ARM_lr))
		: "memory", "cc");
}

579 | ||
580 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |
581 | { | |
582 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
583 | long stack_addr = kcb->jprobe_saved_regs.ARM_sp; | |
584 | long orig_sp = regs->ARM_sp; | |
585 | struct jprobe *jp = container_of(p, struct jprobe, kp); | |
586 | ||
587 | if (regs->ARM_pc == JPROBE_MAGIC_ADDR) { | |
588 | if (orig_sp != stack_addr) { | |
589 | struct pt_regs *saved_regs = | |
590 | (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp; | |
591 | printk("current sp %lx does not match saved sp %lx\n", | |
592 | orig_sp, stack_addr); | |
593 | printk("Saved registers for jprobe %p\n", jp); | |
594 | show_regs(saved_regs); | |
595 | printk("Current registers\n"); | |
596 | show_regs(regs); | |
597 | BUG(); | |
598 | } | |
599 | *regs = kcb->jprobe_saved_regs; | |
600 | memcpy((void *)stack_addr, kcb->jprobes_stack, | |
601 | MIN_STACK_SIZE(stack_addr)); | |
602 | preempt_enable_no_resched(); | |
603 | return 1; | |
604 | } | |
605 | return 0; | |
606 | } | |
607 | ||
/*
 * Always returns 0: this arch registers no special trampoline kprobe
 * that the generic code would need to treat differently.
 */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

612 | ||
#ifdef CONFIG_THUMB2_KERNEL

/* Trap 16-bit Thumb kprobe breakpoints taken in kernel (SVC) mode. */
static struct undef_hook kprobes_thumb16_break_hook = {
	.instr_mask = 0xffff,
	.instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

/* Trap 32-bit Thumb kprobe breakpoints taken in kernel (SVC) mode. */
static struct undef_hook kprobes_thumb32_break_hook = {
	.instr_mask = 0xffffffff,
	.instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

#else /* !CONFIG_THUMB2_KERNEL */

/*
 * Trap ARM kprobe breakpoints in kernel (SVC) mode. The condition
 * nibble is masked out since arch_arm_kprobe() copies the probed
 * instruction's condition into the breakpoint.
 */
static struct undef_hook kprobes_arm_break_hook = {
	.instr_mask = 0x0fffffff,
	.instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

#endif /* !CONFIG_THUMB2_KERNEL */

642 | ||
24ba613c AS |
643 | int __init arch_init_kprobes() |
644 | { | |
645 | arm_kprobe_decode_init(); | |
aceb487a JM |
646 | #ifdef CONFIG_THUMB2_KERNEL |
647 | register_undef_hook(&kprobes_thumb16_break_hook); | |
648 | register_undef_hook(&kprobes_thumb32_break_hook); | |
649 | #else | |
650 | register_undef_hook(&kprobes_arm_break_hook); | |
651 | #endif | |
24ba613c AS |
652 | return 0; |
653 | } |