Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* arch/sparc64/kernel/kprobes.c |
2 | * | |
3 | * Copyright (C) 2004 David S. Miller <davem@davemloft.net> | |
4 | */ | |
5 | ||
6 | #include <linux/config.h> | |
7 | #include <linux/kernel.h> | |
8 | #include <linux/kprobes.h> | |
1da177e4 LT |
9 | #include <asm/kdebug.h> |
10 | #include <asm/signal.h> | |
11 | ||
12 | /* We do not have hardware single-stepping on sparc64. | |
13 | * So we implement software single-stepping with breakpoint | |
14 | * traps. The top-level scheme is similar to that used | |
15 | * in the x86 kprobes implementation. | |
16 | * | |
17 | * In the kprobe->ainsn.insn[] array we store the original | |
18 | * instruction at index zero and a break instruction at | |
19 | * index one. | |
20 | * | |
21 | * When we hit a kprobe we: | |
22 | * - Run the pre-handler | |
23 | * - Remember "regs->tnpc" and interrupt level stored in | |
24 | * "regs->tstate" so we can restore them later | |
25 | * - Disable PIL interrupts | |
26 | * - Set regs->tpc to point to kprobe->ainsn.insn[0] | |
27 | * - Set regs->tnpc to point to kprobe->ainsn.insn[1] | |
28 | * - Mark that we are actively in a kprobe | |
29 | * | |
30 | * At this point we wait for the second breakpoint at | |
31 | * kprobe->ainsn.insn[1] to hit. When it does we: | |
32 | * - Run the post-handler | |
33 | * - Set regs->tpc to "remembered" regs->tnpc stored above, | |
34 | * restore the PIL interrupt level in "regs->tstate" as well | |
35 | * - Make any adjustments necessary to regs->tnpc in order | |
36 | * to handle relative branches correctly. See below. | |
37 | * - Mark that we are no longer actively in a kprobe. | |
38 | */ | |
39 | ||
/* Nothing to validate or allocate per-probe on sparc64; the
 * instruction copy happens later in arch_copy_kprobe().
 */
int arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}
44 | ||
45 | void arch_copy_kprobe(struct kprobe *p) | |
46 | { | |
47 | p->ainsn.insn[0] = *p->addr; | |
48 | p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2; | |
7e1048b1 RL |
49 | p->opcode = *p->addr; |
50 | } | |
51 | ||
52 | void arch_arm_kprobe(struct kprobe *p) | |
53 | { | |
54 | *p->addr = BREAKPOINT_INSTRUCTION; | |
55 | flushi(p->addr); | |
56 | } | |
57 | ||
58 | void arch_disarm_kprobe(struct kprobe *p) | |
59 | { | |
60 | *p->addr = p->opcode; | |
61 | flushi(p->addr); | |
1da177e4 LT |
62 | } |
63 | ||
/* No per-probe resources were allocated, so nothing to tear down. */
void arch_remove_kprobe(struct kprobe *p)
{
}
67 | ||
/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001	/* pre-handler ran, about to single-step */
#define KPROBE_HIT_SS		0x00000002	/* single-stepping the copied insn */

/* State of the probe currently being handled.  NOTE(review): these
 * are plain globals; they appear to be serialized by the kprobe lock
 * taken in kprobe_handler() — confirm against kernel/kprobes.c.
 */
static struct kprobe *current_kprobe;
static unsigned long current_kprobe_orig_tnpc;
static unsigned long current_kprobe_orig_tstate_pil;
static unsigned int kprobe_status;
76 | ||
77 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |
78 | { | |
79 | current_kprobe_orig_tnpc = regs->tnpc; | |
80 | current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); | |
81 | regs->tstate |= TSTATE_PIL; | |
82 | ||
83 | /*single step inline, if it a breakpoint instruction*/ | |
84 | if (p->opcode == BREAKPOINT_INSTRUCTION) { | |
85 | regs->tpc = (unsigned long) p->addr; | |
86 | regs->tnpc = current_kprobe_orig_tnpc; | |
87 | } else { | |
88 | regs->tpc = (unsigned long) &p->ainsn.insn[0]; | |
89 | regs->tnpc = (unsigned long) &p->ainsn.insn[1]; | |
90 | } | |
91 | } | |
92 | ||
1da177e4 LT |
/* Top-level handler for the initial breakpoint trap (ta 0x70).
 * Returns non-zero when the trap was consumed by kprobes.
 *
 * NOTE(review): preemption is disabled on entry and re-enabled
 * either at the no_kprobe exit below or later in
 * post_kprobe_handler(); the paths that return 1 after starting a
 * single-step intentionally keep it disabled.
 */
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *) regs->tpc;
	int ret = 0;

	preempt_disable();

	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		 * Disarm the probe we just hit, and ignore it.
		 */
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				/* Hit another breakpoint while
				 * single-stepping: restore PIL and bail.
				 */
				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					current_kprobe_orig_tstate_pil);
				unlock_kprobes();
				goto no_kprobe;
			}
			/* Re-entrant hit: disarm the nested probe and
			 * resume the original context as if it had not
			 * been there.
			 */
			arch_disarm_kprobe(p);
			regs->tpc = (unsigned long) p->addr;
			regs->tnpc = current_kprobe_orig_tnpc;
			regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					current_kprobe_orig_tstate_pil);
			ret = 1;
		} else {
			p = current_kprobe;
			/* A break_handler (jprobes) may claim the trap
			 * and request a single-step of the probed insn.
			 */
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	current_kprobe = p;
	/* A non-zero pre_handler return means it diverted control
	 * (e.g. jprobes): skip the single-step setup entirely.
	 */
	if (p->pre_handler && p->pre_handler(p, regs))
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
160 | ||
161 | /* If INSN is a relative control transfer instruction, | |
162 | * return the corrected branch destination value. | |
163 | * | |
164 | * The original INSN location was REAL_PC, it actually | |
165 | * executed at PC and produced destination address NPC. | |
166 | */ | |
167 | static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc, | |
168 | unsigned long pc, unsigned long npc) | |
169 | { | |
170 | /* Branch not taken, no mods necessary. */ | |
171 | if (npc == pc + 0x4UL) | |
172 | return real_pc + 0x4UL; | |
173 | ||
174 | /* The three cases are call, branch w/prediction, | |
175 | * and traditional branch. | |
176 | */ | |
177 | if ((insn & 0xc0000000) == 0x40000000 || | |
178 | (insn & 0xc1c00000) == 0x00400000 || | |
179 | (insn & 0xc1c00000) == 0x00800000) { | |
180 | /* The instruction did all the work for us | |
181 | * already, just apply the offset to the correct | |
182 | * instruction location. | |
183 | */ | |
184 | return (real_pc + (npc - pc)); | |
185 | } | |
186 | ||
187 | return real_pc + 0x4UL; | |
188 | } | |
189 | ||
190 | /* If INSN is an instruction which writes it's PC location | |
191 | * into a destination register, fix that up. | |
192 | */ | |
193 | static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc) | |
194 | { | |
195 | unsigned long *slot = NULL; | |
196 | ||
197 | /* Simplest cast is call, which always uses %o7 */ | |
198 | if ((insn & 0xc0000000) == 0x40000000) { | |
199 | slot = ®s->u_regs[UREG_I7]; | |
200 | } | |
201 | ||
202 | /* Jmpl encodes the register inside of the opcode */ | |
203 | if ((insn & 0xc1f80000) == 0x81c00000) { | |
204 | unsigned long rd = ((insn >> 25) & 0x1f); | |
205 | ||
206 | if (rd <= 15) { | |
207 | slot = ®s->u_regs[rd]; | |
208 | } else { | |
209 | /* Hard case, it goes onto the stack. */ | |
210 | flushw_all(); | |
211 | ||
212 | rd -= 16; | |
213 | slot = (unsigned long *) | |
214 | (regs->u_regs[UREG_FP] + STACK_BIAS); | |
215 | slot += rd; | |
216 | } | |
217 | } | |
218 | if (slot != NULL) | |
219 | *slot = real_pc; | |
220 | } | |
221 | ||
222 | /* | |
223 | * Called after single-stepping. p->addr is the address of the | |
224 | * instruction whose first byte has been replaced by the breakpoint | |
225 | * instruction. To avoid the SMP problems that can occur when we | |
226 | * temporarily put back the original opcode to single-step, we | |
227 | * single-stepped a copy of the instruction. The address of this | |
228 | * copy is p->ainsn.insn. | |
229 | * | |
230 | * This function prepares to return from the post-single-step | |
231 | * breakpoint trap. | |
232 | */ | |
233 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) | |
234 | { | |
235 | u32 insn = p->ainsn.insn[0]; | |
236 | ||
237 | regs->tpc = current_kprobe_orig_tnpc; | |
238 | regs->tnpc = relbranch_fixup(insn, | |
239 | (unsigned long) p->addr, | |
240 | (unsigned long) &p->ainsn.insn[0], | |
241 | regs->tnpc); | |
242 | retpc_fixup(regs, insn, (unsigned long) p->addr); | |
243 | ||
244 | regs->tstate = ((regs->tstate & ~TSTATE_PIL) | | |
245 | current_kprobe_orig_tstate_pil); | |
246 | } | |
247 | ||
248 | static inline int post_kprobe_handler(struct pt_regs *regs) | |
249 | { | |
250 | if (!kprobe_running()) | |
251 | return 0; | |
252 | ||
253 | if (current_kprobe->post_handler) | |
254 | current_kprobe->post_handler(current_kprobe, regs, 0); | |
255 | ||
256 | resume_execution(current_kprobe, regs); | |
257 | ||
258 | unlock_kprobes(); | |
259 | preempt_enable_no_resched(); | |
260 | ||
261 | return 1; | |
262 | } | |
263 | ||
264 | /* Interrupts disabled, kprobe_lock held. */ | |
265 | static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |
266 | { | |
267 | if (current_kprobe->fault_handler | |
268 | && current_kprobe->fault_handler(current_kprobe, regs, trapnr)) | |
269 | return 1; | |
270 | ||
271 | if (kprobe_status & KPROBE_HIT_SS) { | |
272 | resume_execution(current_kprobe, regs); | |
273 | ||
274 | unlock_kprobes(); | |
275 | preempt_enable_no_resched(); | |
276 | } | |
277 | return 0; | |
278 | } | |
279 | ||
280 | /* | |
281 | * Wrapper routine to for handling exceptions. | |
282 | */ | |
283 | int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, | |
284 | void *data) | |
285 | { | |
286 | struct die_args *args = (struct die_args *)data; | |
287 | switch (val) { | |
288 | case DIE_DEBUG: | |
289 | if (kprobe_handler(args->regs)) | |
290 | return NOTIFY_STOP; | |
291 | break; | |
292 | case DIE_DEBUG_2: | |
293 | if (post_kprobe_handler(args->regs)) | |
294 | return NOTIFY_STOP; | |
295 | break; | |
296 | case DIE_GPF: | |
297 | if (kprobe_running() && | |
298 | kprobe_fault_handler(args->regs, args->trapnr)) | |
299 | return NOTIFY_STOP; | |
300 | break; | |
301 | case DIE_PAGE_FAULT: | |
302 | if (kprobe_running() && | |
303 | kprobe_fault_handler(args->regs, args->trapnr)) | |
304 | return NOTIFY_STOP; | |
305 | break; | |
306 | default: | |
307 | break; | |
308 | } | |
309 | return NOTIFY_DONE; | |
310 | } | |
311 | ||
312 | asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs) | |
313 | { | |
314 | BUG_ON(trap_level != 0x170 && trap_level != 0x171); | |
315 | ||
316 | if (user_mode(regs)) { | |
317 | local_irq_enable(); | |
318 | bad_trap(regs, trap_level); | |
319 | return; | |
320 | } | |
321 | ||
322 | /* trap_level == 0x170 --> ta 0x70 | |
323 | * trap_level == 0x171 --> ta 0x71 | |
324 | */ | |
325 | if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2, | |
326 | (trap_level == 0x170) ? "debug" : "debug_2", | |
327 | regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP) | |
328 | bad_trap(regs, trap_level); | |
329 | } | |
330 | ||
331 | /* Jprobes support. */ | |
332 | static struct pt_regs jprobe_saved_regs; | |
333 | static struct pt_regs *jprobe_saved_regs_location; | |
334 | static struct sparc_stackf jprobe_saved_stack; | |
335 | ||
336 | int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |
337 | { | |
338 | struct jprobe *jp = container_of(p, struct jprobe, kp); | |
339 | ||
340 | jprobe_saved_regs_location = regs; | |
341 | memcpy(&jprobe_saved_regs, regs, sizeof(*regs)); | |
342 | ||
343 | /* Save a whole stack frame, this gets arguments | |
344 | * pushed onto the stack after using up all the | |
345 | * arg registers. | |
346 | */ | |
347 | memcpy(&jprobe_saved_stack, | |
348 | (char *) (regs->u_regs[UREG_FP] + STACK_BIAS), | |
349 | sizeof(jprobe_saved_stack)); | |
350 | ||
351 | regs->tpc = (unsigned long) jp->entry; | |
352 | regs->tnpc = ((unsigned long) jp->entry) + 0x4UL; | |
353 | regs->tstate |= TSTATE_PIL; | |
354 | ||
355 | return 1; | |
356 | } | |
357 | ||
/* Called by a jprobe handler to return to the original code path.
 * The labelled "ta 0x70" re-enters kprobe_handler(), whose
 * break_handler path (longjmp_break_handler below) recognizes this
 * exact trap address and restores the state saved by
 * setjmp_pre_handler().
 */
void jprobe_return(void)
{
	/* Balance the preempt_disable() done in kprobe_handler(). */
	preempt_enable_no_resched();
	__asm__ __volatile__(
		".globl jprobe_return_trap_instruction\n"
		"jprobe_return_trap_instruction:\n\t"
		"ta 0x70");
}
366 | ||
367 | extern void jprobe_return_trap_instruction(void); | |
368 | ||
369 | extern void __show_regs(struct pt_regs * regs); | |
370 | ||
371 | int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |
372 | { | |
373 | u32 *addr = (u32 *) regs->tpc; | |
374 | ||
375 | if (addr == (u32 *) jprobe_return_trap_instruction) { | |
376 | if (jprobe_saved_regs_location != regs) { | |
377 | printk("JPROBE: Current regs (%p) does not match " | |
378 | "saved regs (%p).\n", | |
379 | regs, jprobe_saved_regs_location); | |
380 | printk("JPROBE: Saved registers\n"); | |
381 | __show_regs(jprobe_saved_regs_location); | |
382 | printk("JPROBE: Current registers\n"); | |
383 | __show_regs(regs); | |
384 | BUG(); | |
385 | } | |
386 | /* Restore old register state. Do pt_regs | |
387 | * first so that UREG_FP is the original one for | |
388 | * the stack frame restore. | |
389 | */ | |
390 | memcpy(regs, &jprobe_saved_regs, sizeof(*regs)); | |
391 | ||
392 | memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS), | |
393 | &jprobe_saved_stack, | |
394 | sizeof(jprobe_saved_stack)); | |
395 | ||
396 | return 1; | |
397 | } | |
398 | return 0; | |
399 | } |