/* MN10300 Kernel probes implementation
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by Mark Salter (msalter@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public Licence as published by
 * the Free Software Foundation; either version 2 of the Licence, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public Licence for more details.
 *
 * You should have received a copy of the GNU General Public Licence
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
26 | ||
/* No architecture-specific kretprobe blacklist entries on MN10300 */
struct kretprobe_blackpoint kretprobe_blacklist[] = { { NULL, NULL } };
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001	/* probe hit, handlers running */
#define KPROBE_HIT_SS		0x00000002	/* single-stepping the copied insn */
33 | ||
/* State for the probe currently being handled.  A single set of globals:
 * only one probe is serviced at a time (preemption is disabled from hit
 * until resume).
 */
static struct kprobe *cur_kprobe;		/* probe being single-stepped */
static unsigned long cur_kprobe_orig_pc;	/* PC at which the probe hit */
static unsigned long cur_kprobe_next_pc;	/* real PC to resume at afterwards */
static int cur_kprobe_ss_flags;			/* SINGLESTEP_* flags for this step */
static unsigned long kprobe_status;		/* KPROBE_HIT_ACTIVE / KPROBE_HIT_SS */
static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2]; /* out-of-line step buffer */
static unsigned long cur_kprobe_bp_addr;	/* breakpoint planted after the copy */

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;


/* singlestep flag bits */
#define SINGLESTEP_BRANCH 1	/* instruction may transfer control */
#define SINGLESTEP_PCREL 2	/* computed next PC is relative to the stepped copy */
48 | ||
/* Byte-at-a-time reads of the instruction stream, so multi-byte operands
 * can be fetched without alignment constraints */
#define READ_BYTE(p, valp) \
	do { *(u8 *)(valp) = *(u8 *)(p); } while (0)

#define READ_WORD16(p, valp)					\
	do {							\
		READ_BYTE((p), (valp));				\
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);	\
	} while (0)

#define READ_WORD32(p, valp)					\
	do {							\
		READ_BYTE((p), (valp));				\
		READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);	\
		READ_BYTE((u8 *)(p) + 2, (u8 *)(valp) + 2);	\
		READ_BYTE((u8 *)(p) + 3, (u8 *)(valp) + 3);	\
	} while (0)
66 | ||
/* Instruction length in bytes, indexed by the first opcode byte.
 * An entry of 0 means the instruction needs special decoding in
 * find_nextpc() (branches, calls, returns, extended opcodes).
 */
static const u8 mn10300_insn_sizes[256] =
{
	/* 1  2  3  4  5  6  7  8  9  a  b  c  d  e  f */
	1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3,	/* 0 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 1 */
	2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3,	/* 2 */
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1,	/* 3 */
	1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2,	/* 4 */
	1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,	/* 5 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 6 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 7 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* 8 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* 9 */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* a */
	2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2,	/* b */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2,	/* c */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* d */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* e */
	0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1	/* f */
};
87 | ||
/* Branch-condition bits.  The bit positions correspond to the condition
 * encoded in the low nibble of the Bxx opcodes (see find_nextpc()). */
#define LT (1 << 0)
#define GT (1 << 1)
#define GE (1 << 2)
#define LE (1 << 3)
#define CS (1 << 4)
#define HI (1 << 5)
#define CC (1 << 6)
#define LS (1 << 7)
#define EQ (1 << 8)
#define NE (1 << 9)
#define RA (1 << 10)
#define VC (1 << 11)
#define VS (1 << 12)
#define NC (1 << 13)
#define NS (1 << 14)

/* For each combination of the EPSW V/C/N/Z flags (the table index, taken
 * from the low 4 bits of EPSW), the set of branch conditions that are
 * satisfied. */
static const u16 cond_table[] = {
	/*	  V  C  N  Z  */
	/* 0  0  0  0 */ (NE | NC | CC | VC | GE | GT | HI),
	/* 0  0  0  1 */ (EQ | NC | CC | VC | GE | LE | LS),
	/* 0  0  1  0 */ (NE | NS | CC | VC | LT | LE | HI),
	/* 0  0  1  1 */ (EQ | NS | CC | VC | LT | LE | LS),
	/* 0  1  0  0 */ (NE | NC | CS | VC | GE | GT | LS),
	/* 0  1  0  1 */ (EQ | NC | CS | VC | GE | LE | LS),
	/* 0  1  1  0 */ (NE | NS | CS | VC | LT | LE | LS),
	/* 0  1  1  1 */ (EQ | NS | CS | VC | LT | LE | LS),
	/* 1  0  0  0 */ (NE | NC | CC | VS | LT | LE | HI),
	/* 1  0  0  1 */ (EQ | NC | CC | VS | LT | LE | LS),
	/* 1  0  1  0 */ (NE | NS | CC | VS | GE | GT | HI),
	/* 1  0  1  1 */ (EQ | NS | CC | VS | GE | LE | LS),
	/* 1  1  0  0 */ (NE | NC | CS | VS | LT | LE | LS),
	/* 1  1  0  1 */ (EQ | NC | CS | VS | LT | LE | LS),
	/* 1  1  1  0 */ (NE | NS | CS | VS | GE | GT | LS),
	/* 1  1  1  1 */ (EQ | NS | CS | VS | GE | LE | LS),
};
123 | ||
/*
 * Calculate what the PC will be after executing next instruction
 *
 * @regs:  register state at the instruction to be stepped (regs->pc points
 *         at its first byte)
 * @flags: out: SINGLESTEP_PCREL if the returned address is relative to the
 *         stepped location, SINGLESTEP_BRANCH if the insn may branch
 *
 * Returns the address of the instruction that will execute next.
 */
static unsigned find_nextpc(struct pt_regs *regs, int *flags)
{
	unsigned size;
	s8 x8;
	s16 x16;
	s32 x32;
	u8 opc, *pc, *sp, *next;

	next = 0;
	/* assume a PC-relative result until a case below says otherwise */
	*flags = SINGLESTEP_PCREL;

	pc = (u8 *) regs->pc;
	sp = (u8 *) (regs + 1);	/* stack contents follow the pt_regs frame */
	opc = *pc;

	size = mn10300_insn_sizes[opc];
	if (size > 0) {
		/* simple straight-line instruction: next insn follows it */
		next = pc + size;
	} else {
		switch (opc) {
		/* Bxx (d8,PC) */
		case 0xc0 ... 0xca:
			/* fall through the 2-byte insn unless the condition
			 * encoded in the low opcode nibble holds */
			x8 = 2;
			if (cond_table[regs->epsw & 0xf] & (1 << (opc & 0xf)))
				x8 = (s8)pc[1];
			next = pc + x8;
			*flags |= SINGLESTEP_BRANCH;
			break;

		/* JMP (d16,PC) or CALL (d16,PC) */
		case 0xcc:
		case 0xcd:
			READ_WORD16(pc + 1, &x16);
			next = pc + x16;
			*flags |= SINGLESTEP_BRANCH;
			break;

		/* JMP (d32,PC) or CALL (d32,PC) */
		case 0xdc:
		case 0xdd:
			READ_WORD32(pc + 1, &x32);
			next = pc + x32;
			*flags |= SINGLESTEP_BRANCH;
			break;

		/* RETF */
		case 0xde:
			/* return address is held in MDR */
			next = (u8 *)regs->mdr;
			*flags &= ~SINGLESTEP_PCREL;
			*flags |= SINGLESTEP_BRANCH;
			break;

		/* RET */
		case 0xdf:
			/* return address sits above the register-save area
			 * whose size is encoded in the third insn byte */
			sp += pc[2];
			READ_WORD32(sp, &x32);
			next = (u8 *)x32;
			*flags &= ~SINGLESTEP_PCREL;
			*flags |= SINGLESTEP_BRANCH;
			break;

		case 0xf0:
			/* 2-byte extended opcodes */
			next = pc + 2;
			opc = pc[1];
			if (opc >= 0xf0 && opc <= 0xf7) {
				/* JMP (An) / CALLS (An) */
				switch (opc & 3) {
				case 0:
					next = (u8 *)regs->a0;
					break;
				case 1:
					next = (u8 *)regs->a1;
					break;
				case 2:
					next = (u8 *)regs->a2;
					break;
				case 3:
					next = (u8 *)regs->a3;
					break;
				}
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			} else if (opc == 0xfc) {
				/* RETS */
				READ_WORD32(sp, &x32);
				next = (u8 *)x32;
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			} else if (opc == 0xfd) {
				/* RTI */
				READ_WORD32(sp + 4, &x32);
				next = (u8 *)x32;
				*flags &= ~SINGLESTEP_PCREL;
				*flags |= SINGLESTEP_BRANCH;
			}
			break;

		/* potential 3-byte conditional branches */
		case 0xf8:
			next = pc + 3;
			opc = pc[1];
			if (opc >= 0xe8 && opc <= 0xeb &&
			    (cond_table[regs->epsw & 0xf] &
			     (1 << ((opc & 0xf) + 3)))
			    ) {
				READ_BYTE(pc+2, &x8);
				next = pc + x8;
				*flags |= SINGLESTEP_BRANCH;
			}
			break;

		case 0xfa:
			if (pc[1] == 0xff) {
				/* CALLS (d16,PC) */
				READ_WORD16(pc + 2, &x16);
				next = pc + x16;
			} else
				next = pc + 4;
			*flags |= SINGLESTEP_BRANCH;
			break;

		case 0xfc:
			x32 = 6;
			if (pc[1] == 0xff) {
				/* CALLS (d32,PC) */
				READ_WORD32(pc + 2, &x32);
			}
			next = pc + x32;
			*flags |= SINGLESTEP_BRANCH;
			break;
		/* LXX (d8,PC) */
		/* SETLB - loads the next four bytes into the LIR reg */
		case 0xd0 ... 0xda:
		case 0xdb:
			/* these capture the following instruction bytes, so
			 * cannot be stepped from a copy */
			panic("Can't singlestep Lxx/SETLB\n");
			break;
		}
	}
	return (unsigned)next;

}
268 | ||
/*
 * set up out of place singlestep of some branching instructions
 *
 * regs->pc already points at the copy in cur_kprobe_ss_buf; the copy is
 * patched so calls/returns record a return address inside the buffer
 * (where the post-step breakpoint will sit).  resume_execution() fixes up
 * the real return addresses afterwards.  Returns the address at which the
 * breakpoint should be planted.
 */
static unsigned __kprobes singlestep_branch_setup(struct pt_regs *regs)
{
	u8 opc, *pc, *sp, *next;

	next = NULL;
	pc = (u8 *) regs->pc;
	sp = (u8 *) (regs + 1);

	switch (pc[0]) {
	case 0xc0 ... 0xca:	/* Bxx (d8,PC) */
	case 0xcc:		/* JMP (d16,PC) */
	case 0xdc:		/* JMP (d32,PC) */
	case 0xf8:		/* Bxx (d8,PC)  3-byte version */
		/* don't really need to do anything except cause trap  */
		next = pc;
		break;

	case 0xcd:		/* CALL (d16,PC) */
		/* rewrite the displacement so the call "lands" just past
		 * the 5-byte copy, on the breakpoint */
		pc[1] = 5;
		pc[2] = 0;
		next = pc + 5;
		break;

	case 0xdd:		/* CALL (d32,PC) */
		pc[1] = 7;
		pc[2] = 0;
		pc[3] = 0;
		pc[4] = 0;
		next = pc + 7;
		break;

	case 0xde:		/* RETF */
		/* make the return target the breakpoint after the copy */
		next = pc + 3;
		regs->mdr = (unsigned) next;
		break;

	case 0xdf:		/* RET */
		sp += pc[2];
		next = pc + 3;
		*(unsigned *)sp = (unsigned) next;
		break;

	case 0xf0:
		next = pc + 2;
		opc = pc[1];
		if (opc >= 0xf0 && opc <= 0xf3) {
			/* CALLS (An) */
			/* use CALLS (d16,PC) to avoid mucking with An */
			pc[0] = 0xfa;
			pc[1] = 0xff;
			pc[2] = 4;
			pc[3] = 0;
			next = pc + 4;
		} else if (opc >= 0xf4 && opc <= 0xf7) {
			/* JMP (An) */
			next = pc;
		} else if (opc == 0xfc) {
			/* RETS */
			next = pc + 2;
			*(unsigned *) sp = (unsigned) next;
		} else if (opc == 0xfd) {
			/* RTI */
			next = pc + 2;
			*(unsigned *)(sp + 4) = (unsigned) next;
		}
		break;

	case 0xfa:		/* CALLS (d16,PC) */
		pc[2] = 4;
		pc[3] = 0;
		next = pc + 4;
		break;

	case 0xfc:		/* CALLS (d32,PC) */
		pc[2] = 6;
		pc[3] = 0;
		pc[4] = 0;
		pc[5] = 0;
		next = pc + 6;
		break;

	case 0xd0 ... 0xda:	/* LXX (d8,PC) */
	case 0xdb:		/* SETLB */
		panic("Can't singlestep Lxx/SETLB\n");
	}

	return (unsigned) next;
}
360 | ||
/* No per-probe validation or setup is needed on MN10300 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}
365 | ||
/* Keep a copy of the probed instruction for out-of-line single-stepping */
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
}
370 | ||
/* Plant the breakpoint byte over the probed instruction and make sure the
 * instruction cache sees the change */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
377 | ||
/* NOTE(review): unlike arch_arm_kprobe(), this only flushes the caches and
 * does not visibly write p->opcode back over the breakpoint byte at
 * p->addr — confirm whether the original instruction is restored by the
 * generic kprobes code before this is called.
 */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	mn10300_dcache_flush();
	mn10300_icache_inv();
}
383 | ||
/* Nothing to release: the instruction copy is embedded in struct kprobe */
void arch_remove_kprobe(struct kprobe *p)
{
}
387 | ||
/* Put the original first byte back over a recursively-hit probe and rewind
 * the PC so the real instruction re-executes (used from the recursion path
 * in kprobe_handler()).
 */
static inline
void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
	*p->addr = p->opcode;
	regs->pc = (unsigned long) p->addr;
	mn10300_dcache_flush();
	mn10300_icache_inv();
}
396 | ||
/* Set up the out-of-line single step: copy the probed instruction into
 * cur_kprobe_ss_buf, point the PC at the copy, work out where execution
 * really resumes, and plant a breakpoint immediately after the copy.
 */
static inline
void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long nextpc;

	cur_kprobe_orig_pc = regs->pc;
	memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
	regs->pc = (unsigned long) cur_kprobe_ss_buf;

	nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
	if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
		/* rebase the PC-relative target from the buffer copy back
		 * onto the original instruction address */
		cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
	else
		cur_kprobe_next_pc = nextpc;

	/* branching instructions need special handling */
	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
		nextpc = singlestep_branch_setup(regs);

	cur_kprobe_bp_addr = nextpc;

	/* trap as soon as the stepped instruction has executed */
	*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
	mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
				    sizeof(cur_kprobe_ss_buf));
	mn10300_icache_inv();
}
423 | ||
/* Breakpoint-hit entry point: decide whether the trap belongs to a kprobe
 * and, if so, run its pre_handler and start the out-of-line single step.
 * Returns 1 when the event has been consumed, 0 to let the kernel handle
 * it.  Leaves preemption disabled on the 1-return paths; it is re-enabled
 * in post_kprobe_handler()/kprobe_fault_handler().
 */
static inline int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *) regs->pc;

	/* We're in an interrupt, but this is clear and BUG()-safe. */
	preempt_disable();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			disarm_kprobe(p, regs);
			ret = 1;
		} else {
			/* not a probe hit: offer the trap to the current
			 * probe's break_handler (jprobe support) */
			p = cur_kprobe;
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/* The breakpoint instruction was removed right after
			 * we hit it. Another cpu has removed either a
			 * probepoint or a debugger breakpoint at this address.
			 * In either case, no further handling of this
			 * interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	cur_kprobe = p;
	if (p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
481 | ||
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	/* we may need to fixup regs/stack after singlestepping a call insn */
	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
		/* fixups below are relative to the original probe address */
		regs->pc = cur_kprobe_orig_pc;
		switch (p->ainsn.insn[0]) {
		case 0xcd:	/* CALL (d16,PC) */
			/* the stepped copy stored a buffer-relative return
			 * address (see singlestep_branch_setup()); replace
			 * it with the real one */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
			break;
		case 0xdd:	/* CALL (d32,PC) */
			/* fixup mdr and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 7;
			break;
		case 0xf0:
			if (p->ainsn.insn[1] >= 0xf0 &&
			    p->ainsn.insn[1] <= 0xf3) {
				/* CALLS (An) */
				/* fixup MDR and return address on stack */
				regs->mdr = regs->pc + 2;
				*(unsigned *) regs->sp = regs->mdr;
			}
			break;

		case 0xfa:	/* CALLS (d16,PC) */
			/* fixup MDR and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 4;
			break;

		case 0xfc:	/* CALLS (d32,PC) */
			/* fixup MDR and return address on stack */
			*(unsigned *) regs->sp = regs->mdr = regs->pc + 6;
			break;
		}
	}

	/* resume at the instruction the original insn would have reached */
	regs->pc = cur_kprobe_next_pc;
	cur_kprobe_bp_addr = 0;
}
528 | ||
/* Handle the breakpoint that fires after the out-of-line single step: run
 * the post_handler, restore the real PC, and release the current probe.
 */
static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;

	if (cur_kprobe->post_handler)
		cur_kprobe->post_handler(cur_kprobe, regs, 0);

	resume_execution(cur_kprobe, regs);
	reset_current_kprobe();
	preempt_enable_no_resched();	/* pairs with kprobe_handler() */
	return 1;
}
542 | ||
/* Interrupts disabled, kprobe_lock held.
 * Called when a fault occurs while a probe is active; only reached after
 * kprobe_running() has been checked by the caller.
 */
static inline
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	/* give the probe's own fault_handler first refusal */
	if (cur_kprobe->fault_handler &&
	    cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
		return 1;

	/* fault during the single step: abandon it and resume normally */
	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur_kprobe, regs);
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
558 | ||
559 | /* | |
560 | * Wrapper routine to for handling exceptions. | |
561 | */ | |
562 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |
563 | unsigned long val, void *data) | |
564 | { | |
565 | struct die_args *args = data; | |
566 | ||
567 | switch (val) { | |
568 | case DIE_BREAKPOINT: | |
390dfd95 | 569 | if (cur_kprobe_bp_addr != args->regs->pc) { |
b920de1b DH |
570 | if (kprobe_handler(args->regs)) |
571 | return NOTIFY_STOP; | |
572 | } else { | |
573 | if (post_kprobe_handler(args->regs)) | |
574 | return NOTIFY_STOP; | |
575 | } | |
576 | break; | |
577 | case DIE_GPF: | |
578 | if (kprobe_running() && | |
579 | kprobe_fault_handler(args->regs, args->trapnr)) | |
580 | return NOTIFY_STOP; | |
581 | break; | |
582 | default: | |
583 | break; | |
584 | } | |
585 | return NOTIFY_DONE; | |
586 | } | |
587 | ||
/* Jprobes support. */
static struct pt_regs jprobe_saved_regs;		/* register state at probe hit */
static struct pt_regs *jprobe_saved_regs_location;	/* where that state lived */
static kprobe_opcode_t jprobe_saved_stack[MAX_STACK_SIZE]; /* copied stack frame */
592 | ||
/* Jprobe pre_handler: snapshot the registers and a stack frame, then divert
 * execution into the jprobe's entry routine.  Returns 1 so the normal
 * single-step setup is skipped.
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	jprobe_saved_regs_location = regs;
	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* Save a whole stack frame, this gets arguments
	 * pushed onto the stack after using up all the
	 * arg registers.
	 */
	memcpy(&jprobe_saved_stack, regs + 1, sizeof(jprobe_saved_stack));

	/* setup return addr to the jprobe handler routine */
	regs->pc = (unsigned long) jp->entry;
	return 1;
}
610 | ||
/* Called by the jprobe handler when it is done: restore the saved stack
 * pointer and execute a 0xff breakpoint at a known label, which
 * longjmp_break_handler() recognises by its address and uses to restore
 * the original register/stack state.
 */
void __kprobes jprobe_return(void)
{
	void *orig_sp = jprobe_saved_regs_location + 1;

	preempt_enable_no_resched();
	asm volatile("	mov	%0,sp\n"
		     ".globl jprobe_return_bp_addr\n"
		     "jprobe_return_bp_addr:\n\t"
		     "	.byte	0xff\n"
		     : : "d" (orig_sp));
}
622 | ||
623 | extern void jprobe_return_bp_addr(void); | |
624 | ||
625 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |
626 | { | |
627 | u8 *addr = (u8 *) regs->pc; | |
628 | ||
629 | if (addr == (u8 *) jprobe_return_bp_addr) { | |
630 | if (jprobe_saved_regs_location != regs) { | |
631 | printk(KERN_ERR"JPROBE:" | |
632 | " Current regs (%p) does not match saved regs" | |
633 | " (%p).\n", | |
634 | regs, jprobe_saved_regs_location); | |
635 | BUG(); | |
636 | } | |
637 | ||
638 | /* Restore old register state. | |
639 | */ | |
640 | memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs)); | |
641 | ||
642 | memcpy(regs + 1, &jprobe_saved_stack, | |
643 | sizeof(jprobe_saved_stack)); | |
644 | return 1; | |
645 | } | |
646 | return 0; | |
647 | } | |
648 | ||
/* Nothing to initialise when the kprobes subsystem starts on MN10300 */
int __init arch_init_kprobes(void)
{
	return 0;
}