Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Kernel Probes (KProbes) | |
1da177e4 LT |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
17 | * | |
18 | * Copyright (C) IBM Corporation, 2002, 2004 | |
19 | * | |
20 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel | |
21 | * Probes initial implementation ( includes contributions from | |
22 | * Rusty Russell). | |
23 | * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes | |
24 | * interface to access function arguments. | |
d6be29b8 MH |
25 | * 2004-Oct Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi |
26 | * <prasanna@in.ibm.com> adapted for x86_64 from i386. | |
1da177e4 LT |
27 | * 2005-Mar Roland McGrath <roland@redhat.com> |
28 | * Fixed to handle %rip-relative addressing mode correctly. | |
d6be29b8 MH |
29 | * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston |
30 | * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi | |
31 | * <prasanna@in.ibm.com> added function-return probes. | |
32 | * 2005-May Rusty Lynch <rusty.lynch@intel.com> | |
33 | * Added function return probes functionality | |
34 | * 2006-Feb Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added | |
35 | * kprobe-booster and kretprobe-booster for i386. | |
da07ab03 MH |
36 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster |
37 | * and kretprobe-booster for x86-64 | |
d6be29b8 MH |
38 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven |
39 | * <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com> | |
40 | * unified x86 kprobes code. | |
1da177e4 LT |
41 | */ |
42 | ||
1da177e4 LT |
43 | #include <linux/kprobes.h> |
44 | #include <linux/ptrace.h> | |
1da177e4 LT |
45 | #include <linux/string.h> |
46 | #include <linux/slab.h> | |
b506a9d0 | 47 | #include <linux/hardirq.h> |
1da177e4 | 48 | #include <linux/preempt.h> |
c28f8966 | 49 | #include <linux/module.h> |
1eeb66a1 | 50 | #include <linux/kdebug.h> |
b46b3d70 | 51 | #include <linux/kallsyms.h> |
c0f7ac3a | 52 | #include <linux/ftrace.h> |
9ec4b1f3 | 53 | |
8533bbe9 MH |
54 | #include <asm/cacheflush.h> |
55 | #include <asm/desc.h> | |
1da177e4 | 56 | #include <asm/pgtable.h> |
c28f8966 | 57 | #include <asm/uaccess.h> |
19d36ccd | 58 | #include <asm/alternative.h> |
b46b3d70 | 59 | #include <asm/insn.h> |
62edab90 | 60 | #include <asm/debugreg.h> |
1da177e4 | 61 | |
1da177e4 LT |
62 | void jprobe_return_end(void); |
63 | ||
e7a510f9 AM |
64 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
65 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | |
1da177e4 | 66 | |
98272ed0 | 67 | #define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs)) |
8533bbe9 MH |
68 | |
69 | #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ | |
70 | (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \ | |
71 | (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \ | |
72 | (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \ | |
73 | (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \ | |
74 | << (row % 32)) | |
75 | /* | |
76 | * Undefined/reserved opcodes, conditional jumps, opcode extension | 
77 | * groups, and some special opcodes cannot be boosted. | 
7115e3fc LT |
78 | * This is non-const and volatile to keep gcc from statically |
79 | * optimizing it out, as variable_test_bit makes gcc think only | |
80 | * *(unsigned long*) is used. | |
8533bbe9 | 81 | */ |
7115e3fc | 82 | static volatile u32 twobyte_is_boostable[256 / 32] = { |
8533bbe9 MH |
83 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ |
84 | /* ---------------------------------------------- */ | |
85 | W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ | |
86 | W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */ | |
87 | W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */ | |
88 | W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ | |
89 | W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ | |
90 | W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */ | |
91 | W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */ | |
92 | W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ | |
93 | W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */ | |
94 | W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ | |
95 | W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */ | |
96 | W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */ | |
97 | W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ | |
98 | W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */ | |
99 | W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */ | |
100 | W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */ | |
101 | /* ----------------------------------------------- */ | |
102 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | |
103 | }; | |
8533bbe9 MH |
104 | #undef W |
105 | ||
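/*
 * Editor's note: a minimal, self-contained sketch (plain userspace C, not
 * part of this file) of how a packed bitmap like twobyte_is_boostable is
 * queried.  For a two-byte opcode 0x0f XX, bit XX of the 256-bit map says
 * whether the instruction may be boosted:
 */
#include <stdio.h>

static unsigned int boost_map[256 / 32];	/* 256 one-bit flags */

static int opcode_is_boostable(unsigned char op)
{
	return (boost_map[op / 32] >> (op % 32)) & 1;
}

int main(void)
{
	boost_map[0x90 / 32] |= 1u << (0x90 % 32);	/* e.g. mark 0x0f 0x90 */
	printf("0x0f 0x90 boostable? %d\n", opcode_is_boostable(0x90));
	return 0;
}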
f438d914 MH |
106 | struct kretprobe_blackpoint kretprobe_blacklist[] = { |
107 | {"__switch_to", }, /* This function only switches the current task, | 
108 | but doesn't switch the kernel stack. */ | 
109 | {NULL, NULL} /* Terminator */ | |
110 | }; | |
111 | const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); | |
112 | ||
c0f7ac3a | 113 | static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) |
aa470140 | 114 | { |
c0f7ac3a MH |
115 | struct __arch_relative_insn { |
116 | u8 op; | |
aa470140 | 117 | s32 raddr; |
c0f7ac3a MH |
118 | } __attribute__((packed)) *insn; |
119 | ||
120 | insn = (struct __arch_relative_insn *)from; | |
121 | insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); | |
122 | insn->op = op; | |
123 | } | |
124 | ||
125 | /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ | |
126 | static void __kprobes synthesize_reljump(void *from, void *to) | |
127 | { | |
128 | __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); | |
aa470140 MH |
129 | } |
130 | ||
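/*
 * Editor's note: an illustrative sketch (assumed addresses, plain
 * userspace C) of the rel32 arithmetic in __synthesize_relative_insn().
 * Both "jmp rel32" (0xe9) and "call rel32" (0xe8) are 5 bytes long, and
 * the displacement is relative to the end of the instruction, hence "+ 5":
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t from = 0xffffffff81000000ULL;	/* hypothetical addresses */
	uint64_t to   = 0xffffffff81000123ULL;
	int32_t raddr = (int32_t)(to - (from + 5));

	/* Executing "e9 <raddr>" at 'from' lands exactly on 'to'. */
	printf("jmp target = %#llx\n",
	       (unsigned long long)(from + 5 + (int64_t)raddr));
	return 0;
}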
9930927f | 131 | /* |
567a9fd8 | 132 | * Skip the prefixes of the instruction. |
9930927f | 133 | */ |
567a9fd8 | 134 | static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) |
9930927f | 135 | { |
567a9fd8 MH |
136 | insn_attr_t attr; |
137 | ||
138 | attr = inat_get_opcode_attribute((insn_byte_t)*insn); | |
139 | while (inat_is_legacy_prefix(attr)) { | |
140 | insn++; | |
141 | attr = inat_get_opcode_attribute((insn_byte_t)*insn); | |
142 | } | |
9930927f | 143 | #ifdef CONFIG_X86_64 |
567a9fd8 MH |
144 | if (inat_is_rex_prefix(attr)) |
145 | insn++; | |
9930927f | 146 | #endif |
567a9fd8 | 147 | return insn; |
9930927f HH |
148 | } |
149 | ||
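/*
 * Editor's note (illustrative): for "lock incq (%rax)" = f0 48 ff 00,
 * skip_prefixes() steps over the legacy prefix f0 and, on x86-64, the
 * REX prefix 48, returning a pointer to the opcode byte ff.
 */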
aa470140 | 150 | /* |
d6be29b8 MH |
151 | * Returns non-zero if the opcode is boostable. | 
152 | * RIP-relative instructions are adjusted at copy time in 64-bit mode. | 
aa470140 | 153 | */ |
e7b5e11e | 154 | static int __kprobes can_boost(kprobe_opcode_t *opcodes) |
aa470140 | 155 | { |
aa470140 MH |
156 | kprobe_opcode_t opcode; |
157 | kprobe_opcode_t *orig_opcodes = opcodes; | |
158 | ||
cde5edbd | 159 | if (search_exception_tables((unsigned long)opcodes)) |
30390880 MH |
160 | return 0; /* Page fault may occur on this address. */ |
161 | ||
aa470140 MH |
162 | retry: |
163 | if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) | |
164 | return 0; | |
165 | opcode = *(opcodes++); | |
166 | ||
167 | /* 2nd-byte opcode */ | |
168 | if (opcode == 0x0f) { | |
169 | if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) | |
170 | return 0; | |
8533bbe9 MH |
171 | return test_bit(*opcodes, |
172 | (unsigned long *)twobyte_is_boostable); | |
aa470140 MH |
173 | } |
174 | ||
175 | switch (opcode & 0xf0) { | |
d6be29b8 | 176 | #ifdef CONFIG_X86_64 |
aa470140 MH |
177 | case 0x40: |
178 | goto retry; /* REX prefix is boostable */ | |
d6be29b8 | 179 | #endif |
aa470140 MH |
180 | case 0x60: |
181 | if (0x63 < opcode && opcode < 0x67) | |
182 | goto retry; /* prefixes */ | |
183 | /* can't boost Address-size override and bound */ | |
184 | return (opcode != 0x62 && opcode != 0x67); | |
185 | case 0x70: | |
186 | return 0; /* can't boost conditional jump */ | |
187 | case 0xc0: | |
188 | /* can't boost software-interruptions */ | |
189 | return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; | |
190 | case 0xd0: | |
191 | /* can boost AA* and XLAT */ | |
192 | return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); | |
193 | case 0xe0: | |
194 | /* can boost in/out and absolute jmps */ | |
195 | return ((opcode & 0x04) || opcode == 0xea); | |
196 | case 0xf0: | |
197 | if ((opcode & 0x0c) == 0 && opcode != 0xf1) | |
198 | goto retry; /* lock/rep(ne) prefix */ | |
199 | /* clear and set flags are boostable */ | |
200 | return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); | |
201 | default: | |
202 | /* segment override prefixes are boostable */ | |
203 | if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e) | |
204 | goto retry; /* prefixes */ | |
205 | /* CS override prefix and call are not boostable */ | |
206 | return (opcode != 0x2e && opcode != 0x9a); | |
207 | } | |
208 | } | |
209 | ||
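/*
 * Editor's note (worked example): for "mov %rsp,%rbp" = 48 89 e5, the
 * first byte 0x48 is a REX prefix, so can_boost() retries on 0x89, which
 * falls through to the default case and is boostable (it is neither the
 * CS override 0x2e nor the far call 0x9a).
 */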
86b4ce31 MH |
210 | static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, |
211 | unsigned long addr) | |
b46b3d70 MH |
212 | { |
213 | struct kprobe *kp; | |
86b4ce31 | 214 | |
b46b3d70 | 215 | kp = get_kprobe((void *)addr); |
86b4ce31 | 216 | /* There is no probe, return original address */ |
b46b3d70 | 217 | if (!kp) |
86b4ce31 | 218 | return addr; |
b46b3d70 MH |
219 | |
220 | /* | |
221 | * Basically, kp->ainsn.insn holds the original instruction. | 
222 | * However, a RIP-relative instruction cannot be single-stepped | 
c0f7ac3a | 223 | * at a different place, so __copy_instruction() tweaks the displacement | 
b46b3d70 MH |
224 | * of that instruction. In that case, we can't recover the instruction | 
225 | * from kp->ainsn.insn. | 
226 | * | |
227 | * On the other hand, kp->opcode has a copy of the first byte of | |
228 | * the probed instruction, which was overwritten by int3. Since | 
229 | * the instruction at kp->addr is not modified by kprobes except | |
230 | * for the first byte, we can recover the original instruction | |
231 | * from it and kp->opcode. | |
232 | */ | |
233 | memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | |
234 | buf[0] = kp->opcode; | |
86b4ce31 MH |
235 | return (unsigned long)buf; |
236 | } | |
237 | ||
238 | #ifdef CONFIG_OPTPROBES | |
239 | static unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, | |
240 | unsigned long addr) | |
241 | { | |
242 | struct optimized_kprobe *op; | |
243 | struct kprobe *kp; | |
244 | long offs; | |
245 | int i; | |
246 | ||
247 | for (i = 0; i < RELATIVEJUMP_SIZE; i++) { | |
248 | kp = get_kprobe((void *)addr - i); | |
249 | /* This function only handles jump-optimized kprobe */ | |
250 | if (kp && kprobe_optimized(kp)) { | |
251 | op = container_of(kp, struct optimized_kprobe, kp); | |
252 | /* If op->list is not empty, op is under optimizing */ | |
253 | if (list_empty(&op->list)) | |
254 | goto found; | |
255 | } | |
256 | } | |
257 | ||
258 | return addr; | |
259 | found: | |
260 | /* | |
261 | * If the kprobe is optimized, the original bytes may have been | 
262 | * overwritten by the jump destination address. In this case, the | 
263 | * original bytes must be recovered from the op->optinsn.copied_insn buffer. | 
264 | */ | |
265 | memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | |
266 | if (addr == (unsigned long)kp->addr) { | |
267 | buf[0] = kp->opcode; | |
268 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | |
269 | } else { | |
270 | offs = addr - (unsigned long)kp->addr - 1; | |
271 | memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); | |
272 | } | |
273 | ||
274 | return (unsigned long)buf; | |
275 | } | |
276 | #else | |
277 | static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, | |
278 | unsigned long addr) | |
279 | { | |
280 | return addr; | |
281 | } | |
282 | #endif | |
283 | ||
284 | /* | |
285 | * Recover the probed instruction at addr for further analysis. | |
286 | * The caller must hold kprobe_mutex, or disable preemption, | 
287 | * to prevent the referenced kprobes from being released. | 
288 | */ | |
289 | static unsigned long recover_probed_instruction(kprobe_opcode_t *buf, | |
290 | unsigned long addr) | |
291 | { | |
292 | unsigned long __addr; | |
293 | ||
294 | __addr = __recover_optprobed_insn(buf, addr); | |
295 | if (__addr != addr) | |
296 | return __addr; | |
297 | ||
298 | return __recover_probed_insn(buf, addr); | |
b46b3d70 MH |
299 | } |
300 | ||
b46b3d70 MH |
301 | /* Check if paddr is at an instruction boundary */ |
302 | static int __kprobes can_probe(unsigned long paddr) | |
303 | { | |
86b4ce31 | 304 | unsigned long addr, __addr, offset = 0; |
b46b3d70 MH |
305 | struct insn insn; |
306 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | |
307 | ||
6abded71 | 308 | if (!kallsyms_lookup_size_offset(paddr, NULL, &offset)) |
b46b3d70 MH |
309 | return 0; |
310 | ||
311 | /* Decode instructions */ | |
312 | addr = paddr - offset; | |
313 | while (addr < paddr) { | |
b46b3d70 MH |
314 | /* |
315 | * Check if the instruction has been modified by another | |
316 | * kprobe, in which case we replace the breakpoint by the | |
317 | * original instruction in our buffer. | |
86b4ce31 MH |
318 | * Also, jump optimization changes the breakpoint to a | 
319 | * relative jump. Since a relative jump is also a normal | 
320 | * instruction, we just continue decoding if no kprobe is there. | 
b46b3d70 | 321 | */ |
86b4ce31 MH |
322 | __addr = recover_probed_instruction(buf, addr); |
323 | kernel_insn_init(&insn, (void *)__addr); | |
b46b3d70 | 324 | insn_get_length(&insn); |
86b4ce31 MH |
325 | |
326 | /* | |
327 | * Another debugging subsystem might insert this breakpoint. | |
328 | * In that case, we can't recover it. | |
329 | */ | |
330 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | |
331 | return 0; | |
b46b3d70 MH |
332 | addr += insn.length; |
333 | } | |
334 | ||
335 | return (addr == paddr); | |
336 | } | |
337 | ||
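/*
 * Editor's note (illustrative): if a function starts with the 4-byte
 * instruction "sub $0x18,%rsp" = 48 83 ec 18, then paddr = start + 2
 * falls inside that instruction; the decode loop above steps from start
 * directly to start + 4, so addr never equals paddr and can_probe()
 * rejects the address.
 */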
1da177e4 | 338 | /* |
d6be29b8 | 339 | * Returns non-zero if opcode modifies the interrupt flag. |
1da177e4 | 340 | */ |
8645419c | 341 | static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) |
1da177e4 | 342 | { |
567a9fd8 MH |
343 | /* Skip prefixes */ |
344 | insn = skip_prefixes(insn); | |
345 | ||
1da177e4 LT |
346 | switch (*insn) { |
347 | case 0xfa: /* cli */ | |
348 | case 0xfb: /* sti */ | |
349 | case 0xcf: /* iret/iretd */ | |
350 | case 0x9d: /* popf/popfd */ | |
351 | return 1; | |
352 | } | |
9930927f | 353 | |
1da177e4 LT |
354 | return 0; |
355 | } | |
356 | ||
357 | /* | |
c0f7ac3a MH |
358 | * Copy an instruction and adjust the displacement if the instruction |
359 | * uses the %rip-relative addressing mode. | |
aa470140 | 360 | * Returns the length of the copied instruction, or 0 if the | 
1da177e4 | 361 | * instruction could not be recovered (another subsystem's int3). | 
31f80e45 | 362 | * The displacement fixup applies only to 64-bit x86. | 
1da177e4 | 363 | */ |
c0f7ac3a | 364 | static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) |
1da177e4 | 365 | { |
89ae465b | 366 | struct insn insn; |
c0f7ac3a | 367 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
86b4ce31 MH |
368 | u8 *orig_src = src; /* Back up original src for RIP calculation */ |
369 | ||
370 | if (recover) | |
371 | src = (u8 *)recover_probed_instruction(buf, (unsigned long)src); | |
1da177e4 | 372 | |
c0f7ac3a | 373 | kernel_insn_init(&insn, src); |
c0f7ac3a | 374 | insn_get_length(&insn); |
86b4ce31 MH |
375 | /* Another subsystem put a breakpoint here; we can't recover it */ | 
376 | if (recover && insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | |
377 | return 0; | |
c0f7ac3a MH |
378 | memcpy(dest, insn.kaddr, insn.length); |
379 | ||
380 | #ifdef CONFIG_X86_64 | |
89ae465b MH |
381 | if (insn_rip_relative(&insn)) { |
382 | s64 newdisp; | |
383 | u8 *disp; | |
c0f7ac3a | 384 | kernel_insn_init(&insn, dest); |
89ae465b MH |
385 | insn_get_displacement(&insn); |
386 | /* | |
387 | * The copied instruction uses the %rip-relative addressing | |
388 | * mode. Adjust the displacement for the difference between | |
389 | * the original location of this instruction and the location | |
390 | * of the copy that will actually be run. The tricky bit here | |
391 | * is making sure that the sign extension happens correctly in | |
392 | * this calculation, since we need a signed 32-bit result to | |
393 | * be sign-extended to 64 bits when it's added to the %rip | |
394 | * value and yield the same 64-bit result that the sign- | |
395 | * extension of the original signed 32-bit displacement would | |
396 | * have given. | |
397 | */ | |
86b4ce31 | 398 | newdisp = (u8 *) orig_src + (s64) insn.displacement.value - (u8 *) dest; |
89ae465b | 399 | BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ |
c0f7ac3a | 400 | disp = (u8 *) dest + insn_offset_displacement(&insn); |
89ae465b | 401 | *(s32 *) disp = (s32) newdisp; |
1da177e4 | 402 | } |
d6be29b8 | 403 | #endif |
c0f7ac3a | 404 | return insn.length; |
31f80e45 | 405 | } |
1da177e4 | 406 | |
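/*
 * Editor's note (worked example, assumed addresses): suppose the 7-byte
 * "mov 0x1000(%rip),%rax" = 48 8b 05 00 10 00 00 sits at src and is
 * copied to dest.  The original references src + 7 + 0x1000, and the
 * copy must reach the same target from dest + 7 + newdisp, so
 * newdisp = src + 0x1000 - dest: the displacement changes by exactly
 * -(dest - src), which is what the formula above computes.
 */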
f709b122 | 407 | static void __kprobes arch_copy_kprobe(struct kprobe *p) |
1da177e4 | 408 | { |
c0f7ac3a MH |
409 | /* |
410 | * Copy the instruction without recovering an int3: if one is | 
411 | * present here, it was put by another subsystem. | 
412 | */ | |
413 | __copy_instruction(p->ainsn.insn, p->addr, 0); | |
31f80e45 | 414 | |
8533bbe9 | 415 | if (can_boost(p->addr)) |
aa470140 | 416 | p->ainsn.boostable = 0; |
8533bbe9 | 417 | else |
aa470140 | 418 | p->ainsn.boostable = -1; |
8533bbe9 | 419 | |
7e1048b1 | 420 | p->opcode = *p->addr; |
1da177e4 LT |
421 | } |
422 | ||
8533bbe9 MH |
423 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
424 | { | |
4554dbcb MH |
425 | if (alternatives_text_reserved(p->addr, p->addr)) |
426 | return -EINVAL; | |
427 | ||
b46b3d70 MH |
428 | if (!can_probe((unsigned long)p->addr)) |
429 | return -EILSEQ; | |
8533bbe9 MH |
430 | /* insn: must be on special executable page on x86. */ |
431 | p->ainsn.insn = get_insn_slot(); | |
432 | if (!p->ainsn.insn) | |
433 | return -ENOMEM; | |
434 | arch_copy_kprobe(p); | |
435 | return 0; | |
436 | } | |
437 | ||
0f2fbdcb | 438 | void __kprobes arch_arm_kprobe(struct kprobe *p) |
1da177e4 | 439 | { |
19d36ccd | 440 | text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); |
1da177e4 LT |
441 | } |
442 | ||
0f2fbdcb | 443 | void __kprobes arch_disarm_kprobe(struct kprobe *p) |
1da177e4 | 444 | { |
19d36ccd | 445 | text_poke(p->addr, &p->opcode, 1); |
7e1048b1 RL |
446 | } |
447 | ||
0498b635 | 448 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
7e1048b1 | 449 | { |
12941560 MH |
450 | if (p->ainsn.insn) { |
451 | free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); | |
452 | p->ainsn.insn = NULL; | |
453 | } | |
1da177e4 LT |
454 | } |
455 | ||
3b60211c | 456 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) |
aa3d7e3d | 457 | { |
e7a510f9 AM |
458 | kcb->prev_kprobe.kp = kprobe_running(); |
459 | kcb->prev_kprobe.status = kcb->kprobe_status; | |
8533bbe9 MH |
460 | kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags; |
461 | kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; | |
aa3d7e3d PP |
462 | } |
463 | ||
3b60211c | 464 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
aa3d7e3d | 465 | { |
b76834bc | 466 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
e7a510f9 | 467 | kcb->kprobe_status = kcb->prev_kprobe.status; |
8533bbe9 MH |
468 | kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; |
469 | kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; | |
aa3d7e3d PP |
470 | } |
471 | ||
3b60211c | 472 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
e7a510f9 | 473 | struct kprobe_ctlblk *kcb) |
aa3d7e3d | 474 | { |
b76834bc | 475 | __this_cpu_write(current_kprobe, p); |
8533bbe9 | 476 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags |
053de044 | 477 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); |
aa3d7e3d | 478 | if (is_IF_modifier(p->ainsn.insn)) |
053de044 | 479 | kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; |
aa3d7e3d PP |
480 | } |
481 | ||
e7b5e11e | 482 | static void __kprobes clear_btf(void) |
1ecc798c | 483 | { |
ea8e61b7 PZ |
484 | if (test_thread_flag(TIF_BLOCKSTEP)) { |
485 | unsigned long debugctl = get_debugctlmsr(); | |
486 | ||
487 | debugctl &= ~DEBUGCTLMSR_BTF; | |
488 | update_debugctlmsr(debugctl); | |
489 | } | |
1ecc798c RM |
490 | } |
491 | ||
e7b5e11e | 492 | static void __kprobes restore_btf(void) |
1ecc798c | 493 | { |
ea8e61b7 PZ |
494 | if (test_thread_flag(TIF_BLOCKSTEP)) { |
495 | unsigned long debugctl = get_debugctlmsr(); | |
496 | ||
497 | debugctl |= DEBUGCTLMSR_BTF; | |
498 | update_debugctlmsr(debugctl); | |
499 | } | |
1ecc798c RM |
500 | } |
501 | ||
4c4308cb | 502 | void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, |
0f2fbdcb | 503 | struct pt_regs *regs) |
73649dab | 504 | { |
8533bbe9 | 505 | unsigned long *sara = stack_addr(regs); |
ba8af12f | 506 | |
4c4308cb | 507 | ri->ret_addr = (kprobe_opcode_t *) *sara; |
8533bbe9 | 508 | |
4c4308cb CH |
509 | /* Replace the return addr with trampoline addr */ |
510 | *sara = (unsigned long) &kretprobe_trampoline; | |
73649dab | 511 | } |
f315decb | 512 | |
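/*
 * Editor's note: a minimal kretprobe usage sketch (an assumed example
 * module, not part of this file) showing what the return-address hijack
 * above enables; the probed symbol "do_fork" is only illustrative.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("do_fork returned %ld\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.kp.symbol_name	= "do_fork",
	.maxactive	= 20,	/* concurrent instances to pre-allocate */
};

static int __init kret_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit kret_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(kret_init);
module_exit(kret_exit);
MODULE_LICENSE("GPL");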
c0f7ac3a MH |
513 | #ifdef CONFIG_OPTPROBES |
514 | static int __kprobes setup_detour_execution(struct kprobe *p, | |
515 | struct pt_regs *regs, | |
516 | int reenter); | |
517 | #else | |
518 | #define setup_detour_execution(p, regs, reenter) (0) | |
519 | #endif | |
520 | ||
f315decb | 521 | static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, |
0f94eb63 | 522 | struct kprobe_ctlblk *kcb, int reenter) |
f315decb | 523 | { |
c0f7ac3a MH |
524 | if (setup_detour_execution(p, regs, reenter)) |
525 | return; | |
526 | ||
615d0ebb | 527 | #if !defined(CONFIG_PREEMPT) |
f315decb AS |
528 | if (p->ainsn.boostable == 1 && !p->post_handler) { |
529 | /* Boost up -- we can execute copied instructions directly */ | |
0f94eb63 MH |
530 | if (!reenter) |
531 | reset_current_kprobe(); | |
532 | /* | |
533 | * Reentering boosted probe doesn't reset current_kprobe, | |
534 | * nor set current_kprobe, because it doesn't use single | |
535 | * stepping. | |
536 | */ | |
f315decb AS |
537 | regs->ip = (unsigned long)p->ainsn.insn; |
538 | preempt_enable_no_resched(); | |
539 | return; | |
540 | } | |
541 | #endif | |
0f94eb63 MH |
542 | if (reenter) { |
543 | save_previous_kprobe(kcb); | |
544 | set_current_kprobe(p, regs, kcb); | |
545 | kcb->kprobe_status = KPROBE_REENTER; | |
546 | } else | |
547 | kcb->kprobe_status = KPROBE_HIT_SS; | |
548 | /* Prepare real single stepping */ | |
549 | clear_btf(); | |
550 | regs->flags |= X86_EFLAGS_TF; | |
551 | regs->flags &= ~X86_EFLAGS_IF; | |
552 | /* single step inline if the instruction is an int3 */ | |
553 | if (p->opcode == BREAKPOINT_INSTRUCTION) | |
554 | regs->ip = (unsigned long)p->addr; | |
555 | else | |
556 | regs->ip = (unsigned long)p->ainsn.insn; | |
f315decb AS |
557 | } |
558 | ||
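/*
 * Editor's note (illustrative): "boosting" means the copy in
 * p->ainsn.insn is followed by a synthesized jump back to the original
 * instruction stream (see resume_execution() below), so the int3 handler
 * can branch straight to the copy and skip the single-step debug trap.
 */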
40102d4a HH |
559 | /* |
560 | * We have reentered the kprobe_handler(), since another probe was hit while | |
561 | * within the handler. We save the original kprobes variables and just single | |
562 | * step on the instruction of the new probe without calling any user handlers. | |
563 | */ | |
59e87cdc MH |
564 | static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, |
565 | struct kprobe_ctlblk *kcb) | |
40102d4a | 566 | { |
f315decb AS |
567 | switch (kcb->kprobe_status) { |
568 | case KPROBE_HIT_SSDONE: | |
f315decb | 569 | case KPROBE_HIT_ACTIVE: |
fb8830e7 | 570 | kprobes_inc_nmissed_count(p); |
0f94eb63 | 571 | setup_singlestep(p, regs, kcb, 1); |
f315decb AS |
572 | break; |
573 | case KPROBE_HIT_SS: | |
e9afe9e1 MH |
574 | /* A probe has been hit in the codepath leading up to, or just |
575 | * after, single-stepping of a probed instruction. This entire | |
576 | * codepath should strictly reside in .kprobes.text section. | |
577 | * Raise a BUG or we'll continue in an endless reentering loop | |
578 | * and eventually overflow the stack. | 
579 | */ | |
580 | printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", | |
581 | p->addr); | |
582 | dump_kprobe(p); | |
583 | BUG(); | |
f315decb AS |
584 | default: |
585 | /* impossible cases */ | |
586 | WARN_ON(1); | |
fb8830e7 | 587 | return 0; |
59e87cdc | 588 | } |
f315decb | 589 | |
59e87cdc | 590 | return 1; |
40102d4a | 591 | } |
73649dab | 592 | |
8533bbe9 MH |
593 | /* |
594 | * Interrupts are disabled on entry as trap3 is an interrupt gate and they | |
af901ca1 | 595 | * remain disabled throughout this function. |
8533bbe9 MH |
596 | */ |
597 | static int __kprobes kprobe_handler(struct pt_regs *regs) | |
1da177e4 | 598 | { |
8533bbe9 | 599 | kprobe_opcode_t *addr; |
f315decb | 600 | struct kprobe *p; |
d217d545 AM |
601 | struct kprobe_ctlblk *kcb; |
602 | ||
8533bbe9 | 603 | addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); |
d217d545 AM |
604 | /* |
605 | * We don't want to be preempted for the entire | |
f315decb AS |
606 | * duration of kprobe processing. We conditionally |
607 | * re-enable preemption at the end of this function, | |
608 | * and also in reenter_kprobe() and setup_singlestep(). | |
d217d545 AM |
609 | */ |
610 | preempt_disable(); | |
1da177e4 | 611 | |
f315decb | 612 | kcb = get_kprobe_ctlblk(); |
b9760156 | 613 | p = get_kprobe(addr); |
f315decb | 614 | |
b9760156 | 615 | if (p) { |
b9760156 | 616 | if (kprobe_running()) { |
f315decb AS |
617 | if (reenter_kprobe(p, regs, kcb)) |
618 | return 1; | |
1da177e4 | 619 | } else { |
b9760156 HH |
620 | set_current_kprobe(p, regs, kcb); |
621 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | |
f315decb | 622 | |
1da177e4 | 623 | /* |
f315decb AS |
624 | * If we have no pre-handler or it returned 0, we |
625 | * continue with normal processing. If we have a | |
626 | * pre-handler and it returned non-zero, it prepped | |
627 | * for calling the break_handler below on re-entry | |
628 | * for jprobe processing, so get out doing nothing | |
629 | * more here. | |
1da177e4 | 630 | */ |
f315decb | 631 | if (!p->pre_handler || !p->pre_handler(p, regs)) |
0f94eb63 | 632 | setup_singlestep(p, regs, kcb, 0); |
f315decb | 633 | return 1; |
b9760156 | 634 | } |
829e9245 MH |
635 | } else if (*addr != BREAKPOINT_INSTRUCTION) { |
636 | /* | |
637 | * The breakpoint instruction was removed right | |
638 | * after we hit it. Another cpu has removed | |
639 | * either a probepoint or a debugger breakpoint | |
640 | * at this address. In either case, no further | |
641 | * handling of this interrupt is appropriate. | |
642 | * Back up over the (now missing) int3 and run | |
643 | * the original instruction. | |
644 | */ | |
645 | regs->ip = (unsigned long)addr; | |
646 | preempt_enable_no_resched(); | |
647 | return 1; | |
f315decb | 648 | } else if (kprobe_running()) { |
b76834bc | 649 | p = __this_cpu_read(current_kprobe); |
f315decb | 650 | if (p->break_handler && p->break_handler(p, regs)) { |
0f94eb63 | 651 | setup_singlestep(p, regs, kcb, 0); |
f315decb | 652 | return 1; |
1da177e4 | 653 | } |
f315decb | 654 | } /* else: not a kprobe fault; let the kernel handle it */ |
1da177e4 | 655 | |
d217d545 | 656 | preempt_enable_no_resched(); |
f315decb | 657 | return 0; |
1da177e4 LT |
658 | } |
659 | ||
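/*
 * Editor's note: a minimal kprobe usage sketch (an assumed example
 * module, not part of this file) of the int3-driven flow handled above;
 * the probed symbol "schedule" is only illustrative.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s, ip = %lx\n", p->symbol_name, regs->ip);
	return 0;	/* 0: proceed with single-stepping as usual */
}

static struct kprobe my_kprobe = {
	.symbol_name	= "schedule",
	.pre_handler	= my_pre_handler,
};

static int __init kp_init(void)
{
	return register_kprobe(&my_kprobe);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&my_kprobe);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");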
f007ea26 MH |
660 | #ifdef CONFIG_X86_64 |
661 | #define SAVE_REGS_STRING \ | |
662 | /* Skip cs, ip, orig_ax. */ \ | |
663 | " subq $24, %rsp\n" \ | |
664 | " pushq %rdi\n" \ | |
665 | " pushq %rsi\n" \ | |
666 | " pushq %rdx\n" \ | |
667 | " pushq %rcx\n" \ | |
668 | " pushq %rax\n" \ | |
669 | " pushq %r8\n" \ | |
670 | " pushq %r9\n" \ | |
671 | " pushq %r10\n" \ | |
672 | " pushq %r11\n" \ | |
673 | " pushq %rbx\n" \ | |
674 | " pushq %rbp\n" \ | |
675 | " pushq %r12\n" \ | |
676 | " pushq %r13\n" \ | |
677 | " pushq %r14\n" \ | |
678 | " pushq %r15\n" | |
679 | #define RESTORE_REGS_STRING \ | |
680 | " popq %r15\n" \ | |
681 | " popq %r14\n" \ | |
682 | " popq %r13\n" \ | |
683 | " popq %r12\n" \ | |
684 | " popq %rbp\n" \ | |
685 | " popq %rbx\n" \ | |
686 | " popq %r11\n" \ | |
687 | " popq %r10\n" \ | |
688 | " popq %r9\n" \ | |
689 | " popq %r8\n" \ | |
690 | " popq %rax\n" \ | |
691 | " popq %rcx\n" \ | |
692 | " popq %rdx\n" \ | |
693 | " popq %rsi\n" \ | |
694 | " popq %rdi\n" \ | |
695 | /* Skip orig_ax, ip, cs */ \ | |
696 | " addq $24, %rsp\n" | |
697 | #else | |
698 | #define SAVE_REGS_STRING \ | |
699 | /* Skip cs, ip, orig_ax and gs. */ \ | |
700 | " subl $16, %esp\n" \ | |
701 | " pushl %fs\n" \ | |
f007ea26 | 702 | " pushl %es\n" \ |
a1974798 | 703 | " pushl %ds\n" \ |
f007ea26 MH |
704 | " pushl %eax\n" \ |
705 | " pushl %ebp\n" \ | |
706 | " pushl %edi\n" \ | |
707 | " pushl %esi\n" \ | |
708 | " pushl %edx\n" \ | |
709 | " pushl %ecx\n" \ | |
710 | " pushl %ebx\n" | |
711 | #define RESTORE_REGS_STRING \ | |
712 | " popl %ebx\n" \ | |
713 | " popl %ecx\n" \ | |
714 | " popl %edx\n" \ | |
715 | " popl %esi\n" \ | |
716 | " popl %edi\n" \ | |
717 | " popl %ebp\n" \ | |
718 | " popl %eax\n" \ | |
719 | /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\ | |
720 | " addl $24, %esp\n" | |
721 | #endif | |
722 | ||
73649dab | 723 | /* |
da07ab03 MH |
724 | * When a retprobed function returns, this code saves registers and |
725 | * calls trampoline_handler(), which in turn calls the kretprobe's handler. | 
73649dab | 726 | */ |
f1452d42 | 727 | static void __used __kprobes kretprobe_trampoline_holder(void) |
1017579a | 728 | { |
d6be29b8 MH |
729 | asm volatile ( |
730 | ".global kretprobe_trampoline\n" | |
da07ab03 | 731 | "kretprobe_trampoline: \n" |
d6be29b8 | 732 | #ifdef CONFIG_X86_64 |
da07ab03 MH |
733 | /* We don't bother saving the ss register */ |
734 | " pushq %rsp\n" | |
735 | " pushfq\n" | |
f007ea26 | 736 | SAVE_REGS_STRING |
da07ab03 MH |
737 | " movq %rsp, %rdi\n" |
738 | " call trampoline_handler\n" | |
739 | /* Replace saved sp with true return address. */ | |
740 | " movq %rax, 152(%rsp)\n" | |
f007ea26 | 741 | RESTORE_REGS_STRING |
da07ab03 | 742 | " popfq\n" |
d6be29b8 MH |
743 | #else |
744 | " pushf\n" | |
f007ea26 | 745 | SAVE_REGS_STRING |
d6be29b8 MH |
746 | " movl %esp, %eax\n" |
747 | " call trampoline_handler\n" | |
748 | /* Move flags to cs */ | |
fee039a1 MH |
749 | " movl 56(%esp), %edx\n" |
750 | " movl %edx, 52(%esp)\n" | |
d6be29b8 | 751 | /* Replace saved flags with true return address. */ |
fee039a1 | 752 | " movl %eax, 56(%esp)\n" |
f007ea26 | 753 | RESTORE_REGS_STRING |
d6be29b8 MH |
754 | " popf\n" |
755 | #endif | |
da07ab03 | 756 | " ret\n"); |
1017579a | 757 | } |
73649dab RL |
758 | |
759 | /* | |
da07ab03 | 760 | * Called from kretprobe_trampoline |
73649dab | 761 | */ |
f1452d42 | 762 | static __used __kprobes void *trampoline_handler(struct pt_regs *regs) |
73649dab | 763 | { |
62c27be0 | 764 | struct kretprobe_instance *ri = NULL; |
99219a3f | 765 | struct hlist_head *head, empty_rp; |
62c27be0 | 766 | struct hlist_node *node, *tmp; |
991a51d8 | 767 | unsigned long flags, orig_ret_address = 0; |
d6be29b8 | 768 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
737480a0 | 769 | kprobe_opcode_t *correct_ret_addr = NULL; |
73649dab | 770 | |
99219a3f | 771 | INIT_HLIST_HEAD(&empty_rp); |
ef53d9c5 | 772 | kretprobe_hash_lock(current, &head, &flags); |
8533bbe9 | 773 | /* fixup registers */ |
d6be29b8 | 774 | #ifdef CONFIG_X86_64 |
da07ab03 | 775 | regs->cs = __KERNEL_CS; |
d6be29b8 MH |
776 | #else |
777 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | |
fee039a1 | 778 | regs->gs = 0; |
d6be29b8 | 779 | #endif |
da07ab03 | 780 | regs->ip = trampoline_address; |
8533bbe9 | 781 | regs->orig_ax = ~0UL; |
73649dab | 782 | |
ba8af12f RL |
783 | /* |
784 | * It is possible to have multiple instances associated with a given | |
8533bbe9 | 785 | * task either because multiple functions in the call path have |
025dfdaf | 786 | * return probes installed on them, and/or more than one |
ba8af12f RL |
787 | * return probe was registered for a target function. |
788 | * | |
789 | * We can handle this because: | |
8533bbe9 | 790 | * - instances are always pushed into the head of the list |
ba8af12f | 791 | * - when multiple return probes are registered for the same |
8533bbe9 MH |
792 | * function, the (chronologically) first instance's ret_addr |
793 | * will be the real return address, and all the rest will | |
794 | * point to kretprobe_trampoline. | |
ba8af12f RL |
795 | */ |
796 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | |
62c27be0 | 797 | if (ri->task != current) |
ba8af12f | 798 | /* another task is sharing our hash bucket */ |
62c27be0 | 799 | continue; |
ba8af12f | 800 | |
737480a0 KS |
801 | orig_ret_address = (unsigned long)ri->ret_addr; |
802 | ||
803 | if (orig_ret_address != trampoline_address) | |
804 | /* | |
805 | * This is the real return address. Any other | |
806 | * instances associated with this task are for | |
807 | * other calls deeper on the call stack | |
808 | */ | |
809 | break; | |
810 | } | |
811 | ||
812 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | |
813 | ||
814 | correct_ret_addr = ri->ret_addr; | |
815 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | |
816 | if (ri->task != current) | |
817 | /* another task is sharing our hash bucket */ | |
818 | continue; | |
819 | ||
820 | orig_ret_address = (unsigned long)ri->ret_addr; | |
da07ab03 | 821 | if (ri->rp && ri->rp->handler) { |
b76834bc | 822 | __this_cpu_write(current_kprobe, &ri->rp->kp); |
da07ab03 | 823 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; |
737480a0 | 824 | ri->ret_addr = correct_ret_addr; |
ba8af12f | 825 | ri->rp->handler(ri, regs); |
b76834bc | 826 | __this_cpu_write(current_kprobe, NULL); |
da07ab03 | 827 | } |
ba8af12f | 828 | |
99219a3f | 829 | recycle_rp_inst(ri, &empty_rp); |
ba8af12f RL |
830 | |
831 | if (orig_ret_address != trampoline_address) | |
832 | /* | |
833 | * This is the real return address. Any other | |
834 | * instances associated with this task are for | |
835 | * other calls deeper on the call stack | |
836 | */ | |
837 | break; | |
73649dab | 838 | } |
ba8af12f | 839 | |
ef53d9c5 | 840 | kretprobe_hash_unlock(current, &flags); |
ba8af12f | 841 | |
99219a3f | 842 | hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { |
843 | hlist_del(&ri->hlist); | |
844 | kfree(ri); | |
845 | } | |
da07ab03 | 846 | return (void *)orig_ret_address; |
73649dab RL |
847 | } |
848 | ||
1da177e4 LT |
849 | /* |
850 | * Called after single-stepping. p->addr is the address of the | |
851 | * instruction whose first byte has been replaced by the "int 3" | |
852 | * instruction. To avoid the SMP problems that can occur when we | |
853 | * temporarily put back the original opcode to single-step, we | |
854 | * single-stepped a copy of the instruction. The address of this | |
855 | * copy is p->ainsn.insn. | |
856 | * | |
857 | * This function prepares to return from the post-single-step | |
858 | * interrupt. We have to fix up the stack as follows: | |
859 | * | |
860 | * 0) Except in the case of absolute or indirect jump or call instructions, | |
65ea5b03 | 861 | * the new ip is relative to the copied instruction. We need to make |
1da177e4 LT |
862 | * it relative to the original instruction. |
863 | * | |
864 | * 1) If the single-stepped instruction was pushfl, then the TF and IF | |
65ea5b03 | 865 | * flags are set in the just-pushed flags, and may need to be cleared. |
1da177e4 LT |
866 | * |
867 | * 2) If the single-stepped instruction was a call, the return address | |
868 | * that is atop the stack is the address following the copied instruction. | |
869 | * We need to make it the address following the original instruction. | |
aa470140 MH |
870 | * |
871 | * If this is the first time we've single-stepped the instruction at | |
872 | * this probepoint, and the instruction is boostable, boost it: add a | |
873 | * jump instruction after the copied instruction, that jumps to the next | |
874 | * instruction after the probepoint. | |
1da177e4 | 875 | */ |
e7a510f9 AM |
876 | static void __kprobes resume_execution(struct kprobe *p, |
877 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) | |
1da177e4 | 878 | { |
8533bbe9 MH |
879 | unsigned long *tos = stack_addr(regs); |
880 | unsigned long copy_ip = (unsigned long)p->ainsn.insn; | |
881 | unsigned long orig_ip = (unsigned long)p->addr; | |
1da177e4 LT |
882 | kprobe_opcode_t *insn = p->ainsn.insn; |
883 | ||
567a9fd8 MH |
884 | /* Skip prefixes */ |
885 | insn = skip_prefixes(insn); | |
1da177e4 | 886 | |
053de044 | 887 | regs->flags &= ~X86_EFLAGS_TF; |
1da177e4 | 888 | switch (*insn) { |
0b0122fa | 889 | case 0x9c: /* pushfl */ |
053de044 | 890 | *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF); |
8533bbe9 | 891 | *tos |= kcb->kprobe_old_flags; |
1da177e4 | 892 | break; |
0b0122fa MH |
893 | case 0xc2: /* iret/ret/lret */ |
894 | case 0xc3: | |
0b9e2cac | 895 | case 0xca: |
0b0122fa MH |
896 | case 0xcb: |
897 | case 0xcf: | |
898 | case 0xea: /* jmp absolute -- ip is correct */ | |
899 | /* ip is already adjusted, no more changes required */ | |
aa470140 | 900 | p->ainsn.boostable = 1; |
0b0122fa MH |
901 | goto no_change; |
902 | case 0xe8: /* call relative - Fix return addr */ | |
8533bbe9 | 903 | *tos = orig_ip + (*tos - copy_ip); |
1da177e4 | 904 | break; |
e7b5e11e | 905 | #ifdef CONFIG_X86_32 |
d6be29b8 MH |
906 | case 0x9a: /* call absolute -- same as call absolute, indirect */ |
907 | *tos = orig_ip + (*tos - copy_ip); | |
908 | goto no_change; | |
909 | #endif | |
1da177e4 | 910 | case 0xff: |
dc49e344 | 911 | if ((insn[1] & 0x30) == 0x10) { |
8533bbe9 MH |
912 | /* |
913 | * call absolute, indirect | |
914 | * Fix return addr; ip is correct. | |
915 | * But this is not boostable | |
916 | */ | |
917 | *tos = orig_ip + (*tos - copy_ip); | |
0b0122fa | 918 | goto no_change; |
8533bbe9 MH |
919 | } else if (((insn[1] & 0x31) == 0x20) || |
920 | ((insn[1] & 0x31) == 0x21)) { | |
921 | /* | |
922 | * jmp near and far, absolute indirect | |
923 | * ip is correct. And this is boostable | |
924 | */ | |
aa470140 | 925 | p->ainsn.boostable = 1; |
0b0122fa | 926 | goto no_change; |
1da177e4 | 927 | } |
1da177e4 LT |
928 | default: |
929 | break; | |
930 | } | |
931 | ||
aa470140 | 932 | if (p->ainsn.boostable == 0) { |
8533bbe9 MH |
933 | if ((regs->ip > copy_ip) && |
934 | (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) { | |
aa470140 MH |
935 | /* |
936 | * These instructions can be executed directly if they | 
937 | * jump back to the correct address. | 
938 | */ | |
c0f7ac3a MH |
939 | synthesize_reljump((void *)regs->ip, |
940 | (void *)orig_ip + (regs->ip - copy_ip)); | |
aa470140 MH |
941 | p->ainsn.boostable = 1; |
942 | } else { | |
943 | p->ainsn.boostable = -1; | |
944 | } | |
945 | } | |
946 | ||
8533bbe9 | 947 | regs->ip += orig_ip - copy_ip; |
65ea5b03 | 948 | |
0b0122fa | 949 | no_change: |
1ecc798c | 950 | restore_btf(); |
1da177e4 LT |
951 | } |
952 | ||
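/*
 * Editor's note (worked example, assumed addresses): after single-stepping
 * a 3-byte copy at copy_ip = 0xffffffffa0000000, regs->ip is copy_ip + 3.
 * With orig_ip = 0xffffffff81001000, the final
 * "regs->ip += orig_ip - copy_ip" above yields orig_ip + 3, so execution
 * resumes at the instruction following the probe point.
 */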
8533bbe9 MH |
953 | /* |
954 | * Interrupts are disabled on entry as trap1 is an interrupt gate and they | |
af901ca1 | 955 | * remain disabled throughout this function. |
8533bbe9 MH |
956 | */ |
957 | static int __kprobes post_kprobe_handler(struct pt_regs *regs) | |
1da177e4 | 958 | { |
e7a510f9 AM |
959 | struct kprobe *cur = kprobe_running(); |
960 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
961 | ||
962 | if (!cur) | |
1da177e4 LT |
963 | return 0; |
964 | ||
acb5b8a2 YL |
965 | resume_execution(cur, regs, kcb); |
966 | regs->flags |= kcb->kprobe_saved_flags; | |
acb5b8a2 | 967 | |
e7a510f9 AM |
968 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { |
969 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | |
970 | cur->post_handler(cur, regs, 0); | |
aa3d7e3d | 971 | } |
1da177e4 | 972 | |
8533bbe9 | 973 | /* Restore the original saved kprobes variables and continue. */ | 
e7a510f9 AM |
974 | if (kcb->kprobe_status == KPROBE_REENTER) { |
975 | restore_previous_kprobe(kcb); | |
aa3d7e3d | 976 | goto out; |
aa3d7e3d | 977 | } |
e7a510f9 | 978 | reset_current_kprobe(); |
aa3d7e3d | 979 | out: |
1da177e4 LT |
980 | preempt_enable_no_resched(); |
981 | ||
982 | /* | |
65ea5b03 | 983 | * if somebody else is singlestepping across a probe point, flags |
1da177e4 LT |
984 | * will have TF set, in which case, continue the remaining processing |
985 | * of do_debug, as if this is not a probe hit. | |
986 | */ | |
053de044 | 987 | if (regs->flags & X86_EFLAGS_TF) |
1da177e4 LT |
988 | return 0; |
989 | ||
990 | return 1; | |
991 | } | |
992 | ||
0f2fbdcb | 993 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
1da177e4 | 994 | { |
e7a510f9 AM |
995 | struct kprobe *cur = kprobe_running(); |
996 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
997 | ||
d6be29b8 | 998 | switch (kcb->kprobe_status) { |
c28f8966 PP |
999 | case KPROBE_HIT_SS: |
1000 | case KPROBE_REENTER: | |
1001 | /* | |
1002 | * We are here because the instruction being single | |
1003 | * stepped caused a page fault. We reset the current | |
65ea5b03 | 1004 | * kprobe and the ip points back to the probe address |
c28f8966 PP |
1005 | * and allow the page fault handler to continue as a |
1006 | * normal page fault. | |
1007 | */ | |
65ea5b03 | 1008 | regs->ip = (unsigned long)cur->addr; |
8533bbe9 | 1009 | regs->flags |= kcb->kprobe_old_flags; |
c28f8966 PP |
1010 | if (kcb->kprobe_status == KPROBE_REENTER) |
1011 | restore_previous_kprobe(kcb); | |
1012 | else | |
1013 | reset_current_kprobe(); | |
1da177e4 | 1014 | preempt_enable_no_resched(); |
c28f8966 PP |
1015 | break; |
1016 | case KPROBE_HIT_ACTIVE: | |
1017 | case KPROBE_HIT_SSDONE: | |
1018 | /* | |
1019 | * We increment the nmissed count for accounting; | 
8533bbe9 | 1020 | * we could also use the npre/npostfault counts to account for | 
c28f8966 PP |
1021 | * these specific fault cases. |
1022 | */ | |
1023 | kprobes_inc_nmissed_count(cur); | |
1024 | ||
1025 | /* | |
1026 | * We come here because instructions in the pre/post | |
1027 | * handler caused the page_fault, this could happen | |
1028 | * if handler tries to access user space by | |
1029 | * copy_from_user(), get_user() etc. Let the | |
1030 | * user-specified handler try to fix it first. | |
1031 | */ | |
1032 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | |
1033 | return 1; | |
1034 | ||
1035 | /* | |
1036 | * In case the user-specified fault handler returned | |
1037 | * zero, try to fix up. | |
1038 | */ | |
d6be29b8 MH |
1039 | if (fixup_exception(regs)) |
1040 | return 1; | |
6d48583b | 1041 | |
c28f8966 | 1042 | /* |
8533bbe9 | 1043 | * fixup routine could not handle it, |
c28f8966 PP |
1044 | * Let do_page_fault() fix it. |
1045 | */ | |
1046 | break; | |
1047 | default: | |
1048 | break; | |
1da177e4 LT |
1049 | } |
1050 | return 0; | |
1051 | } | |
1052 | ||
1053 | /* | |
1054 | * Wrapper routine for handling exceptions. | |
1055 | */ | |
0f2fbdcb PP |
1056 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, |
1057 | unsigned long val, void *data) | |
1da177e4 | 1058 | { |
ade1af77 | 1059 | struct die_args *args = data; |
66ff2d06 AM |
1060 | int ret = NOTIFY_DONE; |
1061 | ||
8533bbe9 | 1062 | if (args->regs && user_mode_vm(args->regs)) |
2326c770 | 1063 | return ret; |
1064 | ||
1da177e4 LT |
1065 | switch (val) { |
1066 | case DIE_INT3: | |
1067 | if (kprobe_handler(args->regs)) | |
66ff2d06 | 1068 | ret = NOTIFY_STOP; |
1da177e4 LT |
1069 | break; |
1070 | case DIE_DEBUG: | |
62edab90 P |
1071 | if (post_kprobe_handler(args->regs)) { |
1072 | /* | |
1073 | * Reset the BS bit in dr6 (pointed by args->err) to | |
1074 | * denote completion of processing | |
1075 | */ | |
1076 | (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; | |
66ff2d06 | 1077 | ret = NOTIFY_STOP; |
62edab90 | 1078 | } |
1da177e4 LT |
1079 | break; |
1080 | case DIE_GPF: | |
b506a9d0 QB |
1081 | /* |
1082 | * To be potentially processing a kprobe fault and to | |
1083 | * trust the result from kprobe_running(), we have to | 
1084 | * be non-preemptible. | 
1085 | */ | |
1086 | if (!preemptible() && kprobe_running() && | |
1da177e4 | 1087 | kprobe_fault_handler(args->regs, args->trapnr)) |
66ff2d06 | 1088 | ret = NOTIFY_STOP; |
1da177e4 LT |
1089 | break; |
1090 | default: | |
1091 | break; | |
1092 | } | |
66ff2d06 | 1093 | return ret; |
1da177e4 LT |
1094 | } |
1095 | ||
0f2fbdcb | 1096 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
1da177e4 LT |
1097 | { |
1098 | struct jprobe *jp = container_of(p, struct jprobe, kp); | |
1099 | unsigned long addr; | |
e7a510f9 | 1100 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
1da177e4 | 1101 | |
e7a510f9 | 1102 | kcb->jprobe_saved_regs = *regs; |
8533bbe9 MH |
1103 | kcb->jprobe_saved_sp = stack_addr(regs); |
1104 | addr = (unsigned long)(kcb->jprobe_saved_sp); | |
1105 | ||
1da177e4 LT |
1106 | /* |
1107 | * As Linus pointed out, gcc assumes that the callee | |
1108 | * owns the argument space and could overwrite it, e.g. | |
1109 | * tailcall optimization. So, to be absolutely safe | |
1110 | * we also save and restore enough stack bytes to cover | |
1111 | * the argument area. | |
1112 | */ | |
e7a510f9 | 1113 | memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, |
d6be29b8 | 1114 | MIN_STACK_SIZE(addr)); |
053de044 | 1115 | regs->flags &= ~X86_EFLAGS_IF; |
58dfe883 | 1116 | trace_hardirqs_off(); |
65ea5b03 | 1117 | regs->ip = (unsigned long)(jp->entry); |
1da177e4 LT |
1118 | return 1; |
1119 | } | |
1120 | ||
0f2fbdcb | 1121 | void __kprobes jprobe_return(void) |
1da177e4 | 1122 | { |
e7a510f9 AM |
1123 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
1124 | ||
d6be29b8 MH |
1125 | asm volatile ( |
1126 | #ifdef CONFIG_X86_64 | |
1127 | " xchg %%rbx,%%rsp \n" | |
1128 | #else | |
1129 | " xchgl %%ebx,%%esp \n" | |
1130 | #endif | |
1131 | " int3 \n" | |
1132 | " .globl jprobe_return_end\n" | |
1133 | " jprobe_return_end: \n" | |
1134 | " nop \n"::"b" | |
1135 | (kcb->jprobe_saved_sp):"memory"); | |
1da177e4 LT |
1136 | } |
1137 | ||
0f2fbdcb | 1138 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
1da177e4 | 1139 | { |
e7a510f9 | 1140 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
65ea5b03 | 1141 | u8 *addr = (u8 *) (regs->ip - 1); |
1da177e4 LT |
1142 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
1143 | ||
d6be29b8 MH |
1144 | if ((addr > (u8 *) jprobe_return) && |
1145 | (addr < (u8 *) jprobe_return_end)) { | |
8533bbe9 | 1146 | if (stack_addr(regs) != kcb->jprobe_saved_sp) { |
29b6cd79 | 1147 | struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; |
d6be29b8 MH |
1148 | printk(KERN_ERR |
1149 | "current sp %p does not match saved sp %p\n", | |
8533bbe9 | 1150 | stack_addr(regs), kcb->jprobe_saved_sp); |
d6be29b8 | 1151 | printk(KERN_ERR "Saved registers for jprobe %p\n", jp); |
1da177e4 | 1152 | show_registers(saved_regs); |
d6be29b8 | 1153 | printk(KERN_ERR "Current registers\n"); |
1da177e4 LT |
1154 | show_registers(regs); |
1155 | BUG(); | |
1156 | } | |
e7a510f9 | 1157 | *regs = kcb->jprobe_saved_regs; |
8533bbe9 MH |
1158 | memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), |
1159 | kcb->jprobes_stack, | |
1160 | MIN_STACK_SIZE(kcb->jprobe_saved_sp)); | |
d217d545 | 1161 | preempt_enable_no_resched(); |
1da177e4 LT |
1162 | return 1; |
1163 | } | |
1164 | return 0; | |
1165 | } | |
ba8af12f | 1166 | |
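/*
 * Editor's note: a minimal jprobe usage sketch (an assumed example
 * module; the "do_fork" signature is only illustrative).  The entry
 * handler mirrors the probed function's signature so it can read the
 * arguments, and it must end with jprobe_return(), whose int3 is caught
 * and unwound by longjmp_break_handler() above.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static long my_do_fork_entry(unsigned long clone_flags,
			     unsigned long stack_start,
			     struct pt_regs *regs,
			     unsigned long stack_size,
			     int __user *parent_tidptr,
			     int __user *child_tidptr)
{
	pr_info("do_fork: clone_flags = %lx\n", clone_flags);
	jprobe_return();	/* never returns normally */
	return 0;
}

static struct jprobe my_jprobe = {
	.entry		= my_do_fork_entry,
	.kp.symbol_name	= "do_fork",
};

static int __init jp_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit jp_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jp_init);
module_exit(jp_exit);
MODULE_LICENSE("GPL");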
c0f7ac3a MH |
1167 | |
1168 | #ifdef CONFIG_OPTPROBES | |
1169 | ||
1170 | /* Insert a call instruction at address 'from', which calls address 'to'.*/ | |
1171 | static void __kprobes synthesize_relcall(void *from, void *to) | |
1172 | { | |
1173 | __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); | |
1174 | } | |
1175 | ||
1176 | /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ | |
1177 | static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, | |
1178 | unsigned long val) | |
1179 | { | |
1180 | #ifdef CONFIG_X86_64 | |
1181 | *addr++ = 0x48; | |
1182 | *addr++ = 0xbf; | |
1183 | #else | |
1184 | *addr++ = 0xb8; | |
1185 | #endif | |
1186 | *(unsigned long *)addr = val; | |
1187 | } | |
1188 | ||
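/*
 * Editor's note (illustrative): on x86-64 this emits 48 bf <imm64>, i.e.
 * "movabs $val,%rdi" (the first argument register); on i386 it emits
 * b8 <imm32>, i.e. "mov $val,%eax", which carries the first argument
 * under the 32-bit kernel's regparm(3) calling convention.
 */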
6376b229 | 1189 | static void __used __kprobes kprobes_optinsn_template_holder(void) |
c0f7ac3a MH |
1190 | { |
1191 | asm volatile ( | |
1192 | ".global optprobe_template_entry\n" | |
1193 | "optprobe_template_entry: \n" | |
1194 | #ifdef CONFIG_X86_64 | |
1195 | /* We don't bother saving the ss register */ | |
1196 | " pushq %rsp\n" | |
1197 | " pushfq\n" | |
1198 | SAVE_REGS_STRING | |
1199 | " movq %rsp, %rsi\n" | |
1200 | ".global optprobe_template_val\n" | |
1201 | "optprobe_template_val: \n" | |
1202 | ASM_NOP5 | |
1203 | ASM_NOP5 | |
1204 | ".global optprobe_template_call\n" | |
1205 | "optprobe_template_call: \n" | |
1206 | ASM_NOP5 | |
1207 | /* Move flags to rsp */ | |
1208 | " movq 144(%rsp), %rdx\n" | |
1209 | " movq %rdx, 152(%rsp)\n" | |
1210 | RESTORE_REGS_STRING | |
1211 | /* Skip flags entry */ | |
1212 | " addq $8, %rsp\n" | |
1213 | " popfq\n" | |
1214 | #else /* CONFIG_X86_32 */ | |
1215 | " pushf\n" | |
1216 | SAVE_REGS_STRING | |
1217 | " movl %esp, %edx\n" | |
1218 | ".global optprobe_template_val\n" | |
1219 | "optprobe_template_val: \n" | |
1220 | ASM_NOP5 | |
1221 | ".global optprobe_template_call\n" | |
1222 | "optprobe_template_call: \n" | |
1223 | ASM_NOP5 | |
1224 | RESTORE_REGS_STRING | |
1225 | " addl $4, %esp\n" /* skip cs */ | |
1226 | " popf\n" | |
1227 | #endif | |
1228 | ".global optprobe_template_end\n" | |
1229 | "optprobe_template_end: \n"); | |
1230 | } | |
1231 | ||
1232 | #define TMPL_MOVE_IDX \ | |
1233 | ((long)&optprobe_template_val - (long)&optprobe_template_entry) | |
1234 | #define TMPL_CALL_IDX \ | |
1235 | ((long)&optprobe_template_call - (long)&optprobe_template_entry) | |
1236 | #define TMPL_END_IDX \ | |
1237 | ((long)&optprobe_template_end - (long)&optprobe_template_entry) | |
1238 | ||
1239 | #define INT3_SIZE sizeof(kprobe_opcode_t) | |
1240 | ||
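/*
 * Editor's note (illustrative): the out-of-line buffer built by
 * arch_prepare_optimized_kprobe() below is laid out as
 *
 *   buf[0 .. TMPL_END_IDX)              template copy
 *   buf[TMPL_END_IDX .. +optinsn.size)  relocated original instructions
 *   buf[...]                            jmp back to kp.addr + optinsn.size
 *
 * with the ASM_NOP5 slots at TMPL_MOVE_IDX / TMPL_CALL_IDX patched into
 * "mov $op, %arg1" and "call optimized_callback".
 */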
1241 | /* Optimized kprobe callback function: called from optinsn */ | 
1242 | static void __kprobes optimized_callback(struct optimized_kprobe *op, | |
1243 | struct pt_regs *regs) | |
1244 | { | |
1245 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
9bbeacf5 | 1246 | unsigned long flags; |
c0f7ac3a | 1247 | |
6274de49 MH |
1248 | /* This is possible if op is under delayed unoptimizing */ |
1249 | if (kprobe_disabled(&op->kp)) | |
1250 | return; | |
1251 | ||
9bbeacf5 | 1252 | local_irq_save(flags); |
c0f7ac3a MH |
1253 | if (kprobe_running()) { |
1254 | kprobes_inc_nmissed_count(&op->kp); | |
1255 | } else { | |
1256 | /* Save skipped registers */ | |
1257 | #ifdef CONFIG_X86_64 | |
1258 | regs->cs = __KERNEL_CS; | |
1259 | #else | |
1260 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | |
1261 | regs->gs = 0; | |
1262 | #endif | |
1263 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; | |
1264 | regs->orig_ax = ~0UL; | |
1265 | ||
b76834bc | 1266 | __this_cpu_write(current_kprobe, &op->kp); |
c0f7ac3a MH |
1267 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
1268 | opt_pre_handler(&op->kp, regs); | |
b76834bc | 1269 | __this_cpu_write(current_kprobe, NULL); |
c0f7ac3a | 1270 | } |
9bbeacf5 | 1271 | local_irq_restore(flags); |
c0f7ac3a MH |
1272 | } |
1273 | ||
1274 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | |
1275 | { | |
1276 | int len = 0, ret; | |
1277 | ||
1278 | while (len < RELATIVEJUMP_SIZE) { | |
1279 | ret = __copy_instruction(dest + len, src + len, 1); | |
1280 | if (!ret || !can_boost(dest + len)) | |
1281 | return -EINVAL; | |
1282 | len += ret; | |
1283 | } | |
1284 | /* Check whether the address range is reserved */ | |
1285 | if (ftrace_text_reserved(src, src + len - 1) || | |
4c3ef6d7 JB |
1286 | alternatives_text_reserved(src, src + len - 1) || |
1287 | jump_label_text_reserved(src, src + len - 1)) | |
c0f7ac3a MH |
1288 | return -EBUSY; |
1289 | ||
1290 | return len; | |
1291 | } | |
1292 | ||
1293 | /* Check whether insn is indirect jump */ | |
1294 | static int __kprobes insn_is_indirect_jump(struct insn *insn) | |
1295 | { | |
1296 | return ((insn->opcode.bytes[0] == 0xff && | |
1297 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ | |
1298 | insn->opcode.bytes[0] == 0xea); /* Segment based jump */ | |
1299 | } | |
1300 | ||
1301 | /* Check whether insn jumps into specified address range */ | |
1302 | static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) | |
1303 | { | |
1304 | unsigned long target = 0; | |
1305 | ||
1306 | switch (insn->opcode.bytes[0]) { | |
1307 | case 0xe0: /* loopne */ | |
1308 | case 0xe1: /* loope */ | |
1309 | case 0xe2: /* loop */ | |
1310 | case 0xe3: /* jcxz */ | |
1311 | case 0xe9: /* near relative jump */ | |
1312 | case 0xeb: /* short relative jump */ | |
1313 | break; | |
1314 | case 0x0f: | |
1315 | if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ | |
1316 | break; | |
1317 | return 0; | |
1318 | default: | |
1319 | if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ | |
1320 | break; | |
1321 | return 0; | |
1322 | } | |
1323 | target = (unsigned long)insn->next_byte + insn->immediate.value; | |
1324 | ||
1325 | return (start <= target && target <= start + len); | |
1326 | } | |
1327 | ||
1328 | /* Decode the whole function to ensure no instruction jumps into the target */ | 
1329 | static int __kprobes can_optimize(unsigned long paddr) | |
1330 | { | |
86b4ce31 | 1331 | unsigned long addr, __addr, size = 0, offset = 0; |
c0f7ac3a MH |
1332 | struct insn insn; |
1333 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | |
c0f7ac3a MH |
1334 | |
1335 | /* Lookup symbol including addr */ | |
6abded71 | 1336 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) |
c0f7ac3a MH |
1337 | return 0; |
1338 | ||
2a8247a2 JO |
1339 | /* |
1340 | * Do not optimize in the entry code due to the unstable | |
1341 | * stack handling. | |
1342 | */ | |
1343 | if ((paddr >= (unsigned long )__entry_text_start) && | |
1344 | (paddr < (unsigned long )__entry_text_end)) | |
1345 | return 0; | |
1346 | ||
c0f7ac3a MH |
1347 | /* Check there is enough space for a relative jump. */ |
1348 | if (size - offset < RELATIVEJUMP_SIZE) | |
1349 | return 0; | |
1350 | ||
1351 | /* Decode instructions */ | |
1352 | addr = paddr - offset; | |
1353 | while (addr < paddr - offset + size) { /* Decode until function end */ | |
1354 | if (search_exception_tables(addr)) | |
1355 | /* | |
1356 | * Since some fixup code will jump into this function, | 
1357 | * we can't optimize a kprobe in this function. | 
1358 | */ | |
1359 | return 0; | |
86b4ce31 MH |
1360 | __addr = recover_probed_instruction(buf, addr); |
1361 | kernel_insn_init(&insn, (void *)__addr); | |
c0f7ac3a | 1362 | insn_get_length(&insn); |
86b4ce31 MH |
1363 | /* Another subsystem puts a breakpoint */ |
1364 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | |
1365 | return 0; | |
c0f7ac3a MH |
1366 | /* Recover address */ |
1367 | insn.kaddr = (void *)addr; | |
1368 | insn.next_byte = (void *)(addr + insn.length); | |
1369 | /* Check that no instruction jumps into the target */ | 
1370 | if (insn_is_indirect_jump(&insn) || | |
1371 | insn_jump_into_range(&insn, paddr + INT3_SIZE, | |
1372 | RELATIVE_ADDR_SIZE)) | |
1373 | return 0; | |
1374 | addr += insn.length; | |
1375 | } | |
1376 | ||
1377 | return 1; | |
1378 | } | |
1379 | ||
1380 | /* Check whether the optimized_kprobe can actually be optimized. */ | 
1381 | int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) | |
1382 | { | |
1383 | int i; | |
1384 | struct kprobe *p; | |
1385 | ||
1386 | for (i = 1; i < op->optinsn.size; i++) { | |
1387 | p = get_kprobe(op->kp.addr + i); | |
1388 | if (p && !kprobe_disabled(p)) | |
1389 | return -EEXIST; | |
1390 | } | |
1391 | ||
1392 | return 0; | |
1393 | } | |
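/*
 * The scan starts at offset 1 because offset 0 is op->kp itself; any
 * other enabled kprobe inside the instructions to be copied would be
 * bypassed once the relative jump is installed, so optimization must
 * be refused with -EEXIST.
 */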
1394 | ||
1395 | /* Check whether the addr is within the optimized instructions. */ | |
1396 | int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op, | |
1397 | unsigned long addr) | |
1398 | { | |
1399 | return ((unsigned long)op->kp.addr <= addr && | |
1400 | (unsigned long)op->kp.addr + op->optinsn.size > addr); | |
1401 | } | |
1402 | ||
1403 | /* Free optimized instruction slot */ | |
1404 | static __kprobes | |
1405 | void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) | |
1406 | { | |
1407 | if (op->optinsn.insn) { | |
1408 | free_optinsn_slot(op->optinsn.insn, dirty); | |
1409 | op->optinsn.insn = NULL; | |
1410 | op->optinsn.size = 0; | |
1411 | } | |
1412 | } | |
1413 | ||
1414 | void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) | |
1415 | { | |
1416 | __arch_remove_optimized_kprobe(op, 1); | |
1417 | } | |
1418 | ||
1419 | /* | |
1420 | * Copy the target instructions that will be replaced. | |
1421 | * The target instructions MUST be relocatable (checked inside). | |
86b4ce31 | 1422 | * This is called when a new aggr(opt)probe is allocated or reused. | |
c0f7ac3a MH |
1423 | */ |
1424 | int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) | |
1425 | { | |
1426 | u8 *buf; | |
1427 | int ret; | |
1428 | long rel; | |
1429 | ||
1430 | if (!can_optimize((unsigned long)op->kp.addr)) | |
1431 | return -EILSEQ; | |
1432 | ||
1433 | op->optinsn.insn = get_optinsn_slot(); | |
1434 | if (!op->optinsn.insn) | |
1435 | return -ENOMEM; | |
1436 | ||
1437 | /* | |
1438 | * Verify that the address gap is within the 2GB range that a | |
1439 | * 32-bit relative jump can cover. | |
1440 | */ | |
1441 | rel = (long)op->optinsn.insn - ((long)op->kp.addr + RELATIVEJUMP_SIZE); | |
1442 | if (abs(rel) > 0x7fffffff) | |
1443 | return -ERANGE; | |
1444 | ||
1445 | buf = (u8 *)op->optinsn.insn; | |
1446 | ||
1447 | /* Copy instructions into the out-of-line buffer */ | |
1448 | ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); | |
1449 | if (ret < 0) { | |
1450 | __arch_remove_optimized_kprobe(op, 0); | |
1451 | return ret; | |
1452 | } | |
1453 | op->optinsn.size = ret; | |
1454 | ||
1455 | /* Copy the arch-dependent instance from the template */ | |
1456 | memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); | |
1457 | ||
1458 | /* Set probe information */ | |
1459 | synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); | |
1460 | ||
1461 | /* Set probe function call */ | |
1462 | synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); | |
1463 | ||
1464 | /* Set the returning jmp instruction at the tail of the out-of-line buffer */ | |
1465 | synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, | |
1466 | (u8 *)op->kp.addr + op->optinsn.size); | |
1467 | ||
1468 | flush_icache_range((unsigned long) buf, | |
1469 | (unsigned long) buf + TMPL_END_IDX + | |
1470 | op->optinsn.size + RELATIVEJUMP_SIZE); | |
1471 | return 0; | |
1472 | } | |
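/*
 * Rough layout of the out-of-line buffer prepared above (a sketch
 * inferred from the template indices, not a byte-accurate map):
 *
 *   buf + 0             template head (optprobe_template_entry)
 *   buf + TMPL_MOVE_IDX synthesized "arg1 = op" setup
 *   buf + TMPL_CALL_IDX relative call to optimized_callback()
 *   buf + TMPL_END_IDX  copied original instructions (optinsn.size)
 *   tail                relative jump back to kp.addr + optinsn.size
 */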
1473 | ||
cd7ebe22 MH |
1474 | #define MAX_OPTIMIZE_PROBES 256 |
1475 | static struct text_poke_param *jump_poke_params; | |
1476 | static struct jump_poke_buffer { | |
1477 | u8 buf[RELATIVEJUMP_SIZE]; | |
1478 | } *jump_poke_bufs; | |
1479 | ||
1480 | static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, | |
1481 | u8 *insn_buf, | |
1482 | struct optimized_kprobe *op) | |
c0f7ac3a | 1483 | { |
c0f7ac3a MH |
1484 | s32 rel = (s32)((long)op->optinsn.insn - |
1485 | ((long)op->kp.addr + RELATIVEJUMP_SIZE)); | |
1486 | ||
1487 | /* Back up the instructions that will be replaced by the jump address */ | |
1488 | memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, | |
1489 | RELATIVE_ADDR_SIZE); | |
1490 | ||
cd7ebe22 MH |
1491 | insn_buf[0] = RELATIVEJUMP_OPCODE; |
1492 | *(s32 *)(&insn_buf[1]) = rel; | |
1493 | ||
1494 | tprm->addr = op->kp.addr; | |
1495 | tprm->opcode = insn_buf; | |
1496 | tprm->len = RELATIVEJUMP_SIZE; | |
1497 | } | |
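/*
 * Once this parameter is poked in, the probed address holds
 * "e9 <rel32>" (RELATIVEJUMP_OPCODE plus a 4-byte displacement)
 * instead of "cc" plus the four bytes just backed up into
 * copied_insn.
 */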
1498 | ||
1499 | /* | |
1500 | * Replace breakpoints (int3) with relative jumps. | |
1501 | * The caller must hold kprobe_mutex and text_mutex. | |
1502 | */ | |
1503 | void __kprobes arch_optimize_kprobes(struct list_head *oplist) | |
1504 | { | |
1505 | struct optimized_kprobe *op, *tmp; | |
1506 | int c = 0; | |
1507 | ||
1508 | list_for_each_entry_safe(op, tmp, oplist, list) { | |
1509 | WARN_ON(kprobe_disabled(&op->kp)); | |
1510 | /* Set up the poke parameters */ | |
1511 | setup_optimize_kprobe(&jump_poke_params[c], | |
1512 | jump_poke_bufs[c].buf, op); | |
1513 | list_del_init(&op->list); | |
1514 | if (++c >= MAX_OPTIMIZE_PROBES) | |
1515 | break; | |
1516 | } | |
c0f7ac3a MH |
1517 | |
1518 | /* | |
1519 | * text_poke_smp doesn't support modifying NMI/MCE code. | |
1520 | * However, since kprobes itself doesn't support probing NMI/MCE | |
1521 | * code either, this is not a problem. | |
1522 | */ | |
cd7ebe22 | 1523 | text_poke_smp_batch(jump_poke_params, c); |
c0f7ac3a MH |
1524 | } |
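/*
 * At most MAX_OPTIMIZE_PROBES entries are poked per call; anything
 * left on oplist is presumably picked up by a later pass of the
 * optimizer.
 */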
1525 | ||
f984ba4e MH |
1526 | static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, |
1527 | u8 *insn_buf, | |
1528 | struct optimized_kprobe *op) | |
1529 | { | |
1530 | /* Set the first byte to int3 for kprobes */ | |
1531 | insn_buf[0] = BREAKPOINT_INSTRUCTION; | |
1532 | memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | |
1533 | ||
1534 | tprm->addr = op->kp.addr; | |
1535 | tprm->opcode = insn_buf; | |
1536 | tprm->len = RELATIVEJUMP_SIZE; | |
1537 | } | |
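/*
 * The poke target thus becomes "cc" followed by the four original
 * bytes saved in copied_insn, i.e. the probed-but-unoptimized state.
 */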
1538 | ||
1539 | /* | |
1540 | * Recover original instructions and breakpoints from relative jumps. | |
1541 | * The caller must hold kprobe_mutex. | |
1542 | */ | |
1543 | void __kprobes arch_unoptimize_kprobes(struct list_head *oplist, | |
1544 | struct list_head *done_list) | |
1545 | { | |
1546 | struct optimized_kprobe *op, *tmp; | |
1547 | int c = 0; | |
1548 | ||
1549 | list_for_each_entry_safe(op, tmp, oplist, list) { | |
1550 | /* Set up the poke parameters */ | |
1551 | setup_unoptimize_kprobe(&jump_poke_params[c], | |
1552 | jump_poke_bufs[c].buf, op); | |
1553 | list_move(&op->list, done_list); | |
1554 | if (++c >= MAX_OPTIMIZE_PROBES) | |
1555 | break; | |
1556 | } | |
1557 | ||
1558 | /* | |
1559 | * text_poke_smp doesn't support modifying NMI/MCE code. | |
1560 | * However, since kprobes itself doesn't support probing NMI/MCE | |
1561 | * code either, this is not a problem. | |
1562 | */ | |
1563 | text_poke_smp_batch(jump_poke_params, c); | |
1564 | } | |
1565 | ||
c0f7ac3a MH |
1566 | /* Replace a relative jump with a breakpoint (int3). */ |
1567 | void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) | |
1568 | { | |
1569 | u8 buf[RELATIVEJUMP_SIZE]; | |
1570 | ||
1572 | /* Set the first byte to int3 for kprobes */ | |
1572 | buf[0] = BREAKPOINT_INSTRUCTION; | |
1573 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | |
1574 | text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); | |
1575 | } | |
1576 | ||
1577 | static int __kprobes setup_detour_execution(struct kprobe *p, | |
1578 | struct pt_regs *regs, | |
1579 | int reenter) | |
1580 | { | |
1581 | struct optimized_kprobe *op; | |
1582 | ||
1583 | if (p->flags & KPROBE_FLAG_OPTIMIZED) { | |
1584 | /* This kprobe can really run the optimized path. */ | |
1585 | op = container_of(p, struct optimized_kprobe, kp); | |
1586 | /* Detour through copied instructions */ | |
1587 | regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; | |
1588 | if (!reenter) | |
1589 | reset_current_kprobe(); | |
1590 | preempt_enable_no_resched(); | |
1591 | return 1; | |
1592 | } | |
1593 | return 0; | |
1594 | } | |
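/*
 * The detour sets regs->ip past the template head (+ TMPL_END_IDX),
 * i.e. at the copied original instructions, which end with a jump
 * back to kp.addr + optinsn.size; the pre-handler has already run
 * via the int3 path, so no single-stepping is needed.
 */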
cd7ebe22 MH |
1595 | |
1596 | static int __kprobes init_poke_params(void) | |
1597 | { | |
1598 | /* Allocate code buffer and parameter array */ | |
1599 | jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * | |
1600 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | |
1601 | if (!jump_poke_bufs) | |
1602 | return -ENOMEM; | |
1603 | ||
1604 | jump_poke_params = kmalloc(sizeof(struct text_poke_param) * | |
1605 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | |
1606 | if (!jump_poke_params) { | |
1607 | kfree(jump_poke_bufs); | |
1608 | jump_poke_bufs = NULL; | |
1609 | return -ENOMEM; | |
1610 | } | |
1611 | ||
1612 | return 0; | |
1613 | } | |
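/*
 * Both arrays are sized for the worst-case batch (MAX_OPTIMIZE_PROBES)
 * up front, so the optimize/unoptimize paths never need to allocate
 * at text-poke time.
 */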
1614 | #else /* !CONFIG_OPTPROBES */ | |
1615 | static int __kprobes init_poke_params(void) | |
1616 | { | |
1617 | return 0; | |
1618 | } | |
c0f7ac3a MH |
1619 | #endif |
1620 | ||
6772926b | 1621 | int __init arch_init_kprobes(void) |
ba8af12f | 1622 | { |
cd7ebe22 | 1623 | return init_poke_params(); |
ba8af12f | 1624 | } |
bf8f6e5b AM |
1625 | |
1626 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | |
1627 | { | |
bf8f6e5b AM |
1628 | return 0; |
1629 | } |