arm64: Blacklist non-kprobe-able symbol
[deliverable/linux.git] / arch / arm64 / kernel / probes / kprobes.c
CommitLineData
2dd0e8d2
SP
1/*
2 * arch/arm64/kernel/probes/kprobes.c
3 *
4 * Kprobes support for ARM64
5 *
6 * Copyright (C) 2013 Linaro Limited.
7 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19#include <linux/kernel.h>
20#include <linux/kprobes.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/stop_machine.h>
24#include <linux/stringify.h>
25#include <asm/traps.h>
26#include <asm/ptrace.h>
27#include <asm/cacheflush.h>
28#include <asm/debug-monitors.h>
29#include <asm/system_misc.h>
30#include <asm/insn.h>
31#include <asm/uaccess.h>
32#include <asm/irq.h>
33
34#include "decode-insn.h"
35
/*
 * Number of stack bytes to save/restore around a jprobe handler: from
 * @addr up to the top of whichever stack @addr currently lives on (IRQ
 * stack or the task stack), capped at IRQ_STACK_SIZE or MAX_STACK_SIZE
 * respectively so we never copy more than the jprobes_stack buffer holds.
 */
#define MIN_STACK_SIZE(addr)	(on_irq_stack(addr, raw_smp_processor_id()) ? \
	min((unsigned long)IRQ_STACK_SIZE,	\
	IRQ_STACK_PTR(raw_smp_processor_id()) - (addr)) : \
	min((unsigned long)MAX_STACK_SIZE,	\
	(unsigned long)current_thread_info() + THREAD_START_SP - (addr)))

/* Label inside jprobe_return()'s asm; matched in longjmp_break_handler(). */
void jprobe_return_break(void);

/* Per-CPU kprobe state: the probe currently being handled, plus its
 * control block (saved regs, single-step context, status). */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
46
/*
 * Copy the probed instruction into the out-of-line single-step slot and
 * make it visible to the I-side, then record where execution must resume
 * after the instruction has been stepped.
 */
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	p->ainsn.insn[0] = cpu_to_le32(p->opcode);

	/* Sync I/D caches so the CPU fetches the freshly written opcode. */
	flush_icache_range((uintptr_t) (p->ainsn.insn),
			   (uintptr_t) (p->ainsn.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * Needs restoring of return address after stepping xol:
	 * resume at the instruction following the probed one.
	 */
	p->ainsn.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}
62
63int __kprobes arch_prepare_kprobe(struct kprobe *p)
64{
65 unsigned long probe_addr = (unsigned long)p->addr;
66 extern char __start_rodata[];
67 extern char __end_rodata[];
68
69 if (probe_addr & 0x3)
70 return -EINVAL;
71
72 /* copy instruction */
73 p->opcode = le32_to_cpu(*p->addr);
74
75 if (in_exception_text(probe_addr))
76 return -EINVAL;
77 if (probe_addr >= (unsigned long) __start_rodata &&
78 probe_addr <= (unsigned long) __end_rodata)
79 return -EINVAL;
80
81 /* decode instruction */
82 switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
83 case INSN_REJECTED: /* insn not supported */
84 return -EINVAL;
85
86 case INSN_GOOD: /* instruction uses slot */
87 p->ainsn.insn = get_insn_slot();
88 if (!p->ainsn.insn)
89 return -ENOMEM;
90 break;
91 };
92
93 /* prepare the instruction */
94 arch_prepare_ss_slot(p);
95
96 return 0;
97}
98
99static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
100{
101 void *addrs[1];
102 u32 insns[1];
103
104 addrs[0] = (void *)addr;
105 insns[0] = (u32)opcode;
106
107 return aarch64_insn_patch_text(addrs, insns, 1);
108}
109
110/* arm kprobe: install breakpoint in text */
111void __kprobes arch_arm_kprobe(struct kprobe *p)
112{
113 patch_text(p->addr, BRK64_OPCODE_KPROBES);
114}
115
116/* disarm kprobe: remove breakpoint from text */
/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	/* Write back the original instruction saved in arch_prepare_kprobe(). */
	patch_text(p->addr, p->opcode);
}
121
122void __kprobes arch_remove_kprobe(struct kprobe *p)
123{
124 if (p->ainsn.insn) {
125 free_insn_slot(p->ainsn.insn, 0);
126 p->ainsn.insn = NULL;
127 }
128}
129
/* Stash the currently-running kprobe so a recursive hit can be handled. */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
135
/* Undo save_previous_kprobe(): reinstate the outer probe's state. */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
141
/* Mark @p as the kprobe being handled on this CPU. */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
146
147/*
148 * The D-flag (Debug mask) is set (masked) upon debug exception entry.
149 * Kprobes needs to clear (unmask) D-flag -ONLY- in case of recursive
150 * probe i.e. when probe hit from kprobe handler context upon
151 * executing the pre/post handlers. In this case we return with
152 * D-flag clear so that single-stepping can be carried-out.
153 *
154 * Leave D-flag set in all other cases.
155 */
156static void __kprobes
157spsr_set_debug_flag(struct pt_regs *regs, int mask)
158{
159 unsigned long spsr = regs->pstate;
160
161 if (mask)
162 spsr |= PSR_D_BIT;
163 else
164 spsr &= ~PSR_D_BIT;
165
166 regs->pstate = spsr;
167}
168
169/*
170 * Interrupts need to be disabled before single-step mode is set, and not
171 * reenabled until after single-step mode ends.
172 * Without disabling interrupt on local CPU, there is a chance of
173 * interrupt occurrence in the period of exception return and start of
174 * out-of-line single-step, that result in wrongly single stepping
175 * into the interrupt handler.
176 */
/* Record the pre-step PSTATE and mask IRQs for the duration of the step. */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}
183
184static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
185 struct pt_regs *regs)
186{
187 if (kcb->saved_irqflag & PSR_I_BIT)
188 regs->pstate |= PSR_I_BIT;
189 else
190 regs->pstate &= ~PSR_I_BIT;
191}
192
/*
 * Arm the single-step context: after stepping the one instruction at
 * @addr (the xol slot), the PC must land at addr + 4.
 */
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}
199
/* Disarm the single-step context once the step has completed. */
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}
205
/*
 * Arrange for the probed instruction to be single-stepped out-of-line.
 * @reenter != 0 means we hit this probe from within another kprobe's
 * handler; the outer probe's state is saved first.
 *
 * Ordering matters here: the ss context is armed, the D-flag is cleared
 * (reentry only) so the step exception can be taken, IRQs are masked,
 * hardware single-step is enabled, and only then is the PC redirected
 * to the slot.
 */
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	BUG_ON(!p->ainsn.insn);

	/* prepare for single stepping */
	slot = (unsigned long)p->ainsn.insn;

	set_ss_context(kcb, slot);	/* mark pending ss */

	if (kcb->kprobe_status == KPROBE_REENTER)
		spsr_set_debug_flag(regs, 0);

	/* IRQs and single stepping do not mix well. */
	kprobes_save_local_irqflag(kcb, regs);
	kernel_enable_single_step(regs);
	instruction_pointer_set(regs, slot);
}
235
/*
 * Handle a probe hit while another kprobe is already active on this CPU.
 * Returns 1 if the hit was consumed (or BUGs on an unrecoverable state),
 * 0 on an unexpected kprobe_status.
 */
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		/* Hit from a pre/post handler: step it, counting a miss. */
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/* A probe fired while single-stepping: cannot recover. */
		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
259
/*
 * Runs after the out-of-line single-step completes: fix up the PC,
 * unwind reentrancy state or invoke the post handler, and clear the
 * current kprobe.
 */
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)	{
		/* post_handler can hit breakpoint and single step
		 * again, so we enable D-flag for recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
288
/*
 * Called when a fault occurs while a kprobe is active. Returns 1 if the
 * fault was handled here (or fixed up), 0 to let normal fault handling
 * proceed.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		/* Tear down the single-step state set up in setup_singlestep(). */
		kernel_disable_single_step();
		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 1);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	/* Not handled here; fall back to the normal fault path. */
	return 0;
}
346
/*
 * Notifier stub required by the generic kprobes core; arm64 handles its
 * exceptions through the brk/step hooks instead, so nothing to do here.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
352
/*
 * BRK exception entry point for kprobes: dispatch a probe hit at the
 * faulting PC, handling first hits, reentrant hits, and jprobe break
 * handlers.
 */
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			/* A probe is already active: reentrant hit. */
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return, keep PSTATE D-flag enabled until
			 * pre_handler return back.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
				return;
			}
		}
	} else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
	    BRK64_OPCODE_KPROBES) && cur_kprobe) {
		/* We probably hit a jprobe.  Call its break handler. */
		if (cur_kprobe->break_handler  &&
		     cur_kprobe->break_handler(cur_kprobe, regs)) {
			setup_singlestep(cur_kprobe, regs, kcb, 0);
			return;
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it.  Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address.  In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
}
407
408static int __kprobes
409kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
410{
411 if ((kcb->ss_ctx.ss_pending)
412 && (kcb->ss_ctx.match_addr == addr)) {
413 clear_ss_context(kcb); /* clear pending ss */
414 return DBG_HOOK_HANDLED;
415 }
416 /* not ours, kprobes should ignore it */
417 return DBG_HOOK_ERROR;
418}
419
/*
 * Single-step exception hook: if this step is ours, restore IRQ state,
 * turn hardware single-step back off, restore the D-flag for a reentrant
 * probe, and run the post-step processing.
 */
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 1);

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}
441
/* BRK exception hook: always claims the exception for kprobes. */
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}
448
/*
 * Jprobe pre-handler: save the full register state and enough of the
 * stack to survive the handler, then redirect execution to jp->entry.
 * Always returns 1 so the kprobe core skips single-stepping.
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_ptr = kernel_stack_pointer(regs);

	kcb->jprobe_saved_regs = *regs;
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
	       MIN_STACK_SIZE(stack_ptr));

	instruction_pointer_set(regs, (unsigned long) jp->entry);
	/* Keep preemption and graph tracing off until longjmp_break_handler(). */
	preempt_disable();
	pause_graph_tracing();
	return 1;
}
471
/*
 * Called by the jprobe handler to return to the probed function: restore
 * the saved stack pointer and trap back into the kprobe BRK handler via
 * the jprobe_return_break label.
 */
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * Jprobe handler return by entering break exception,
	 * encoded same as kprobe, but with following conditions
	 * -a magic number in x0 to identify from rest of other kprobes.
	 * -restore stack addr to original saved pt_regs
	 */
	asm volatile ("ldr x0, [%0]\n\t"
		      "mov sp, x0\n\t"
		      ".globl jprobe_return_break\n\t"
		      "jprobe_return_break:\n\t"
		      "brk %1\n\t"
		      :
		      : "r"(&kcb->jprobe_saved_regs.sp),
			"I"(BRK64_ESR_KPROBES)
		      : "memory");
}
492
/*
 * Break handler paired with jprobe_return(): verify we stopped at the
 * jprobe_return_break trap with the expected stack pointer, then restore
 * the registers and stack saved in setjmp_pre_handler().  Returns 1 if
 * this break belonged to a jprobe, 0 otherwise.
 */
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_addr = kcb->jprobe_saved_regs.sp;
	long orig_sp = kernel_stack_pointer(regs);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	/* Not our trap if the PC is not at the jprobe_return_break label. */
	if (instruction_pointer(regs) != (u64) jprobe_return_break)
		return 0;

	if (orig_sp != stack_addr) {
		/* SP mismatch means the saved stack snapshot is unusable. */
		struct pt_regs *saved_regs =
			(struct pt_regs *)kcb->jprobe_saved_regs.sp;
		pr_err("current sp %lx does not match saved sp %lx\n",
		       orig_sp, stack_addr);
		pr_err("Saved registers for jprobe %p\n", jp);
		show_regs(saved_regs);
		pr_err("Current registers\n");
		show_regs(regs);
		BUG();
	}
	unpause_graph_tracing();
	*regs = kcb->jprobe_saved_regs;
	memcpy((void *)stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}
521
/* Arch-specific kprobes init hook; nothing to set up on arm64. */
int __init arch_init_kprobes(void)
{
	return 0;
}
This page took 0.043357 seconds and 5 git commands to generate.