arch/arm64/kernel/hw_breakpoint.c
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

/* Determine number of BRP registers available. */
static int get_num_brps(void)
{
	return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
}

/* Determine number of WRP registers available. */
static int get_num_wrps(void)
{
	return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
}
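
/*
 * Worked example (values assumed, not taken from this file):
 * ID_AA64DFR0_EL1 holds the breakpoint count minus one in bits [15:12]
 * and the watchpoint count minus one in bits [23:20]. For a CPU that
 * reports 0x10305106 (a value seen on some Cortex-A57 implementations),
 * bits [15:12] are 0x5 and bits [23:20] are 0x3, so the helpers above
 * return 6 breakpoints and 4 watchpoints.
 */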

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF,  0, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  1, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  2, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  3, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  4, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  5, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  6, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  7, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  8, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint register %d\n", n);
	}
	isb();
}
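
/*
 * The case ladders above exist because MRS/MSR instructions encode the
 * debug register index statically; there is no way to select DBGBVRn_EL1
 * at run time, so every index gets its own hard-coded access. A rough
 * sketch of the resulting mapping (register names assumed from
 * asm/hw_breakpoint.h):
 *
 *	write_wb_reg(AARCH64_DBG_REG_BVR, 2, addr);
 *		expands to roughly: msr dbgbvr2_el1, <addr>
 *	val = read_wb_reg(AARCH64_DBG_REG_WCR, 0);
 *		expands to roughly: mrs <val>, dbgwcr0_el1
 */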

/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum debug_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warning("invalid breakpoint privilege level %d\n", privilege);
		return -EINVAL;
	}
}

enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
	HW_BREAKPOINT_RESTORE
};

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *			      operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp,
				    enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		case HW_BREAKPOINT_RESTORE:
			if (*slot == bp)
				return i;
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}
	return -ENOSPC;
}
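
/*
 * Usage sketch for the helper above (hypothetical values, not code from
 * this file): INSTALL claims the first free slot, RESTORE reports the
 * slot already owning the event, UNINSTALL clears it.
 *
 *	struct perf_event *slots[2] = { NULL, NULL };
 *	int i;
 *
 *	i = hw_breakpoint_slot_setup(slots, 2, bp, HW_BREAKPOINT_INSTALL);
 *		// i == 0, slots[0] == bp
 *	i = hw_breakpoint_slot_setup(slots, 2, bp, HW_BREAKPOINT_RESTORE);
 *		// i == 0, slot left untouched
 *	i = hw_breakpoint_slot_setup(slots, 2, bp, HW_BREAKPOINT_UNINSTALL);
 *		// i == 0, slots[0] == NULL again
 */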

static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
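
/*
 * Worked example (assuming a native 64-bit task with VA_BITS == 39, so
 * TASK_SIZE == 1UL << 39): a watchpoint at the kernel address
 * 0xffffffc000000000 satisfies va >= TASK_SIZE and is treated as a
 * kernel breakpoint, while a user address such as 0x400000 is not.
 */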

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
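
/*
 * Example conversion (constants as defined by the arm64 and generic
 * headers): a control word of { .type = ARM_BREAKPOINT_STORE,
 * .len = ARM_BREAKPOINT_LEN_4 } yields *gen_type == HW_BREAKPOINT_W and
 * *gen_len == HW_BREAKPOINT_LEN_4; any unsupported type or length makes
 * the function return -EINVAL.
 */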

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_task()) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 * that breakpoints should be sizeof(long). This
			 * is nonsense. For now, we fix up the parameter
			 * but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_task()) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
			/* Fall through: any other length is misaligned. */
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
			/* Fall through: reject everything else. */
		default:
			return -EINVAL;
		}

		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
		return -EINVAL;

	return 0;
}
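
/*
 * Worked example of the compat fixup above (BAS constants assumed from
 * asm/hw_breakpoint.h, where ARM_BREAKPOINT_LEN_2 == 0b0011): an AArch32
 * task requests a 2-byte watchpoint at 0x1002. alignment_mask is 0x3,
 * so offset == 2 and the halfword case is allowed. The address is
 * rounded down to 0x1000 and the byte-address-select mask is shifted to
 * 0b0011 << 2 == 0b1100, arming bytes 2-3 of the aligned word.
 */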

/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum debug_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}

/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
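
/*
 * Matching example for the handler above (hypothetical addresses,
 * following on from the compat fixup sketched earlier): a Thumb
 * breakpoint requested at 0x8002 is programmed with BVR == 0x8000 and a
 * byte-address-select mask of 0b1100. On a breakpoint exception at
 * PC == 0x8002, val == (addr & ~0x3) == 0x8000 matches, and
 * (1 << (addr & 0x3)) == 0b0100 overlaps the mask, so the event fires.
 */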

static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);

	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier some slots might be initialized; if so they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}

static int hw_breakpoint_reset_notify(struct notifier_block *self,
				      unsigned long action,
				      void *hcpu)
{
	int cpu = (long)hcpu;
	if (action == CPU_ONLINE)
		smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_ARM64_CPU_SUSPEND
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
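
/*
 * Minimal usage sketch against the generic <linux/hw_breakpoint.h> API
 * (modelled on samples/hw_breakpoint; the symbol watched_var and the
 * variable wide_bp are hypothetical, with only minimal error handling):
 * a per-CPU kernel watchpoint installed this way reaches the wp_on_reg
 * slots above via arch_install_hw_breakpoint(). Because it supplies an
 * overflow handler, the handlers above never single-step it, and
 * because it is not bound to a task, it passes the per-task EL1 check
 * in arch_validate_hwbkpt_settings().
 *
 *	static struct perf_event * __percpu *wide_bp;
 *
 *	static void sketch_handler(struct perf_event *bp,
 *				   struct perf_sample_data *data,
 *				   struct pt_regs *regs)
 *	{
 *		pr_info("watched_var was written\n");
 *	}
 *
 *	static int __init sketch_init(void)
 *	{
 *		struct perf_event_attr attr;
 *
 *		hw_breakpoint_init(&attr);
 *		attr.bp_addr = (unsigned long)&watched_var;
 *		attr.bp_len = HW_BREAKPOINT_LEN_4;
 *		attr.bp_type = HW_BREAKPOINT_W;
 *
 *		wide_bp = register_wide_hw_breakpoint(&attr, sketch_handler,
 *						      NULL);
 *		return PTR_ERR_OR_ZERO(wide_bp);
 *	}
 */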

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}