1 /* Copyright (C) 2009-2018 Free Software Foundation, Inc.
2 Contributed by ARM Ltd.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "common-defs.h"
20 #include "break-common.h"
21 #include "common-regcache.h"
22 #include "nat/linux-nat.h"
23 #include "aarch64-linux-hw-point.h"
26 #include <asm/ptrace.h>
27 #include <sys/ptrace.h>
30 /* Number of hardware breakpoints/watchpoints the target supports.
31 They are initialized with values obtained via the ptrace calls
32 with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively. */
/* Number of hardware breakpoint/watchpoint register pairs the running
   kernel reports for this target.  Filled in by
   aarch64_linux_get_debug_reg_capacity via the NT_ARM_HW_BREAK and
   NT_ARM_HW_WATCH ptrace regsets; zero until then (or on failure).  */
int aarch64_num_bp_regs;
int aarch64_num_wp_regs;
/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in 8-bit byte DR_CONTROL_MASK.  A buggy
   kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time.  */
static bool kernel_supports_any_contiguous_range = true;
45 /* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL. */
48 aarch64_watchpoint_offset (unsigned int ctrl
)
50 uint8_t mask
= DR_CONTROL_MASK (ctrl
);
53 /* Shift out bottom zeros. */
54 for (retval
= 0; mask
&& (mask
& 1) == 0; ++retval
)
60 /* Utility function that returns the length in bytes of a watchpoint
61 according to the content of a hardware debug control register CTRL.
62 Any contiguous range of bytes in CTRL is supported. The returned
63 value can be between 0..8 (inclusive). */
66 aarch64_watchpoint_length (unsigned int ctrl
)
68 uint8_t mask
= DR_CONTROL_MASK (ctrl
);
71 /* Shift out bottom zeros. */
72 mask
>>= aarch64_watchpoint_offset (ctrl
);
74 /* Count bottom ones. */
75 for (retval
= 0; (mask
& 1) != 0; ++retval
)
79 error (_("Unexpected hardware watchpoint length register value 0x%x"),
80 DR_CONTROL_MASK (ctrl
));
85 /* Given the hardware breakpoint or watchpoint type TYPE and its
86 length LEN, return the expected encoding for a hardware
87 breakpoint/watchpoint control register. */
90 aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type
, int offset
, int len
)
92 unsigned int ctrl
, ttype
;
94 gdb_assert (offset
== 0 || kernel_supports_any_contiguous_range
);
95 gdb_assert (offset
+ len
<= AARCH64_HWP_MAX_LEN_PER_REG
);
113 perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
118 /* offset and length bitmask */
119 ctrl
|= ((1 << len
) - 1) << (5 + offset
);
121 ctrl
|= (2 << 1) | 1;
126 /* Addresses to be written to the hardware breakpoint and watchpoint
127 value registers need to be aligned; the alignment is 4-byte and
128 8-type respectively. Linux kernel rejects any non-aligned address
129 it receives from the related ptrace call. Furthermore, the kernel
130 currently only supports the following Byte Address Select (BAS)
131 values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
132 watchpoint to be accepted by the kernel (via ptrace call), its
133 valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
134 Despite these limitations, the unaligned watchpoint is supported in
137 Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise. */
140 aarch64_point_is_aligned (int is_watchpoint
, CORE_ADDR addr
, int len
)
142 unsigned int alignment
= 0;
145 alignment
= AARCH64_HWP_ALIGNMENT
;
148 struct regcache
*regcache
149 = get_thread_regcache_for_ptid (current_lwp_ptid ());
151 /* Set alignment to 2 only if the current process is 32-bit,
152 since thumb instruction can be 2-byte aligned. Otherwise, set
153 alignment to AARCH64_HBP_ALIGNMENT. */
154 if (regcache_register_size (regcache
, 0) == 8)
155 alignment
= AARCH64_HBP_ALIGNMENT
;
160 if (addr
& (alignment
- 1))
163 if ((!kernel_supports_any_contiguous_range
164 && len
!= 8 && len
!= 4 && len
!= 2 && len
!= 1)
165 || (kernel_supports_any_contiguous_range
166 && (len
< 1 || len
> 8)))
172 /* Given the (potentially unaligned) watchpoint address in ADDR and
173 length in LEN, return the aligned address, offset from that base
174 address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
175 and *ALIGNED_LEN_P, respectively. The returned values will be
176 valid values to write to the hardware watchpoint value and control
179 The given watchpoint may get truncated if more than one hardware
180 register is needed to cover the watched region. *NEXT_ADDR_P
181 and *NEXT_LEN_P, if non-NULL, will return the address and length
182 of the remaining part of the watchpoint (which can be processed
183 by calling this routine again to generate another aligned address,
184 offset and length tuple.
186 Essentially, unaligned watchpoint is achieved by minimally
187 enlarging the watched area to meet the alignment requirement, and
188 if necessary, splitting the watchpoint over several hardware
189 watchpoint registers.
191 On kernels that predate the support for Byte Address Select (BAS)
192 in the hardware watchpoint control register, the offset from the
193 base address is always zero, and so in that case the trade-off is
194 that there will be false-positive hits for the read-type or the
195 access-type hardware watchpoints; for the write type, which is more
196 commonly used, there will be no such issues, as the higher-level
197 breakpoint management in gdb always examines the exact watched
198 region for any content change, and transparently resumes a thread
199 from a watchpoint trap if there is no change to the watched region.
201 Another limitation is that because the watched region is enlarged,
202 the watchpoint fault address discovered by
203 aarch64_stopped_data_address may be outside of the original watched
204 region, especially when the triggering instruction is accessing a
205 larger region. When the fault address is not within any known
206 range, watchpoints_triggered in gdb will get confused, as the
207 higher-level watchpoint management is only aware of original
208 watched regions, and will think that some unknown watchpoint has
209 been triggered. To prevent such a case,
210 aarch64_stopped_data_address implementations in gdb and gdbserver
211 try to match the trapped address with a watched region, and return
212 an address within the latter. */
215 aarch64_align_watchpoint (CORE_ADDR addr
, int len
, CORE_ADDR
*aligned_addr_p
,
216 int *aligned_offset_p
, int *aligned_len_p
,
217 CORE_ADDR
*next_addr_p
, int *next_len_p
,
218 CORE_ADDR
*next_addr_orig_p
)
221 unsigned int offset
, aligned_offset
;
222 CORE_ADDR aligned_addr
;
223 const unsigned int alignment
= AARCH64_HWP_ALIGNMENT
;
224 const unsigned int max_wp_len
= AARCH64_HWP_MAX_LEN_PER_REG
;
226 /* As assumed by the algorithm. */
227 gdb_assert (alignment
== max_wp_len
);
232 /* The address put into the hardware watchpoint value register must
234 offset
= addr
& (alignment
- 1);
235 aligned_addr
= addr
- offset
;
237 = kernel_supports_any_contiguous_range
? addr
& (alignment
- 1) : 0;
239 gdb_assert (offset
>= 0 && offset
< alignment
);
240 gdb_assert (aligned_addr
>= 0 && aligned_addr
<= addr
);
241 gdb_assert (offset
+ len
> 0);
243 if (offset
+ len
>= max_wp_len
)
245 /* Need more than one watchpoint register; truncate at the
246 alignment boundary. */
248 = max_wp_len
- (kernel_supports_any_contiguous_range
? offset
: 0);
249 len
-= (max_wp_len
- offset
);
250 addr
+= (max_wp_len
- offset
);
251 gdb_assert ((addr
& (alignment
- 1)) == 0);
255 /* Find the smallest valid length that is large enough to
256 accommodate this watchpoint. */
257 static const unsigned char
258 aligned_len_array
[AARCH64_HWP_MAX_LEN_PER_REG
] =
259 { 1, 2, 4, 4, 8, 8, 8, 8 };
261 aligned_len
= (kernel_supports_any_contiguous_range
262 ? len
: aligned_len_array
[offset
+ len
- 1]);
268 *aligned_addr_p
= aligned_addr
;
269 if (aligned_offset_p
)
270 *aligned_offset_p
= aligned_offset
;
272 *aligned_len_p
= aligned_len
;
277 if (next_addr_orig_p
)
278 *next_addr_orig_p
= align_down (*next_addr_orig_p
+ alignment
, alignment
);
/* Parameter bundle passed through iterate_over_lwps to
   debug_reg_change_callback: which register pair (IDX) changed, and
   whether it is a watchpoint or a breakpoint register.  */
struct aarch64_dr_update_callback_param
{
  int is_watchpoint;
  unsigned int idx;
};
287 /* Callback for iterate_over_lwps. Records the
288 information about the change of one hardware breakpoint/watchpoint
289 setting for the thread LWP.
290 The information is passed in via PTR.
291 N.B. The actual updating of hardware debug registers is not
292 carried out until the moment the thread is resumed. */
295 debug_reg_change_callback (struct lwp_info
*lwp
, void *ptr
)
297 struct aarch64_dr_update_callback_param
*param_p
298 = (struct aarch64_dr_update_callback_param
*) ptr
;
299 int tid
= ptid_get_lwp (ptid_of_lwp (lwp
));
300 int idx
= param_p
->idx
;
301 int is_watchpoint
= param_p
->is_watchpoint
;
302 struct arch_lwp_info
*info
= lwp_arch_private_info (lwp
);
303 dr_changed_t
*dr_changed_ptr
;
304 dr_changed_t dr_changed
;
308 info
= XCNEW (struct arch_lwp_info
);
309 lwp_set_arch_private_info (lwp
, info
);
314 debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
315 debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
316 "dr_changed_wp=0x%s\n", tid
,
317 phex (info
->dr_changed_bp
, 8),
318 phex (info
->dr_changed_wp
, 8));
321 dr_changed_ptr
= is_watchpoint
? &info
->dr_changed_wp
322 : &info
->dr_changed_bp
;
323 dr_changed
= *dr_changed_ptr
;
326 && (idx
<= (is_watchpoint
? aarch64_num_wp_regs
327 : aarch64_num_bp_regs
)));
329 /* The actual update is done later just before resuming the lwp,
330 we just mark that one register pair needs updating. */
331 DR_MARK_N_CHANGED (dr_changed
, idx
);
332 *dr_changed_ptr
= dr_changed
;
334 /* If the lwp isn't stopped, force it to momentarily pause, so
335 we can update its debug registers. */
336 if (!lwp_is_stopped (lwp
))
337 linux_stop_lwp (lwp
);
341 debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
342 "dr_changed_wp=0x%s\n", tid
,
343 phex (info
->dr_changed_bp
, 8),
344 phex (info
->dr_changed_wp
, 8));
350 /* Notify each thread that their IDXth breakpoint/watchpoint register
351 pair needs to be updated. The message will be recorded in each
352 thread's arch-specific data area, the actual updating will be done
353 when the thread is resumed. */
356 aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state
*state
,
357 int is_watchpoint
, unsigned int idx
)
359 struct aarch64_dr_update_callback_param param
;
360 ptid_t pid_ptid
= ptid_t (ptid_get_pid (current_lwp_ptid ()));
362 param
.is_watchpoint
= is_watchpoint
;
365 iterate_over_lwps (pid_ptid
, debug_reg_change_callback
, (void *) ¶m
);
368 /* Reconfigure STATE to be compatible with Linux kernels with the PR
369 external/20207 bug. This is called when
370 KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false. Note we
371 don't try to support combining watchpoints with matching (and thus
372 shared) masks, as it's too late when we get here. On buggy
373 kernels, GDB will try to first setup the perfect matching ranges,
374 which will run out of registers before this function can merge
375 them. It doesn't look like worth the effort to improve that, given
376 eventually buggy kernels will be phased out. */
379 aarch64_downgrade_regs (struct aarch64_debug_reg_state
*state
)
381 for (int i
= 0; i
< aarch64_num_wp_regs
; ++i
)
382 if ((state
->dr_ctrl_wp
[i
] & 1) != 0)
384 gdb_assert (state
->dr_ref_count_wp
[i
] != 0);
385 uint8_t mask_orig
= (state
->dr_ctrl_wp
[i
] >> 5) & 0xff;
386 gdb_assert (mask_orig
!= 0);
387 static const uint8_t old_valid
[] = { 0x01, 0x03, 0x0f, 0xff };
389 for (const uint8_t old_mask
: old_valid
)
390 if (mask_orig
<= old_mask
)
395 gdb_assert (mask
!= 0);
397 /* No update needed for this watchpoint? */
398 if (mask
== mask_orig
)
400 state
->dr_ctrl_wp
[i
] |= mask
<< 5;
402 = align_down (state
->dr_addr_wp
[i
], AARCH64_HWP_ALIGNMENT
);
404 /* Try to match duplicate entries. */
405 for (int j
= 0; j
< i
; ++j
)
406 if ((state
->dr_ctrl_wp
[j
] & 1) != 0
407 && state
->dr_addr_wp
[j
] == state
->dr_addr_wp
[i
]
408 && state
->dr_addr_orig_wp
[j
] == state
->dr_addr_orig_wp
[i
]
409 && state
->dr_ctrl_wp
[j
] == state
->dr_ctrl_wp
[i
])
411 state
->dr_ref_count_wp
[j
] += state
->dr_ref_count_wp
[i
];
412 state
->dr_ref_count_wp
[i
] = 0;
413 state
->dr_addr_wp
[i
] = 0;
414 state
->dr_addr_orig_wp
[i
] = 0;
415 state
->dr_ctrl_wp
[i
] &= ~1;
419 aarch64_notify_debug_reg_change (state
, 1 /* is_watchpoint */, i
);
423 /* Record the insertion of one breakpoint/watchpoint, as represented
424 by ADDR and CTRL, in the process' arch-specific data area *STATE. */
427 aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state
*state
,
428 enum target_hw_bp_type type
,
429 CORE_ADDR addr
, int offset
, int len
,
432 int i
, idx
, num_regs
, is_watchpoint
;
433 unsigned int ctrl
, *dr_ctrl_p
, *dr_ref_count
;
434 CORE_ADDR
*dr_addr_p
, *dr_addr_orig_p
;
436 /* Set up state pointers. */
437 is_watchpoint
= (type
!= hw_execute
);
438 gdb_assert (aarch64_point_is_aligned (is_watchpoint
, addr
, len
));
441 num_regs
= aarch64_num_wp_regs
;
442 dr_addr_p
= state
->dr_addr_wp
;
443 dr_addr_orig_p
= state
->dr_addr_orig_wp
;
444 dr_ctrl_p
= state
->dr_ctrl_wp
;
445 dr_ref_count
= state
->dr_ref_count_wp
;
449 num_regs
= aarch64_num_bp_regs
;
450 dr_addr_p
= state
->dr_addr_bp
;
451 dr_addr_orig_p
= nullptr;
452 dr_ctrl_p
= state
->dr_ctrl_bp
;
453 dr_ref_count
= state
->dr_ref_count_bp
;
456 ctrl
= aarch64_point_encode_ctrl_reg (type
, offset
, len
);
458 /* Find an existing or free register in our cache. */
460 for (i
= 0; i
< num_regs
; ++i
)
462 if ((dr_ctrl_p
[i
] & 1) == 0)
464 gdb_assert (dr_ref_count
[i
] == 0);
466 /* no break; continue hunting for an exising one. */
468 else if (dr_addr_p
[i
] == addr
469 && (dr_addr_orig_p
== nullptr || dr_addr_orig_p
[i
] == addr_orig
)
470 && dr_ctrl_p
[i
] == ctrl
)
472 gdb_assert (dr_ref_count
[i
] != 0);
482 /* Update our cache. */
483 if ((dr_ctrl_p
[idx
] & 1) == 0)
486 dr_addr_p
[idx
] = addr
;
487 if (dr_addr_orig_p
!= nullptr)
488 dr_addr_orig_p
[idx
] = addr_orig
;
489 dr_ctrl_p
[idx
] = ctrl
;
490 dr_ref_count
[idx
] = 1;
491 /* Notify the change. */
492 aarch64_notify_debug_reg_change (state
, is_watchpoint
, idx
);
503 /* Record the removal of one breakpoint/watchpoint, as represented by
504 ADDR and CTRL, in the process' arch-specific data area *STATE. */
507 aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state
*state
,
508 enum target_hw_bp_type type
,
509 CORE_ADDR addr
, int offset
, int len
,
512 int i
, num_regs
, is_watchpoint
;
513 unsigned int ctrl
, *dr_ctrl_p
, *dr_ref_count
;
514 CORE_ADDR
*dr_addr_p
, *dr_addr_orig_p
;
516 /* Set up state pointers. */
517 is_watchpoint
= (type
!= hw_execute
);
520 num_regs
= aarch64_num_wp_regs
;
521 dr_addr_p
= state
->dr_addr_wp
;
522 dr_addr_orig_p
= state
->dr_addr_orig_wp
;
523 dr_ctrl_p
= state
->dr_ctrl_wp
;
524 dr_ref_count
= state
->dr_ref_count_wp
;
528 num_regs
= aarch64_num_bp_regs
;
529 dr_addr_p
= state
->dr_addr_bp
;
530 dr_addr_orig_p
= nullptr;
531 dr_ctrl_p
= state
->dr_ctrl_bp
;
532 dr_ref_count
= state
->dr_ref_count_bp
;
535 ctrl
= aarch64_point_encode_ctrl_reg (type
, offset
, len
);
537 /* Find the entry that matches the ADDR and CTRL. */
538 for (i
= 0; i
< num_regs
; ++i
)
539 if (dr_addr_p
[i
] == addr
540 && (dr_addr_orig_p
== nullptr || dr_addr_orig_p
[i
] == addr_orig
)
541 && dr_ctrl_p
[i
] == ctrl
)
543 gdb_assert (dr_ref_count
[i
] != 0);
551 /* Clear our cache. */
552 if (--dr_ref_count
[i
] == 0)
554 /* Clear the enable bit. */
557 if (dr_addr_orig_p
!= nullptr)
558 dr_addr_orig_p
[i
] = 0;
560 /* Notify the change. */
561 aarch64_notify_debug_reg_change (state
, is_watchpoint
, i
);
568 aarch64_handle_breakpoint (enum target_hw_bp_type type
, CORE_ADDR addr
,
569 int len
, int is_insert
,
570 struct aarch64_debug_reg_state
*state
)
574 /* The hardware breakpoint on AArch64 should always be 4-byte
575 aligned, but on AArch32, it can be 2-byte aligned. Note that
576 we only check the alignment on inserting breakpoint because
577 aarch64_point_is_aligned needs the inferior_ptid inferior's
578 regcache to decide whether the inferior is 32-bit or 64-bit.
579 However when GDB follows the parent process and detach breakpoints
580 from child process, inferior_ptid is the child ptid, but the
581 child inferior doesn't exist in GDB's view yet. */
582 if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr
, len
))
585 return aarch64_dr_state_insert_one_point (state
, type
, addr
, 0, len
, -1);
588 return aarch64_dr_state_remove_one_point (state
, type
, addr
, 0, len
, -1);
591 /* This is essentially the same as aarch64_handle_breakpoint, apart
592 from that it is an aligned watchpoint to be handled. */
595 aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type
,
596 CORE_ADDR addr
, int len
, int is_insert
,
597 struct aarch64_debug_reg_state
*state
)
600 return aarch64_dr_state_insert_one_point (state
, type
, addr
, 0, len
, addr
);
602 return aarch64_dr_state_remove_one_point (state
, type
, addr
, 0, len
, addr
);
605 /* Insert/remove unaligned watchpoint by calling
606 aarch64_align_watchpoint repeatedly until the whole watched region,
607 as represented by ADDR and LEN, has been properly aligned and ready
608 to be written to one or more hardware watchpoint registers.
609 IS_INSERT indicates whether this is an insertion or a deletion.
610 Return 0 if succeed. */
613 aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type
,
614 CORE_ADDR addr
, int len
, int is_insert
,
615 struct aarch64_debug_reg_state
*state
)
617 CORE_ADDR addr_orig
= addr
;
621 CORE_ADDR aligned_addr
;
622 int aligned_offset
, aligned_len
, ret
;
623 CORE_ADDR addr_orig_next
= addr_orig
;
625 aarch64_align_watchpoint (addr
, len
, &aligned_addr
, &aligned_offset
,
626 &aligned_len
, &addr
, &len
, &addr_orig_next
);
629 ret
= aarch64_dr_state_insert_one_point (state
, type
, aligned_addr
,
631 aligned_len
, addr_orig
);
633 ret
= aarch64_dr_state_remove_one_point (state
, type
, aligned_addr
,
635 aligned_len
, addr_orig
);
638 debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
640 "aligned_addr: %s, aligned_len: %d\n"
644 "next_addr: %s, next_len: %d\n"
646 "addr_orig_next: %s\n",
647 is_insert
, core_addr_to_string_nz (aligned_addr
),
648 aligned_len
, core_addr_to_string_nz (addr_orig
),
649 core_addr_to_string_nz (addr
), len
,
650 core_addr_to_string_nz (addr_orig_next
));
652 addr_orig
= addr_orig_next
;
662 aarch64_handle_watchpoint (enum target_hw_bp_type type
, CORE_ADDR addr
,
663 int len
, int is_insert
,
664 struct aarch64_debug_reg_state
*state
)
666 if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr
, len
))
667 return aarch64_handle_aligned_watchpoint (type
, addr
, len
, is_insert
,
670 return aarch64_handle_unaligned_watchpoint (type
, addr
, len
, is_insert
,
674 /* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
675 registers with data from *STATE. */
678 aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state
*state
,
679 int tid
, int watchpoint
)
683 struct user_hwdebug_state regs
;
684 const CORE_ADDR
*addr
;
685 const unsigned int *ctrl
;
687 memset (®s
, 0, sizeof (regs
));
688 iov
.iov_base
= ®s
;
689 count
= watchpoint
? aarch64_num_wp_regs
: aarch64_num_bp_regs
;
690 addr
= watchpoint
? state
->dr_addr_wp
: state
->dr_addr_bp
;
691 ctrl
= watchpoint
? state
->dr_ctrl_wp
: state
->dr_ctrl_bp
;
694 iov
.iov_len
= (offsetof (struct user_hwdebug_state
, dbg_regs
)
695 + count
* sizeof (regs
.dbg_regs
[0]));
697 for (i
= 0; i
< count
; i
++)
699 regs
.dbg_regs
[i
].addr
= addr
[i
];
700 regs
.dbg_regs
[i
].ctrl
= ctrl
[i
];
703 if (ptrace (PTRACE_SETREGSET
, tid
,
704 watchpoint
? NT_ARM_HW_WATCH
: NT_ARM_HW_BREAK
,
707 /* Handle Linux kernels with the PR external/20207 bug. */
708 if (watchpoint
&& errno
== EINVAL
709 && kernel_supports_any_contiguous_range
)
711 kernel_supports_any_contiguous_range
= false;
712 aarch64_downgrade_regs (state
);
713 aarch64_linux_set_debug_regs (state
, tid
, watchpoint
);
716 error (_("Unexpected error setting hardware debug registers"));
720 /* Print the values of the cached breakpoint/watchpoint registers. */
723 aarch64_show_debug_reg_state (struct aarch64_debug_reg_state
*state
,
724 const char *func
, CORE_ADDR addr
,
725 int len
, enum target_hw_bp_type type
)
729 debug_printf ("%s", func
);
731 debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
732 (unsigned long) addr
, len
,
733 type
== hw_write
? "hw-write-watchpoint"
734 : (type
== hw_read
? "hw-read-watchpoint"
735 : (type
== hw_access
? "hw-access-watchpoint"
736 : (type
== hw_execute
? "hw-breakpoint"
738 debug_printf (":\n");
740 debug_printf ("\tBREAKPOINTs:\n");
741 for (i
= 0; i
< aarch64_num_bp_regs
; i
++)
742 debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
743 i
, core_addr_to_string_nz (state
->dr_addr_bp
[i
]),
744 state
->dr_ctrl_bp
[i
], state
->dr_ref_count_bp
[i
]);
746 debug_printf ("\tWATCHPOINTs:\n");
747 for (i
= 0; i
< aarch64_num_wp_regs
; i
++)
748 debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
749 i
, core_addr_to_string_nz (state
->dr_addr_wp
[i
]),
750 core_addr_to_string_nz (state
->dr_addr_orig_wp
[i
]),
751 state
->dr_ctrl_wp
[i
], state
->dr_ref_count_wp
[i
]);
754 /* Get the hardware debug register capacity information from the
755 process represented by TID. */
758 aarch64_linux_get_debug_reg_capacity (int tid
)
761 struct user_hwdebug_state dreg_state
;
763 iov
.iov_base
= &dreg_state
;
764 iov
.iov_len
= sizeof (dreg_state
);
766 /* Get hardware watchpoint register info. */
767 if (ptrace (PTRACE_GETREGSET
, tid
, NT_ARM_HW_WATCH
, &iov
) == 0
768 && (AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
) == AARCH64_DEBUG_ARCH_V8
769 || AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
) == AARCH64_DEBUG_ARCH_V8_1
770 || AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
) == AARCH64_DEBUG_ARCH_V8_2
))
772 aarch64_num_wp_regs
= AARCH64_DEBUG_NUM_SLOTS (dreg_state
.dbg_info
);
773 if (aarch64_num_wp_regs
> AARCH64_HWP_MAX_NUM
)
775 warning (_("Unexpected number of hardware watchpoint registers"
776 " reported by ptrace, got %d, expected %d."),
777 aarch64_num_wp_regs
, AARCH64_HWP_MAX_NUM
);
778 aarch64_num_wp_regs
= AARCH64_HWP_MAX_NUM
;
783 warning (_("Unable to determine the number of hardware watchpoints"
785 aarch64_num_wp_regs
= 0;
788 /* Get hardware breakpoint register info. */
789 if (ptrace (PTRACE_GETREGSET
, tid
, NT_ARM_HW_BREAK
, &iov
) == 0
790 && (AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
) == AARCH64_DEBUG_ARCH_V8
791 || AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
) == AARCH64_DEBUG_ARCH_V8_1
792 || AARCH64_DEBUG_ARCH (dreg_state
.dbg_info
) == AARCH64_DEBUG_ARCH_V8_2
))
794 aarch64_num_bp_regs
= AARCH64_DEBUG_NUM_SLOTS (dreg_state
.dbg_info
);
795 if (aarch64_num_bp_regs
> AARCH64_HBP_MAX_NUM
)
797 warning (_("Unexpected number of hardware breakpoint registers"
798 " reported by ptrace, got %d, expected %d."),
799 aarch64_num_bp_regs
, AARCH64_HBP_MAX_NUM
);
800 aarch64_num_bp_regs
= AARCH64_HBP_MAX_NUM
;
805 warning (_("Unable to determine the number of hardware breakpoints"
807 aarch64_num_bp_regs
= 0;
811 /* Return true if we can watch a memory region that starts address
812 ADDR and whose length is LEN in bytes. */
815 aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr
, int len
)
817 CORE_ADDR aligned_addr
;
819 /* Can not set watchpoints for zero or negative lengths. */
823 /* Must have hardware watchpoint debug register(s). */
824 if (aarch64_num_wp_regs
== 0)
827 /* We support unaligned watchpoint address and arbitrary length,
828 as long as the size of the whole watched area after alignment
829 doesn't exceed size of the total area that all watchpoint debug
830 registers can watch cooperatively.
832 This is a very relaxed rule, but unfortunately there are
833 limitations, e.g. false-positive hits, due to limited support of
834 hardware debug registers in the kernel. See comment above
835 aarch64_align_watchpoint for more information. */
837 aligned_addr
= addr
& ~(AARCH64_HWP_MAX_LEN_PER_REG
- 1);
838 if (aligned_addr
+ aarch64_num_wp_regs
* AARCH64_HWP_MAX_LEN_PER_REG
842 /* All tests passed so we are likely to be able to set the watchpoint.
843 The reason that it is 'likely' rather than 'must' is because
844 we don't check the current usage of the watchpoint registers, and
845 there may not be enough registers available for this watchpoint.
846 Ideally we should check the cached debug register state, however
847 the checking is costly. */