1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
28 #include "reggroups.h"
30 #include "arch-utils.h"
32 #include "frame-unwind.h"
33 #include "frame-base.h"
34 #include "trad-frame.h"
37 #include "dwarf2/frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
43 #include "gdbsupport/selftest.h"
45 #include "aarch64-tdep.h"
46 #include "aarch64-ravenscar-thread.h"
49 #include "record-full.h"
50 #include "arch/aarch64-insn.h"
53 #include "opcode/aarch64.h"
/* NOTE(review): this extraction has dropped interior source lines (the
   embedded original numbering skips); restore missing text from the
   upstream file before treating this region as compilable.  */
56 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
58 #define HA_MAX_NUM_FLDS 4
60 /* All possible aarch64 target descriptors.  */
/* Cache of target descriptions, indexed by SVE vector quotient (VQ) and
   by whether pointer authentication is present (the 2/*pauth*/ axis).  */
61 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1][2/*pauth*/];
/* Alias table mapping user-visible register names to GDB register
   numbers.  "fp"/"lr"/"sp" alias the 64-bit core registers; "wN" are
   the 32-bit views of "xN"; "ip0"/"ip1" are the procedure-call
   scratch registers x16/x17.
   NOTE(review): the struct header and closing brace of this aggregate
   are missing from this extraction (original numbering skips lines);
   restore from upstream before editing.  */
63 /* The standard register names, and all the valid aliases for them.  */
66 const char *const name
;
68 } aarch64_register_aliases
[] =
70 /* 64-bit register names.  */
71 {"fp", AARCH64_FP_REGNUM
},
72 {"lr", AARCH64_LR_REGNUM
},
73 {"sp", AARCH64_SP_REGNUM
},
75 /* 32-bit register names.  */
76 {"w0", AARCH64_X0_REGNUM
+ 0},
77 {"w1", AARCH64_X0_REGNUM
+ 1},
78 {"w2", AARCH64_X0_REGNUM
+ 2},
79 {"w3", AARCH64_X0_REGNUM
+ 3},
80 {"w4", AARCH64_X0_REGNUM
+ 4},
81 {"w5", AARCH64_X0_REGNUM
+ 5},
82 {"w6", AARCH64_X0_REGNUM
+ 6},
83 {"w7", AARCH64_X0_REGNUM
+ 7},
84 {"w8", AARCH64_X0_REGNUM
+ 8},
85 {"w9", AARCH64_X0_REGNUM
+ 9},
86 {"w10", AARCH64_X0_REGNUM
+ 10},
87 {"w11", AARCH64_X0_REGNUM
+ 11},
88 {"w12", AARCH64_X0_REGNUM
+ 12},
89 {"w13", AARCH64_X0_REGNUM
+ 13},
90 {"w14", AARCH64_X0_REGNUM
+ 14},
91 {"w15", AARCH64_X0_REGNUM
+ 15},
92 {"w16", AARCH64_X0_REGNUM
+ 16},
93 {"w17", AARCH64_X0_REGNUM
+ 17},
94 {"w18", AARCH64_X0_REGNUM
+ 18},
95 {"w19", AARCH64_X0_REGNUM
+ 19},
96 {"w20", AARCH64_X0_REGNUM
+ 20},
97 {"w21", AARCH64_X0_REGNUM
+ 21},
98 {"w22", AARCH64_X0_REGNUM
+ 22},
99 {"w23", AARCH64_X0_REGNUM
+ 23},
100 {"w24", AARCH64_X0_REGNUM
+ 24},
101 {"w25", AARCH64_X0_REGNUM
+ 25},
102 {"w26", AARCH64_X0_REGNUM
+ 26},
103 {"w27", AARCH64_X0_REGNUM
+ 27},
104 {"w28", AARCH64_X0_REGNUM
+ 28},
105 {"w29", AARCH64_X0_REGNUM
+ 29},
106 {"w30", AARCH64_X0_REGNUM
+ 30},
/* Intra-procedure-call scratch registers: aliases for x16/x17.  */
109 {"ip0", AARCH64_X0_REGNUM
+ 16},
110 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* Names of the 31 general-purpose X registers plus SP, in raw register
   number order starting at AARCH64_X0_REGNUM.
   NOTE(review): the aggregate's braces are missing in this extraction;
   restore from upstream before editing.  */
113 /* The required core 'R' registers.  */
114 static const char *const aarch64_r_register_names
[] =
116 /* These registers must appear in consecutive RAW register number
117 order and they must begin with AARCH64_X0_REGNUM! */
118 "x0", "x1", "x2", "x3",
119 "x4", "x5", "x6", "x7",
120 "x8", "x9", "x10", "x11",
121 "x12", "x13", "x14", "x15",
122 "x16", "x17", "x18", "x19",
123 "x20", "x21", "x22", "x23",
124 "x24", "x25", "x26", "x27",
125 "x28", "x29", "x30", "sp",
/* Names of the 32 FP/SIMD V registers, in raw register number order
   starting at AARCH64_V0_REGNUM.
   NOTE(review): aggregate braces missing in this extraction.  */
129 /* The FP/SIMD 'V' registers.  */
130 static const char *const aarch64_v_register_names
[] =
132 /* These registers must appear in consecutive RAW register number
133 order and they must begin with AARCH64_V0_REGNUM! */
134 "v0", "v1", "v2", "v3",
135 "v4", "v5", "v6", "v7",
136 "v8", "v9", "v10", "v11",
137 "v12", "v13", "v14", "v15",
138 "v16", "v17", "v18", "v19",
139 "v20", "v21", "v22", "v23",
140 "v24", "v25", "v26", "v27",
141 "v28", "v29", "v30", "v31",
/* Names of the SVE Z (vector) and P (predicate) registers, in raw
   register number order starting at AARCH64_SVE_Z0_REGNUM.
   NOTE(review): aggregate braces and possibly trailing entries are
   missing in this extraction (original numbering skips).  */
146 /* The SVE 'Z' and 'P' registers.  */
147 static const char *const aarch64_sve_register_names
[] =
149 /* These registers must appear in consecutive RAW register number
150 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
151 "z0", "z1", "z2", "z3",
152 "z4", "z5", "z6", "z7",
153 "z8", "z9", "z10", "z11",
154 "z12", "z13", "z14", "z15",
155 "z16", "z17", "z18", "z19",
156 "z20", "z21", "z22", "z23",
157 "z24", "z25", "z26", "z27",
158 "z28", "z29", "z30", "z31",
159 "p0", "p1", "p2", "p3",
160 "p4", "p5", "p6", "p7",
161 "p8", "p9", "p10", "p11",
162 "p12", "p13", "p14", "p15",
/* Names of the pointer-authentication mask registers.
   NOTE(review): the actual name strings and braces are missing from
   this extraction — only the comments describing the two entries
   (data-pointer mask, code-pointer mask) survive.  */
167 static const char *const aarch64_pauth_register_names
[] =
169 /* Authentication mask for data pointer.  */
171 /* Authentication mask for code pointer.  */
/* Per-frame cache filled in by prologue analysis and consumed by the
   prologue/stub unwinders below.  Fields visible here: func (start
   PC), prev_pc, prev_sp, available_p, framesize, framereg, and the
   trad-frame saved-register table.
   NOTE(review): the field declarations themselves are partially
   missing from this extraction; only their comments survive.  */
175 /* AArch64 prologue cache structure.  */
176 struct aarch64_prologue_cache
178 /* The program counter at the start of the function.  It is used to
179 identify this frame as a prologue frame.  */
182 /* The program counter at the time this frame was created; i.e. where
183 this function was called from.  It is used to identify this frame as a
187 /* The stack pointer at the time this frame was created; i.e. the
188 caller's stack pointer when this function was called.  It is used
189 to identify this frame.  */
192 /* Is the target available to read from?  */
195 /* The frame base for this frame is just prev_sp - frame size.
196 FRAMESIZE is the distance from the frame pointer to the
197 initial stack pointer.  */
200 /* The register used to hold the frame pointer for this frame.  */
203 /* Saved register offsets.  */
204 trad_frame_saved_reg
*saved_regs
;
/* "show debug aarch64" command callback: print the current debugging
   state (VALUE is the preformatted on/off string).  */
208 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
209 struct cmd_list_element
*c
, const char *value
)
211 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
/* Interface that lets prologue analysis fetch instructions either from
   the live target or from a canned buffer (see the selftests below).  */
216 /* Abstract instruction reader.  */
218 class abstract_instruction_reader
221 /* Read in one instruction.  */
/* Pure virtual: fetch LEN bytes at MEMADDR with the given endianness.  */
222 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
223 enum bfd_endian byte_order
) = 0;
/* Concrete reader that fetches instructions from the inferior's
   code memory via read_code_unsigned_integer.  */
226 /* Instruction reader from real target.  */
228 class instruction_reader
: public abstract_instruction_reader
231 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
234 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
/* Strip the pointer-authentication signature bits from a link-register
   value ADDR.  Only acts when the target has pauth and the unwound
   RA_STATE register for THIS_FRAME is nonzero; the CMASK register
   supplies the signature-bit mask.  Also records on the frame that the
   previous PC needed unmasking (shown as [PAC] in backtraces).  */
240 /* If address signing is enabled, mask off the signature bits from the link
241 register, which is passed by value in ADDR, using the register values in
245 aarch64_frame_unmask_lr (struct gdbarch_tdep
*tdep
,
246 struct frame_info
*this_frame
, CORE_ADDR addr
)
248 if (tdep
->has_pauth ()
249 && frame_unwind_register_unsigned (this_frame
,
250 tdep
->pauth_ra_state_regnum
))
252 int cmask_num
= AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
);
253 CORE_ADDR cmask
= frame_unwind_register_unsigned (this_frame
, cmask_num
);
/* Clear every bit the code-pointer mask covers.  */
254 addr
= addr
& ~cmask
;
256 /* Record in the frame that the link register required unmasking.  */
257 set_frame_previous_pc_masked (this_frame
);
/* Return a flag string for PC display; the visible branch fires when
   the frame's previous PC had pauth signature bits masked off.
   NOTE(review): the return statements are missing from this
   extraction (original numbering skips); presumably "[PAC]" vs empty
   — confirm against upstream.  */
263 /* Implement the "get_pc_address_flags" gdbarch method.  */
266 aarch64_get_pc_address_flags (frame_info
*frame
, CORE_ADDR pc
)
268 if (pc
!= 0 && get_frame_pc_masked (frame
))
/* Core prologue analyzer.  Walks instructions from START to LIMIT
   (4 bytes at a time) through READER, symbolically tracking the X and
   D registers with prologue-value (pv) arithmetic and a pv_area for
   stack stores.  Recognizes add/sub-immediate (stack adjust, fp
   setup), adrp, branches (stop), movz, orr (register moves), stur,
   stp (pre-indexed/offset), str, and the pauth hint instructions
   paciasp/pacibsp/autiasp/autibsp.  On exit fills CACHE (if non-null)
   with framereg/framesize and saved-register stack offsets.
   NOTE(review): many interior lines (braces, `break` statements, the
   final `return start;`, several comment tails) are missing from this
   extraction — the embedded original numbering skips throughout.  Do
   not edit this block without restoring it from upstream GDB.  */
274 /* Analyze a prologue, looking for a recognizable stack frame
275 and frame pointer.  Scan until we encounter a store that could
276 clobber the stack frame unexpectedly, or an unknown instruction.  */
279 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
280 CORE_ADDR start
, CORE_ADDR limit
,
281 struct aarch64_prologue_cache
*cache
,
282 abstract_instruction_reader
& reader
)
284 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
287 /* Whether the stack has been set.  This should be true when we notice a SP
288 to FP move or if we are using the SP as the base register for storing
289 data, in case the FP is ommitted.  */
290 bool seen_stack_set
= false;
292 /* Track X registers and D registers in prologue.  */
293 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
295 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
296 regs
[i
] = pv_register (i
, 0);
297 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
/* Main decode loop: one 32-bit instruction per iteration.  */
299 for (; start
< limit
; start
+= 4)
304 insn
= reader
.read (start
, 4, byte_order_for_code
);
306 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
309 if (inst
.opcode
->iclass
== addsub_imm
310 && (inst
.opcode
->op
== OP_ADD
311 || strcmp ("sub", inst
.opcode
->name
) == 0))
313 unsigned rd
= inst
.operands
[0].reg
.regno
;
314 unsigned rn
= inst
.operands
[1].reg
.regno
;
316 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
317 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
318 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
319 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
321 if (inst
.opcode
->op
== OP_ADD
)
323 regs
[rd
] = pv_add_constant (regs
[rn
],
324 inst
.operands
[2].imm
.value
);
328 regs
[rd
] = pv_add_constant (regs
[rn
],
329 -inst
.operands
[2].imm
.value
);
332 /* Did we move SP to FP?  */
333 if (rn
== AARCH64_SP_REGNUM
&& rd
== AARCH64_FP_REGNUM
)
334 seen_stack_set
= true;
336 else if (inst
.opcode
->iclass
== pcreladdr
337 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
339 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
340 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
342 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
344 else if (inst
.opcode
->iclass
== branch_imm
)
346 /* Stop analysis on branch.  */
349 else if (inst
.opcode
->iclass
== condbranch
)
351 /* Stop analysis on branch.  */
354 else if (inst
.opcode
->iclass
== branch_reg
)
356 /* Stop analysis on branch.  */
359 else if (inst
.opcode
->iclass
== compbranch
)
361 /* Stop analysis on branch.  */
364 else if (inst
.opcode
->op
== OP_MOVZ
)
366 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
)
;
368 /* If this shows up before we set the stack, keep going.  Otherwise
369 stop the analysis.  */
373 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
375 else if (inst
.opcode
->iclass
== log_shift
376 && strcmp (inst
.opcode
->name
, "orr") == 0)
378 unsigned rd
= inst
.operands
[0].reg
.regno
;
379 unsigned rn
= inst
.operands
[1].reg
.regno
;
380 unsigned rm
= inst
.operands
[2].reg
.regno
;
382 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
383 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
384 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
386 if (inst
.operands
[2].shifter
.amount
== 0
387 && rn
== AARCH64_SP_REGNUM
)
391 aarch64_debug_printf ("prologue analysis gave up "
392 "addr=%s opcode=0x%x (orr x register)",
393 core_addr_to_string_nz (start
), insn
);
398 else if (inst
.opcode
->op
== OP_STUR
)
400 unsigned rt
= inst
.operands
[0].reg
.regno
;
401 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
402 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
404 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
405 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
406 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
407 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
410 (pv_add_constant (regs
[rn
], inst
.operands
[1].addr
.offset
.imm
),
413 /* Are we storing with SP as a base?  */
414 if (rn
== AARCH64_SP_REGNUM
)
415 seen_stack_set
= true;
417 else if ((inst
.opcode
->iclass
== ldstpair_off
418 || (inst
.opcode
->iclass
== ldstpair_indexed
419 && inst
.operands
[2].addr
.preind
))
420 && strcmp ("stp", inst
.opcode
->name
) == 0)
422 /* STP with addressing mode Pre-indexed and Base register.  */
425 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
426 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
427 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
429 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
430 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
431 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
432 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
433 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
434 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
436 /* If recording this store would invalidate the store area
437 (perhaps because rn is not known) then we should abandon
438 further prologue analysis.  */
439 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
442 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
445 rt1
= inst
.operands
[0].reg
.regno
;
446 rt2
= inst
.operands
[1].reg
.regno
;
447 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
449 rt1
+= AARCH64_X_REGISTER_COUNT
;
450 rt2
+= AARCH64_X_REGISTER_COUNT
;
453 stack
.store (pv_add_constant (regs
[rn
], imm
), size
, regs
[rt1
]);
454 stack
.store (pv_add_constant (regs
[rn
], imm
+ size
), size
, regs
[rt2
]);
456 if (inst
.operands
[2].addr
.writeback
)
457 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
459 /* Ignore the instruction that allocates stack space and sets
461 if (rn
== AARCH64_SP_REGNUM
&& !inst
.operands
[2].addr
.writeback
)
462 seen_stack_set
= true;
464 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate.  */
465 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate.  */
466 && (inst
.opcode
->op
== OP_STR_POS
467 || inst
.opcode
->op
== OP_STRF_POS
)))
468 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
469 && strcmp ("str", inst
.opcode
->name
) == 0)
471 /* STR (immediate) */
472 unsigned int rt
= inst
.operands
[0].reg
.regno
;
473 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
474 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
475 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
476 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
477 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
479 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
480 rt
+= AARCH64_X_REGISTER_COUNT
;
482 stack
.store (pv_add_constant (regs
[rn
], imm
), size
, regs
[rt
]);
483 if (inst
.operands
[1].addr
.writeback
)
484 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
486 /* Are we storing with SP as a base?  */
487 if (rn
== AARCH64_SP_REGNUM
)
488 seen_stack_set
= true;
490 else if (inst
.opcode
->iclass
== testbranch
)
492 /* Stop analysis on branch.  */
495 else if (inst
.opcode
->iclass
== ic_system
)
497 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
498 int ra_state_val
= 0;
500 if (insn
== 0xd503233f /* paciasp.  */
501 || insn
== 0xd503237f /* pacibsp.  */)
503 /* Return addresses are mangled.  */
506 else if (insn
== 0xd50323bf /* autiasp.  */
507 || insn
== 0xd50323ff /* autibsp.  */)
509 /* Return addresses are not mangled.  */
514 aarch64_debug_printf ("prologue analysis gave up addr=%s"
515 " opcode=0x%x (iclass)",
516 core_addr_to_string_nz (start
), insn
);
520 if (tdep
->has_pauth () && cache
!= nullptr)
521 trad_frame_set_value (cache
->saved_regs
,
522 tdep
->pauth_ra_state_regnum
,
527 aarch64_debug_printf ("prologue analysis gave up addr=%s"
529 core_addr_to_string_nz (start
), insn
);
/* Post-scan: decide which register anchors the frame.  Prefer an FP
   that tracks the entry SP; fall back to SP itself; else give up.  */
538 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
540 /* Frame pointer is fp.  Frame size is constant.  */
541 cache
->framereg
= AARCH64_FP_REGNUM
;
542 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
544 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
546 /* Try the stack pointer.  */
547 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
548 cache
->framereg
= AARCH64_SP_REGNUM
;
552 /* We're just out of luck.  We don't know where the frame is.  */
553 cache
->framereg
= -1;
554 cache
->framesize
= 0;
/* Record stack offsets of any saved X and D registers found in the
   pv_area; D registers live past the cooked X slots in CACHE.  */
557 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
561 if (stack
.find_reg (gdbarch
, i
, &offset
))
562 cache
->saved_regs
[i
].set_addr (offset
);
565 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
567 int regnum
= gdbarch_num_regs (gdbarch
);
570 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
572 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].set_addr (offset
);
/* Convenience overload: run the prologue analyzer against the live
   target by supplying the real-memory instruction_reader.
   NOTE(review): the trailing `reader)` argument line is missing from
   this extraction.  */
579 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
580 CORE_ADDR start
, CORE_ADDR limit
,
581 struct aarch64_prologue_cache
*cache
)
583 instruction_reader reader
;
585 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
/* Unit tests for aarch64_analyze_prologue, driven by hand-assembled
   instruction arrays via instruction_reader_test.  Each scenario
   resets the saved-regs table, runs the analyzer over a canned
   prologue, and checks the detected frame register, frame size, stop
   point, and saved-register offsets.
   NOTE(review): many interior lines (braces, `private:`, SELF_CHECK
   expected values at original lines 665/706-707/861, conditionals at
   845-846) are missing from this extraction; restore from upstream
   before editing.  */
591 namespace selftests
{
593 /* Instruction reader from manually cooked instruction sequences.  */
595 class instruction_reader_test
: public abstract_instruction_reader
598 template<size_t SIZE
>
599 explicit instruction_reader_test (const uint32_t (&insns
)[SIZE
])
600 : m_insns (insns
), m_insns_size (SIZE
)
603 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
606 SELF_CHECK (len
== 4);
607 SELF_CHECK (memaddr
% 4 == 0);
608 SELF_CHECK (memaddr
/ 4 < m_insns_size
);
610 return m_insns
[memaddr
/ 4];
614 const uint32_t *m_insns
;
619 aarch64_analyze_prologue_test (void)
621 struct gdbarch_info info
;
623 gdbarch_info_init (&info
);
624 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
626 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
627 SELF_CHECK (gdbarch
!= NULL
);
629 struct aarch64_prologue_cache cache
;
630 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
632 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
634 /* Test the simple prologue in which frame pointer is used.  */
636 static const uint32_t insns
[] = {
637 0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
638 0x910003fd, /* mov     x29, sp */
639 0x97ffffe6, /* bl      0x400580 */
641 instruction_reader_test
reader (insns
);
643 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
644 SELF_CHECK (end
== 4 * 2);
646 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
647 SELF_CHECK (cache
.framesize
== 272);
649 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
651 if (i
== AARCH64_FP_REGNUM
)
652 SELF_CHECK (cache
.saved_regs
[i
].addr () == -272);
653 else if (i
== AARCH64_LR_REGNUM
)
654 SELF_CHECK (cache
.saved_regs
[i
].addr () == -264);
656 SELF_CHECK (cache
.saved_regs
[i
].is_realreg ());
659 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
661 int regnum
= gdbarch_num_regs (gdbarch
);
663 SELF_CHECK (cache
.saved_regs
[i
+ regnum
664 + AARCH64_D0_REGNUM
].is_realreg ());
668 /* Test a prologue in which STR is used and frame pointer is not
671 static const uint32_t insns
[] = {
672 0xf81d0ff3, /* str	x19, [sp, #-48]! */
673 0xb9002fe0, /* str	w0, [sp, #44] */
674 0xf90013e1, /* str	x1, [sp, #32]*/
675 0xfd000fe0, /* str	d0, [sp, #24] */
676 0xaa0203f3, /* mov	x19, x2 */
677 0xf94013e0, /* ldr	x0, [sp, #32] */
679 instruction_reader_test
reader (insns
);
681 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
682 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
684 SELF_CHECK (end
== 4 * 5);
686 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
687 SELF_CHECK (cache
.framesize
== 48);
689 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
692 SELF_CHECK (cache
.saved_regs
[i
].addr () == -16);
694 SELF_CHECK (cache
.saved_regs
[i
].addr () == -48);
696 SELF_CHECK (cache
.saved_regs
[i
].is_realreg ());
699 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
701 int regnum
= gdbarch_num_regs (gdbarch
);
704 SELF_CHECK (cache
.saved_regs
[i
+ regnum
705 + AARCH64_D0_REGNUM
].addr ()
708 SELF_CHECK (cache
.saved_regs
[i
+ regnum
709 + AARCH64_D0_REGNUM
].is_realreg ());
713 /* Test handling of movz before setting the frame pointer.  */
715 static const uint32_t insns
[] = {
716 0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
717 0x52800020, /* mov     w0, #0x1 */
718 0x910003fd, /* mov     x29, sp */
719 0x528000a2, /* mov     w2, #0x5 */
720 0x97fffff8, /* bl      6e4 */
723 instruction_reader_test
reader (insns
);
725 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
726 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
728 /* We should stop at the 4th instruction.  */
729 SELF_CHECK (end
== (4 - 1) * 4);
730 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
731 SELF_CHECK (cache
.framesize
== 16);
734 /* Test handling of movz/stp when using the stack pointer as frame
737 static const uint32_t insns
[] = {
738 0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
739 0x52800020, /* mov     w0, #0x1 */
740 0x290207e0, /* stp     w0, w1, [sp, #16] */
741 0xa9018fe2, /* stp     x2, x3, [sp, #24] */
742 0x528000a2, /* mov     w2, #0x5 */
743 0x97fffff8, /* bl      6e4 */
746 instruction_reader_test
reader (insns
);
748 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
749 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
751 /* We should stop at the 5th instruction.  */
752 SELF_CHECK (end
== (5 - 1) * 4);
753 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
754 SELF_CHECK (cache
.framesize
== 64);
757 /* Test handling of movz/str when using the stack pointer as frame
760 static const uint32_t insns
[] = {
761 0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
762 0x52800020, /* mov     w0, #0x1 */
763 0xb9002be4, /* str     w4, [sp, #40] */
764 0xf9001be5, /* str     x5, [sp, #48] */
765 0x528000a2, /* mov     w2, #0x5 */
766 0x97fffff8, /* bl      6e4 */
769 instruction_reader_test
reader (insns
);
771 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
772 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
774 /* We should stop at the 5th instruction.  */
775 SELF_CHECK (end
== (5 - 1) * 4);
776 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
777 SELF_CHECK (cache
.framesize
== 64);
780 /* Test handling of movz/stur when using the stack pointer as frame
783 static const uint32_t insns
[] = {
784 0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
785 0x52800020, /* mov     w0, #0x1 */
786 0xb80343e6, /* stur    w6, [sp, #52] */
787 0xf80383e7, /* stur    x7, [sp, #56] */
788 0x528000a2, /* mov     w2, #0x5 */
789 0x97fffff8, /* bl      6e4 */
792 instruction_reader_test
reader (insns
);
794 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
795 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
797 /* We should stop at the 5th instruction.  */
798 SELF_CHECK (end
== (5 - 1) * 4);
799 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
800 SELF_CHECK (cache
.framesize
== 64);
803 /* Test handling of movz when there is no frame pointer set or no stack
806 static const uint32_t insns
[] = {
807 0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
808 0x52800020, /* mov     w0, #0x1 */
809 0x528000a2, /* mov     w2, #0x5 */
810 0x97fffff8, /* bl      6e4 */
813 instruction_reader_test
reader (insns
);
815 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
816 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
818 /* We should stop at the 4th instruction.  */
819 SELF_CHECK (end
== (4 - 1) * 4);
820 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
821 SELF_CHECK (cache
.framesize
== 16);
824 /* Test a prologue in which there is a return address signing instruction.  */
825 if (tdep
->has_pauth ())
827 static const uint32_t insns
[] = {
828 0xd503233f, /* paciasp */
829 0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
830 0x910003fd, /* mov     x29, sp */
831 0xf801c3f3, /* str     x19, [sp, #28] */
832 0xb9401fa0, /* ldr     x19, [x29, #28] */
834 instruction_reader_test
reader (insns
);
836 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
837 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
,
840 SELF_CHECK (end
== 4 * 4);
841 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
842 SELF_CHECK (cache
.framesize
== 48);
844 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
847 SELF_CHECK (cache
.saved_regs
[i
].addr () == -20);
848 else if (i
== AARCH64_FP_REGNUM
)
849 SELF_CHECK (cache
.saved_regs
[i
].addr () == -48);
850 else if (i
== AARCH64_LR_REGNUM
)
851 SELF_CHECK (cache
.saved_regs
[i
].addr () == -40);
853 SELF_CHECK (cache
.saved_regs
[i
].is_realreg ());
856 if (tdep
->has_pauth ())
858 SELF_CHECK (trad_frame_value_p (cache
.saved_regs
,
859 tdep
->pauth_ra_state_regnum
));
860 SELF_CHECK (cache
.saved_regs
[tdep
->pauth_ra_state_regnum
].addr ()
865 } // namespace selftests
866 #endif /* GDB_SELF_TEST */
/* gdbarch skip_prologue: return the address just past the function
   prologue at PC.  Prefer line-table information (skip_prologue_using_sal);
   fall back to disassembly via aarch64_analyze_prologue, bounded either
   by debug info or by an arbitrary 128-byte limit.  */
868 /* Implement the "skip_prologue" gdbarch method.  */
871 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
873 CORE_ADDR func_addr
, limit_pc
;
875 /* See if we can determine the end of the prologue via the symbol
876 table.  If so, then return either PC, or the PC after the
877 prologue, whichever is greater.  */
878 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
880 CORE_ADDR post_prologue_pc
881 = skip_prologue_using_sal (gdbarch
, func_addr
);
883 if (post_prologue_pc
!= 0)
884 return std::max (pc
, post_prologue_pc
);
887 /* Can't determine prologue from the symbol table, need to examine
890 /* Find an upper limit on the function prologue using the debug
891 information.  If the debug information could not be used to
892 provide that bound, then use an arbitrary large number as the
894 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
896 limit_pc
= pc
+ 128; /* Magic.  */
898 /* Try disassembling prologue.  */
899 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
/* Populate CACHE by scanning THIS_FRAME's function prologue.  When the
   function's bounds are known, analyze [prologue_start, prologue_end);
   otherwise fall back to assuming a standard frame record at FP
   (saved x29 at offset 0, saved x30/LR at offset 8, framesize 16).
   NOTE(review): several interior lines are missing from this
   extraction (the find_pc_partial_function trailing arguments, the
   sal.line check, the else branch structure).  */
902 /* Scan the function prologue for THIS_FRAME and populate the prologue
906 aarch64_scan_prologue (struct frame_info
*this_frame
,
907 struct aarch64_prologue_cache
*cache
)
909 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
910 CORE_ADDR prologue_start
;
911 CORE_ADDR prologue_end
;
912 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
913 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
915 cache
->prev_pc
= prev_pc
;
917 /* Assume we do not find a frame.  */
918 cache
->framereg
= -1;
919 cache
->framesize
= 0;
921 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
924 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
928 /* No line info so use the current PC.  */
929 prologue_end
= prev_pc
;
931 else if (sal
.end
< prologue_end
)
933 /* The next line begins after the function end.  */
934 prologue_end
= sal
.end
;
/* Never scan past the point the frame has actually executed to.  */
937 prologue_end
= std::min (prologue_end
, prev_pc
);
938 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
/* Fallback: assume a frame chain record at the frame pointer.  */
944 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
948 cache
->framereg
= AARCH64_FP_REGNUM
;
949 cache
->framesize
= 16;
950 cache
->saved_regs
[29].set_addr (0);
951 cache
->saved_regs
[30].set_addr (8);
/* Worker for aarch64_make_prologue_cache: scan the prologue, compute
   PREV_SP = unwound frame register + framesize, convert saved-register
   offsets into absolute addresses, and mark the cache available.  May
   throw if registers/memory are unavailable (caller catches).  */
955 /* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
956 function may throw an exception if the inferior's registers or memory is
960 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
961 struct aarch64_prologue_cache
*cache
)
963 CORE_ADDR unwound_fp
;
966 aarch64_scan_prologue (this_frame
, cache
);
/* framereg == -1 means the scan could not locate a frame.  */
968 if (cache
->framereg
== -1)
971 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
975 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
977 /* Calculate actual addresses of saved registers using offsets
978 determined by aarch64_analyze_prologue.  */
979 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
980 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
981 cache
->saved_regs
[reg
].set_addr (cache
->saved_regs
[reg
].addr ()
984 cache
->func
= get_frame_func (this_frame
);
986 cache
->available_p
= 1;
/* Memoizing constructor for the prologue cache: reuse *THIS_CACHE if
   already built, otherwise allocate on the frame obstack and fill it
   in, swallowing NOT_AVAILABLE_ERROR so partially-available targets
   still get a (unavailable-marked) cache.
   NOTE(review): the try block opening, the rethrow, and the final
   `return cache;` are missing from this extraction.  */
989 /* Allocate and fill in *THIS_CACHE with information about the prologue of
990 *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
991 Return a pointer to the current aarch64_prologue_cache in
994 static struct aarch64_prologue_cache
*
995 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
997 struct aarch64_prologue_cache
*cache
;
999 if (*this_cache
!= NULL
)
1000 return (struct aarch64_prologue_cache
*) *this_cache
;
1002 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1003 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1004 *this_cache
= cache
;
1008 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1010 catch (const gdb_exception_error
&ex
)
1012 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
/* frame_unwind stop_reason for the prologue unwinder: unavailable
   caches stop with UNWIND_UNAVAILABLE; frames at/below the target's
   lowest_pc (e.g. "_start") or with a zero PREV_SP are outermost.  */
1019 /* Implement the "stop_reason" frame_unwind method.  */
1021 static enum unwind_stop_reason
1022 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1025 struct aarch64_prologue_cache
*cache
1026 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1028 if (!cache
->available_p
)
1029 return UNWIND_UNAVAILABLE
;
1031 /* Halt the backtrace at "_start".  */
1032 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1033 return UNWIND_OUTERMOST
;
1035 /* We've hit a wall, stop.  */
1036 if (cache
->prev_sp
== 0)
1037 return UNWIND_OUTERMOST
;
1039 return UNWIND_NO_REASON
;
/* frame_unwind this_id: build the frame ID from the caller's SP
   (prev_sp) and the function start address; use the unavailable-stack
   form when the cache could not be fully read.  */
1042 /* Our frame ID for a normal frame is the current function's starting
1043 PC and the caller's SP when we were called.  */
1046 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1047 void **this_cache
, struct frame_id
*this_id
)
1049 struct aarch64_prologue_cache
*cache
1050 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1052 if (!cache
->available_p
)
1053 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1055 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
/* frame_unwind prev_register: PC unwinds from LR (unmasking pauth
   signature bits when the unwound RA_STATE says the return address is
   signed); SP comes from the reconstructed PREV_SP; everything else
   defers to the trad-frame saved-register table.
   NOTE(review): the frame-layout ASCII diagram between original lines
   1086-1101 is mostly missing from this extraction, as are the
   trailing argument lines of the two final calls.  */
1058 /* Implement the "prev_register" frame_unwind method.  */
1060 static struct value
*
1061 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1062 void **this_cache
, int prev_regnum
)
1064 struct aarch64_prologue_cache
*cache
1065 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1067 /* If we are asked to unwind the PC, then we need to return the LR
1068 instead.  The prologue may save PC, but it will point into this
1069 frame's prologue, not the next frame's resume location.  */
1070 if (prev_regnum
== AARCH64_PC_REGNUM
)
1073 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1074 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1076 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1078 if (tdep
->has_pauth ()
1079 && trad_frame_value_p (cache
->saved_regs
,
1080 tdep
->pauth_ra_state_regnum
))
1081 lr
= aarch64_frame_unmask_lr (tdep
, this_frame
, lr
);
1083 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1086 /* SP is generally not saved to the stack, but this frame is
1087 identified by the next frame's stack pointer at the time of the
1088 call.  The value was already reconstructed into PREV_SP.  */
1094 |         |           | <- Previous SP
1097 +--| saved fp |<- FP
1101 if (prev_regnum
== AARCH64_SP_REGNUM
)
1102 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1105 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
/* Normal-frame unwinder: stop_reason/this_id/prev_register above,
   selected by the default sniffer.
   NOTE(review): the aggregate's braces and the frame-type/dealloc
   fields are missing from this extraction.  */
1109 /* AArch64 prologue unwinder.  */
1110 struct frame_unwind aarch64_prologue_unwind
=
1113 aarch64_prologue_frame_unwind_stop_reason
,
1114 aarch64_prologue_this_id
,
1115 aarch64_prologue_prev_register
,
1117 default_frame_sniffer
/* Memoizing cache constructor for stub (e.g. PLT) frames: no prologue
   scan — PREV_SP is just the current SP and PREV_PC the current PC.
   NOT_AVAILABLE_ERROR is tolerated, leaving the cache unavailable.
   NOTE(review): the try block opening, the SP register-number
   argument, the rethrow, and `return cache;` are missing from this
   extraction.  */
1120 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1121 *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
1122 Return a pointer to the current aarch64_prologue_cache in
1125 static struct aarch64_prologue_cache
*
1126 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1128 struct aarch64_prologue_cache
*cache
;
1130 if (*this_cache
!= NULL
)
1131 return (struct aarch64_prologue_cache
*) *this_cache
;
1133 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1134 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1135 *this_cache
= cache
;
1139 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1141 cache
->prev_pc
= get_frame_pc (this_frame
);
1142 cache
->available_p
= 1;
1144 catch (const gdb_exception_error
&ex
)
1146 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
/* frame_unwind stop_reason for stub frames: only distinguishes
   available vs unavailable caches.  */
1153 /* Implement the "stop_reason" frame_unwind method.  */
1155 static enum unwind_stop_reason
1156 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1159 struct aarch64_prologue_cache
*cache
1160 = aarch64_make_stub_cache (this_frame
, this_cache
);
1162 if (!cache
->available_p
)
1163 return UNWIND_UNAVAILABLE
;
1165 return UNWIND_NO_REASON
;
/* frame_unwind this_id for stub frames: built from the cached SP and
   PC, or the unavailable-stack form when the cache could not be read.  */
1168 /* Our frame ID for a stub frame is the current SP and LR.  */
1171 aarch64_stub_this_id (struct frame_info
*this_frame
,
1172 void **this_cache
, struct frame_id
*this_id
)
1174 struct aarch64_prologue_cache
*cache
1175 = aarch64_make_stub_cache (this_frame
, this_cache
);
1177 if (cache
->available_p
)
1178 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1180 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
1183 /* Implement the "sniffer" frame_unwind method. */
1186 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1187 struct frame_info
*this_frame
,
1188 void **this_prologue_cache
)
1190 CORE_ADDR addr_in_block
;
1193 addr_in_block
= get_frame_address_in_block (this_frame
);
1194 if (in_plt_section (addr_in_block
)
1195 /* We also use the stub winder if the target memory is unreadable
1196 to avoid having the prologue unwinder trying to read it. */
1197 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1203 /* AArch64 stub unwinder. */
1204 struct frame_unwind aarch64_stub_unwind
=
1207 aarch64_stub_frame_unwind_stop_reason
,
1208 aarch64_stub_this_id
,
1209 aarch64_prologue_prev_register
,
1211 aarch64_stub_unwind_sniffer
1214 /* Return the frame base address of *THIS_FRAME. */
1217 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1219 struct aarch64_prologue_cache
*cache
1220 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1222 return cache
->prev_sp
- cache
->framesize
;
1225 /* AArch64 default frame base information. */
1226 struct frame_base aarch64_normal_base
=
1228 &aarch64_prologue_unwind
,
1229 aarch64_normal_frame_base
,
1230 aarch64_normal_frame_base
,
1231 aarch64_normal_frame_base
1234 /* Return the value of the REGNUM register in the previous frame of
1237 static struct value
*
1238 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1239 void **this_cache
, int regnum
)
1241 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_frame_arch (this_frame
));
1246 case AARCH64_PC_REGNUM
:
1247 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1248 lr
= aarch64_frame_unmask_lr (tdep
, this_frame
, lr
);
1249 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1252 internal_error (__FILE__
, __LINE__
,
1253 _("Unexpected register %d"), regnum
);
1257 static const unsigned char op_lit0
= DW_OP_lit0
;
1258 static const unsigned char op_lit1
= DW_OP_lit1
;
1260 /* Implement the "init_reg" dwarf2_frame_ops method. */
1263 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1264 struct dwarf2_frame_state_reg
*reg
,
1265 struct frame_info
*this_frame
)
1267 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1271 case AARCH64_PC_REGNUM
:
1272 reg
->how
= DWARF2_FRAME_REG_FN
;
1273 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1276 case AARCH64_SP_REGNUM
:
1277 reg
->how
= DWARF2_FRAME_REG_CFA
;
1281 /* Init pauth registers. */
1282 if (tdep
->has_pauth ())
1284 if (regnum
== tdep
->pauth_ra_state_regnum
)
1286 /* Initialize RA_STATE to zero. */
1287 reg
->how
= DWARF2_FRAME_REG_SAVED_VAL_EXP
;
1288 reg
->loc
.exp
.start
= &op_lit0
;
1289 reg
->loc
.exp
.len
= 1;
1292 else if (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
1293 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
))
1295 reg
->how
= DWARF2_FRAME_REG_SAME_VALUE
;
1301 /* Implement the execute_dwarf_cfa_vendor_op method. */
1304 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch
*gdbarch
, gdb_byte op
,
1305 struct dwarf2_frame_state
*fs
)
1307 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1308 struct dwarf2_frame_state_reg
*ra_state
;
1310 if (op
== DW_CFA_AARCH64_negate_ra_state
)
1312 /* On systems without pauth, treat as a nop. */
1313 if (!tdep
->has_pauth ())
1316 /* Allocate RA_STATE column if it's not allocated yet. */
1317 fs
->regs
.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE
+ 1);
1319 /* Toggle the status of RA_STATE between 0 and 1. */
1320 ra_state
= &(fs
->regs
.reg
[AARCH64_DWARF_PAUTH_RA_STATE
]);
1321 ra_state
->how
= DWARF2_FRAME_REG_SAVED_VAL_EXP
;
1323 if (ra_state
->loc
.exp
.start
== nullptr
1324 || ra_state
->loc
.exp
.start
== &op_lit0
)
1325 ra_state
->loc
.exp
.start
= &op_lit1
;
1327 ra_state
->loc
.exp
.start
= &op_lit0
;
1329 ra_state
->loc
.exp
.len
= 1;
/* Used for matching BRK instructions for AArch64.  The immediate field
   (bits 5..20) is don't-care in the mask.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1341 /* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1344 aarch64_program_breakpoint_here_p (gdbarch
*gdbarch
, CORE_ADDR address
)
1346 const uint32_t insn_len
= 4;
1347 gdb_byte target_mem
[4];
1349 /* Enable the automatic memory restoration from breakpoints while
1350 we read the memory. Otherwise we may find temporary breakpoints, ones
1351 inserted by GDB, and flag them as permanent breakpoints. */
1352 scoped_restore restore_memory
1353 = make_scoped_restore_show_memory_breakpoints (0);
1355 if (target_read_memory (address
, target_mem
, insn_len
) == 0)
1358 (uint32_t) extract_unsigned_integer (target_mem
, insn_len
,
1359 gdbarch_byte_order_for_code (gdbarch
));
1361 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1362 of such instructions with different immediate values. Different OS'
1363 may use a different variation, but they have the same outcome. */
1364 return ((insn
& BRK_INSN_MASK
) == BRK_INSN_BASE
);
1370 /* When arguments must be pushed onto the stack, they go on in reverse
1371 order. The code below implements a FILO (stack) to do this. */
1375 /* Value to pass on stack. It can be NULL if this item is for stack
1377 const gdb_byte
*data
;
1379 /* Size in bytes of value to pass on stack. */
1383 /* Implement the gdbarch type alignment method, overrides the generic
1384 alignment algorithm for anything that is aarch64 specific. */
1387 aarch64_type_align (gdbarch
*gdbarch
, struct type
*t
)
1389 t
= check_typedef (t
);
1390 if (t
->code () == TYPE_CODE_ARRAY
&& t
->is_vector ())
1392 /* Use the natural alignment for vector types (the same for
1393 scalar type), but the maximum alignment is 128-bit. */
1394 if (TYPE_LENGTH (t
) > 16)
1397 return TYPE_LENGTH (t
);
1400 /* Allow the common code to calculate the alignment. */
1404 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1406 Return the number of register required, or -1 on failure.
1408 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1409 to the element, else fail if the type of this element does not match the
1413 aapcs_is_vfp_call_or_return_candidate_1 (struct type
*type
,
1414 struct type
**fundamental_type
)
1416 if (type
== nullptr)
1419 switch (type
->code ())
1422 if (TYPE_LENGTH (type
) > 16)
1425 if (*fundamental_type
== nullptr)
1426 *fundamental_type
= type
;
1427 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1428 || type
->code () != (*fundamental_type
)->code ())
1433 case TYPE_CODE_COMPLEX
:
1435 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1436 if (TYPE_LENGTH (target_type
) > 16)
1439 if (*fundamental_type
== nullptr)
1440 *fundamental_type
= target_type
;
1441 else if (TYPE_LENGTH (target_type
) != TYPE_LENGTH (*fundamental_type
)
1442 || target_type
->code () != (*fundamental_type
)->code ())
1448 case TYPE_CODE_ARRAY
:
1450 if (type
->is_vector ())
1452 if (TYPE_LENGTH (type
) != 8 && TYPE_LENGTH (type
) != 16)
1455 if (*fundamental_type
== nullptr)
1456 *fundamental_type
= type
;
1457 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1458 || type
->code () != (*fundamental_type
)->code ())
1465 struct type
*target_type
= TYPE_TARGET_TYPE (type
);
1466 int count
= aapcs_is_vfp_call_or_return_candidate_1
1467 (target_type
, fundamental_type
);
1472 count
*= (TYPE_LENGTH (type
) / TYPE_LENGTH (target_type
));
1477 case TYPE_CODE_STRUCT
:
1478 case TYPE_CODE_UNION
:
1482 for (int i
= 0; i
< type
->num_fields (); i
++)
1484 /* Ignore any static fields. */
1485 if (field_is_static (&type
->field (i
)))
1488 struct type
*member
= check_typedef (type
->field (i
).type ());
1490 int sub_count
= aapcs_is_vfp_call_or_return_candidate_1
1491 (member
, fundamental_type
);
1492 if (sub_count
== -1)
1497 /* Ensure there is no padding between the fields (allowing for empty
1498 zero length structs) */
1499 int ftype_length
= (*fundamental_type
== nullptr)
1500 ? 0 : TYPE_LENGTH (*fundamental_type
);
1501 if (count
* ftype_length
!= TYPE_LENGTH (type
))
1514 /* Return true if an argument, whose type is described by TYPE, can be passed or
1515 returned in simd/fp registers, providing enough parameter passing registers
1516 are available. This is as described in the AAPCS64.
1518 Upon successful return, *COUNT returns the number of needed registers,
1519 *FUNDAMENTAL_TYPE contains the type of those registers.
1521 Candidate as per the AAPCS64 5.4.2.C is either a:
1524 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1525 all the members are floats and has at most 4 members.
1526 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1527 all the members are short vectors and has at most 4 members.
1530 Note that HFAs and HVAs can include nested structures and arrays. */
1533 aapcs_is_vfp_call_or_return_candidate (struct type
*type
, int *count
,
1534 struct type
**fundamental_type
)
1536 if (type
== nullptr)
1539 *fundamental_type
= nullptr;
1541 int ag_count
= aapcs_is_vfp_call_or_return_candidate_1 (type
,
1544 if (ag_count
> 0 && ag_count
<= HA_MAX_NUM_FLDS
)
1553 /* AArch64 function call information structure. */
1554 struct aarch64_call_info
1556 /* the current argument number. */
1557 unsigned argnum
= 0;
1559 /* The next general purpose register number, equivalent to NGRN as
1560 described in the AArch64 Procedure Call Standard. */
1563 /* The next SIMD and floating point register number, equivalent to
1564 NSRN as described in the AArch64 Procedure Call Standard. */
1567 /* The next stacked argument address, equivalent to NSAA as
1568 described in the AArch64 Procedure Call Standard. */
1571 /* Stack item vector. */
1572 std::vector
<stack_item_t
> si
;
1575 /* Pass a value in a sequence of consecutive X registers. The caller
1576 is responsible for ensuring sufficient registers are available. */
1579 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1580 struct aarch64_call_info
*info
, struct type
*type
,
1583 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1584 int len
= TYPE_LENGTH (type
);
1585 enum type_code typecode
= type
->code ();
1586 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1587 const bfd_byte
*buf
= value_contents (arg
);
1593 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1594 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1598 /* Adjust sub-word struct/union args when big-endian. */
1599 if (byte_order
== BFD_ENDIAN_BIG
1600 && partial_len
< X_REGISTER_SIZE
1601 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1602 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1604 aarch64_debug_printf ("arg %d in %s = 0x%s", info
->argnum
,
1605 gdbarch_register_name (gdbarch
, regnum
),
1606 phex (regval
, X_REGISTER_SIZE
));
1608 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1615 /* Attempt to marshall a value in a V register. Return 1 if
1616 successful, or 0 if insufficient registers are available. This
1617 function, unlike the equivalent pass_in_x() function does not
1618 handle arguments spread across multiple registers. */
1621 pass_in_v (struct gdbarch
*gdbarch
,
1622 struct regcache
*regcache
,
1623 struct aarch64_call_info
*info
,
1624 int len
, const bfd_byte
*buf
)
1628 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1629 /* Enough space for a full vector register. */
1630 gdb_byte reg
[register_size (gdbarch
, regnum
)];
1631 gdb_assert (len
<= sizeof (reg
));
1636 memset (reg
, 0, sizeof (reg
));
1637 /* PCS C.1, the argument is allocated to the least significant
1638 bits of V register. */
1639 memcpy (reg
, buf
, len
);
1640 regcache
->cooked_write (regnum
, reg
);
1642 aarch64_debug_printf ("arg %d in %s", info
->argnum
,
1643 gdbarch_register_name (gdbarch
, regnum
));
1651 /* Marshall an argument onto the stack. */
1654 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1657 const bfd_byte
*buf
= value_contents (arg
);
1658 int len
= TYPE_LENGTH (type
);
1664 align
= type_align (type
);
1666 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1667 Natural alignment of the argument's type. */
1668 align
= align_up (align
, 8);
1670 /* The AArch64 PCS requires at most doubleword alignment. */
1674 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1679 info
->si
.push_back (item
);
1682 if (info
->nsaa
& (align
- 1))
1684 /* Push stack alignment padding. */
1685 int pad
= align
- (info
->nsaa
& (align
- 1));
1690 info
->si
.push_back (item
);
1695 /* Marshall an argument into a sequence of one or more consecutive X
1696 registers or, if insufficient X registers are available then onto
1700 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1701 struct aarch64_call_info
*info
, struct type
*type
,
1704 int len
= TYPE_LENGTH (type
);
1705 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1707 /* PCS C.13 - Pass in registers if we have enough spare */
1708 if (info
->ngrn
+ nregs
<= 8)
1710 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1711 info
->ngrn
+= nregs
;
1716 pass_on_stack (info
, type
, arg
);
1720 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1721 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1722 registers. A return value of false is an error state as the value will have
1723 been partially passed to the stack. */
1725 pass_in_v_vfp_candidate (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1726 struct aarch64_call_info
*info
, struct type
*arg_type
,
1729 switch (arg_type
->code ())
1732 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1733 value_contents (arg
));
1736 case TYPE_CODE_COMPLEX
:
1738 const bfd_byte
*buf
= value_contents (arg
);
1739 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (arg_type
));
1741 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1745 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1746 buf
+ TYPE_LENGTH (target_type
));
1749 case TYPE_CODE_ARRAY
:
1750 if (arg_type
->is_vector ())
1751 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1752 value_contents (arg
));
1755 case TYPE_CODE_STRUCT
:
1756 case TYPE_CODE_UNION
:
1757 for (int i
= 0; i
< arg_type
->num_fields (); i
++)
1759 /* Don't include static fields. */
1760 if (field_is_static (&arg_type
->field (i
)))
1763 struct value
*field
= value_primitive_field (arg
, 0, i
, arg_type
);
1764 struct type
*field_type
= check_typedef (value_type (field
));
1766 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, info
, field_type
,
1777 /* Implement the "push_dummy_call" gdbarch method. */
1780 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1781 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1783 struct value
**args
, CORE_ADDR sp
,
1784 function_call_return_method return_method
,
1785 CORE_ADDR struct_addr
)
1788 struct aarch64_call_info info
;
1790 /* We need to know what the type of the called function is in order
1791 to determine the number of named/anonymous arguments for the
1792 actual argument placement, and the return type in order to handle
1793 return value correctly.
1795 The generic code above us views the decision of return in memory
1796 or return in registers as a two stage processes. The language
1797 handler is consulted first and may decide to return in memory (eg
1798 class with copy constructor returned by value), this will cause
1799 the generic code to allocate space AND insert an initial leading
1802 If the language code does not decide to pass in memory then the
1803 target code is consulted.
1805 If the language code decides to pass in memory we want to move
1806 the pointer inserted as the initial argument from the argument
1807 list and into X8, the conventional AArch64 struct return pointer
1810 /* Set the return address. For the AArch64, the return breakpoint
1811 is always at BP_ADDR. */
1812 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1814 /* If we were given an initial argument for the return slot, lose it. */
1815 if (return_method
== return_method_hidden_param
)
1821 /* The struct_return pointer occupies X8. */
1822 if (return_method
!= return_method_normal
)
1824 aarch64_debug_printf ("struct return in %s = 0x%s",
1825 gdbarch_register_name
1826 (gdbarch
, AARCH64_STRUCT_RETURN_REGNUM
),
1827 paddress (gdbarch
, struct_addr
));
1829 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1833 for (argnum
= 0; argnum
< nargs
; argnum
++)
1835 struct value
*arg
= args
[argnum
];
1836 struct type
*arg_type
, *fundamental_type
;
1839 arg_type
= check_typedef (value_type (arg
));
1840 len
= TYPE_LENGTH (arg_type
);
1842 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1843 if there are enough spare registers. */
1844 if (aapcs_is_vfp_call_or_return_candidate (arg_type
, &elements
,
1847 if (info
.nsrn
+ elements
<= 8)
1849 /* We know that we have sufficient registers available therefore
1850 this will never need to fallback to the stack. */
1851 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, &info
, arg_type
,
1853 gdb_assert_not_reached ("Failed to push args");
1858 pass_on_stack (&info
, arg_type
, arg
);
1863 switch (arg_type
->code ())
1866 case TYPE_CODE_BOOL
:
1867 case TYPE_CODE_CHAR
:
1868 case TYPE_CODE_RANGE
:
1869 case TYPE_CODE_ENUM
:
1872 /* Promote to 32 bit integer. */
1873 if (arg_type
->is_unsigned ())
1874 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1876 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1877 arg
= value_cast (arg_type
, arg
);
1879 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1882 case TYPE_CODE_STRUCT
:
1883 case TYPE_CODE_ARRAY
:
1884 case TYPE_CODE_UNION
:
1887 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1888 invisible reference. */
1890 /* Allocate aligned storage. */
1891 sp
= align_down (sp
- len
, 16);
1893 /* Write the real data into the stack. */
1894 write_memory (sp
, value_contents (arg
), len
);
1896 /* Construct the indirection. */
1897 arg_type
= lookup_pointer_type (arg_type
);
1898 arg
= value_from_pointer (arg_type
, sp
);
1899 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1902 /* PCS C.15 / C.18 multiple values pass. */
1903 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1907 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1912 /* Make sure stack retains 16 byte alignment. */
1914 sp
-= 16 - (info
.nsaa
& 15);
1916 while (!info
.si
.empty ())
1918 const stack_item_t
&si
= info
.si
.back ();
1921 if (si
.data
!= NULL
)
1922 write_memory (sp
, si
.data
, si
.len
);
1923 info
.si
.pop_back ();
1926 /* Finally, update the SP register. */
1927 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1932 /* Implement the "frame_align" gdbarch method. */
1935 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1937 /* Align the stack to sixteen bytes. */
1938 return sp
& ~(CORE_ADDR
) 15;
1941 /* Return the type for an AdvSISD Q register. */
1943 static struct type
*
1944 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1946 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1948 if (tdep
->vnq_type
== NULL
)
1953 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1956 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1957 append_composite_type_field (t
, "u", elem
);
1959 elem
= builtin_type (gdbarch
)->builtin_int128
;
1960 append_composite_type_field (t
, "s", elem
);
1965 return tdep
->vnq_type
;
1968 /* Return the type for an AdvSISD D register. */
1970 static struct type
*
1971 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1973 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1975 if (tdep
->vnd_type
== NULL
)
1980 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1983 elem
= builtin_type (gdbarch
)->builtin_double
;
1984 append_composite_type_field (t
, "f", elem
);
1986 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1987 append_composite_type_field (t
, "u", elem
);
1989 elem
= builtin_type (gdbarch
)->builtin_int64
;
1990 append_composite_type_field (t
, "s", elem
);
1995 return tdep
->vnd_type
;
1998 /* Return the type for an AdvSISD S register. */
2000 static struct type
*
2001 aarch64_vns_type (struct gdbarch
*gdbarch
)
2003 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2005 if (tdep
->vns_type
== NULL
)
2010 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
2013 elem
= builtin_type (gdbarch
)->builtin_float
;
2014 append_composite_type_field (t
, "f", elem
);
2016 elem
= builtin_type (gdbarch
)->builtin_uint32
;
2017 append_composite_type_field (t
, "u", elem
);
2019 elem
= builtin_type (gdbarch
)->builtin_int32
;
2020 append_composite_type_field (t
, "s", elem
);
2025 return tdep
->vns_type
;
2028 /* Return the type for an AdvSISD H register. */
2030 static struct type
*
2031 aarch64_vnh_type (struct gdbarch
*gdbarch
)
2033 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2035 if (tdep
->vnh_type
== NULL
)
2040 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
2043 elem
= builtin_type (gdbarch
)->builtin_bfloat16
;
2044 append_composite_type_field (t
, "bf", elem
);
2046 elem
= builtin_type (gdbarch
)->builtin_half
;
2047 append_composite_type_field (t
, "f", elem
);
2049 elem
= builtin_type (gdbarch
)->builtin_uint16
;
2050 append_composite_type_field (t
, "u", elem
);
2052 elem
= builtin_type (gdbarch
)->builtin_int16
;
2053 append_composite_type_field (t
, "s", elem
);
2058 return tdep
->vnh_type
;
2061 /* Return the type for an AdvSISD B register. */
2063 static struct type
*
2064 aarch64_vnb_type (struct gdbarch
*gdbarch
)
2066 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2068 if (tdep
->vnb_type
== NULL
)
2073 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
2076 elem
= builtin_type (gdbarch
)->builtin_uint8
;
2077 append_composite_type_field (t
, "u", elem
);
2079 elem
= builtin_type (gdbarch
)->builtin_int8
;
2080 append_composite_type_field (t
, "s", elem
);
2085 return tdep
->vnb_type
;
2088 /* Return the type for an AdvSISD V register. */
2090 static struct type
*
2091 aarch64_vnv_type (struct gdbarch
*gdbarch
)
2093 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2095 if (tdep
->vnv_type
== NULL
)
2097 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2098 slice from the non-pseudo vector registers. However NEON V registers
2099 are always vector registers, and need constructing as such. */
2100 const struct builtin_type
*bt
= builtin_type (gdbarch
);
2102 struct type
*t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnv",
2105 struct type
*sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
2107 append_composite_type_field (sub
, "f",
2108 init_vector_type (bt
->builtin_double
, 2));
2109 append_composite_type_field (sub
, "u",
2110 init_vector_type (bt
->builtin_uint64
, 2));
2111 append_composite_type_field (sub
, "s",
2112 init_vector_type (bt
->builtin_int64
, 2));
2113 append_composite_type_field (t
, "d", sub
);
2115 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
2117 append_composite_type_field (sub
, "f",
2118 init_vector_type (bt
->builtin_float
, 4));
2119 append_composite_type_field (sub
, "u",
2120 init_vector_type (bt
->builtin_uint32
, 4));
2121 append_composite_type_field (sub
, "s",
2122 init_vector_type (bt
->builtin_int32
, 4));
2123 append_composite_type_field (t
, "s", sub
);
2125 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
2127 append_composite_type_field (sub
, "bf",
2128 init_vector_type (bt
->builtin_bfloat16
, 8));
2129 append_composite_type_field (sub
, "f",
2130 init_vector_type (bt
->builtin_half
, 8));
2131 append_composite_type_field (sub
, "u",
2132 init_vector_type (bt
->builtin_uint16
, 8));
2133 append_composite_type_field (sub
, "s",
2134 init_vector_type (bt
->builtin_int16
, 8));
2135 append_composite_type_field (t
, "h", sub
);
2137 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
2139 append_composite_type_field (sub
, "u",
2140 init_vector_type (bt
->builtin_uint8
, 16));
2141 append_composite_type_field (sub
, "s",
2142 init_vector_type (bt
->builtin_int8
, 16));
2143 append_composite_type_field (t
, "b", sub
);
2145 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
2147 append_composite_type_field (sub
, "u",
2148 init_vector_type (bt
->builtin_uint128
, 1));
2149 append_composite_type_field (sub
, "s",
2150 init_vector_type (bt
->builtin_int128
, 1));
2151 append_composite_type_field (t
, "q", sub
);
2156 return tdep
->vnv_type
;
2159 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2162 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
2164 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2166 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
2167 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
2169 if (reg
== AARCH64_DWARF_SP
)
2170 return AARCH64_SP_REGNUM
;
2172 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
2173 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
2175 if (reg
== AARCH64_DWARF_SVE_VG
)
2176 return AARCH64_SVE_VG_REGNUM
;
2178 if (reg
== AARCH64_DWARF_SVE_FFR
)
2179 return AARCH64_SVE_FFR_REGNUM
;
2181 if (reg
>= AARCH64_DWARF_SVE_P0
&& reg
<= AARCH64_DWARF_SVE_P0
+ 15)
2182 return AARCH64_SVE_P0_REGNUM
+ reg
- AARCH64_DWARF_SVE_P0
;
2184 if (reg
>= AARCH64_DWARF_SVE_Z0
&& reg
<= AARCH64_DWARF_SVE_Z0
+ 15)
2185 return AARCH64_SVE_Z0_REGNUM
+ reg
- AARCH64_DWARF_SVE_Z0
;
2187 if (tdep
->has_pauth ())
2189 if (reg
>= AARCH64_DWARF_PAUTH_DMASK
&& reg
<= AARCH64_DWARF_PAUTH_CMASK
)
2190 return tdep
->pauth_reg_base
+ reg
- AARCH64_DWARF_PAUTH_DMASK
;
2192 if (reg
== AARCH64_DWARF_PAUTH_RA_STATE
)
2193 return tdep
->pauth_ra_state_regnum
;
2199 /* Implement the "print_insn" gdbarch method. */
2202 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
2204 info
->symbols
= NULL
;
2205 return default_print_insn (memaddr
, info
);
2208 /* AArch64 BRK software debug mode instruction.
2209 Note that AArch64 code is always little-endian.
2210 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2211 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
2213 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
2215 /* Extract from an array REGS containing the (raw) register state a
2216 function return value of type TYPE, and copy that, in virtual
2217 format, into VALBUF. */
2220 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
2223 struct gdbarch
*gdbarch
= regs
->arch ();
2224 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2226 struct type
*fundamental_type
;
2228 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2231 int len
= TYPE_LENGTH (fundamental_type
);
2233 for (int i
= 0; i
< elements
; i
++)
2235 int regno
= AARCH64_V0_REGNUM
+ i
;
2236 /* Enough space for a full vector register. */
2237 gdb_byte buf
[register_size (gdbarch
, regno
)];
2238 gdb_assert (len
<= sizeof (buf
));
2240 aarch64_debug_printf
2241 ("read HFA or HVA return value element %d from %s",
2242 i
+ 1, gdbarch_register_name (gdbarch
, regno
));
2244 regs
->cooked_read (regno
, buf
);
2246 memcpy (valbuf
, buf
, len
);
2250 else if (type
->code () == TYPE_CODE_INT
2251 || type
->code () == TYPE_CODE_CHAR
2252 || type
->code () == TYPE_CODE_BOOL
2253 || type
->code () == TYPE_CODE_PTR
2254 || TYPE_IS_REFERENCE (type
)
2255 || type
->code () == TYPE_CODE_ENUM
)
2257 /* If the type is a plain integer, then the access is
2258 straight-forward. Otherwise we have to play around a bit
2260 int len
= TYPE_LENGTH (type
);
2261 int regno
= AARCH64_X0_REGNUM
;
2266 /* By using store_unsigned_integer we avoid having to do
2267 anything special for small big-endian values. */
2268 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2269 store_unsigned_integer (valbuf
,
2270 (len
> X_REGISTER_SIZE
2271 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2272 len
-= X_REGISTER_SIZE
;
2273 valbuf
+= X_REGISTER_SIZE
;
2278 /* For a structure or union the behaviour is as if the value had
2279 been stored to word-aligned memory and then loaded into
2280 registers with 64-bit load instruction(s). */
2281 int len
= TYPE_LENGTH (type
);
2282 int regno
= AARCH64_X0_REGNUM
;
2283 bfd_byte buf
[X_REGISTER_SIZE
];
2287 regs
->cooked_read (regno
++, buf
);
2288 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2289 len
-= X_REGISTER_SIZE
;
2290 valbuf
+= X_REGISTER_SIZE
;
2296 /* Will a function return an aggregate type in memory or in a
2297 register? Return 0 if an aggregate type can be returned in a
2298 register, 1 if it must be returned in memory. */
2301 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
2303 type
= check_typedef (type
);
2305 struct type
*fundamental_type
;
2307 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2310 /* v0-v7 are used to return values and one register is allocated
2311 for one member. However, HFA or HVA has at most four members. */
2315 if (TYPE_LENGTH (type
) > 16)
2317 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2318 invisible reference. */
2326 /* Write into appropriate registers a function return value of type
2327 TYPE, given in virtual format. */
2330 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2331 const gdb_byte
*valbuf
)
2333 struct gdbarch
*gdbarch
= regs
->arch ();
2334 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2336 struct type
*fundamental_type
;
2338 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2341 int len
= TYPE_LENGTH (fundamental_type
);
2343 for (int i
= 0; i
< elements
; i
++)
2345 int regno
= AARCH64_V0_REGNUM
+ i
;
2346 /* Enough space for a full vector register. */
2347 gdb_byte tmpbuf
[register_size (gdbarch
, regno
)];
2348 gdb_assert (len
<= sizeof (tmpbuf
));
2350 aarch64_debug_printf
2351 ("write HFA or HVA return value element %d to %s",
2352 i
+ 1, gdbarch_register_name (gdbarch
, regno
));
2354 memcpy (tmpbuf
, valbuf
,
2355 len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2356 regs
->cooked_write (regno
, tmpbuf
);
2360 else if (type
->code () == TYPE_CODE_INT
2361 || type
->code () == TYPE_CODE_CHAR
2362 || type
->code () == TYPE_CODE_BOOL
2363 || type
->code () == TYPE_CODE_PTR
2364 || TYPE_IS_REFERENCE (type
)
2365 || type
->code () == TYPE_CODE_ENUM
)
2367 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2369 /* Values of one word or less are zero/sign-extended and
2371 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2372 LONGEST val
= unpack_long (type
, valbuf
);
2374 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2375 regs
->cooked_write (AARCH64_X0_REGNUM
, tmpbuf
);
2379 /* Integral values greater than one word are stored in
2380 consecutive registers starting with r0. This will always
2381 be a multiple of the regiser size. */
2382 int len
= TYPE_LENGTH (type
);
2383 int regno
= AARCH64_X0_REGNUM
;
2387 regs
->cooked_write (regno
++, valbuf
);
2388 len
-= X_REGISTER_SIZE
;
2389 valbuf
+= X_REGISTER_SIZE
;
2395 /* For a structure or union the behaviour is as if the value had
2396 been stored to word-aligned memory and then loaded into
2397 registers with 64-bit load instruction(s). */
2398 int len
= TYPE_LENGTH (type
);
2399 int regno
= AARCH64_X0_REGNUM
;
2400 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2404 memcpy (tmpbuf
, valbuf
,
2405 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2406 regs
->cooked_write (regno
++, tmpbuf
);
2407 len
-= X_REGISTER_SIZE
;
2408 valbuf
+= X_REGISTER_SIZE
;
2413 /* Implement the "return_value" gdbarch method. */
2415 static enum return_value_convention
2416 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2417 struct type
*valtype
, struct regcache
*regcache
,
2418 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2421 if (valtype
->code () == TYPE_CODE_STRUCT
2422 || valtype
->code () == TYPE_CODE_UNION
2423 || valtype
->code () == TYPE_CODE_ARRAY
)
2425 if (aarch64_return_in_memory (gdbarch
, valtype
))
2427 aarch64_debug_printf ("return value in memory");
2428 return RETURN_VALUE_STRUCT_CONVENTION
;
2433 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2436 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2438 aarch64_debug_printf ("return value in registers");
2440 return RETURN_VALUE_REGISTER_CONVENTION
;
2443 /* Implement the "get_longjmp_target" gdbarch method. */
2446 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2449 gdb_byte buf
[X_REGISTER_SIZE
];
2450 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2451 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2452 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2454 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2456 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2460 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2464 /* Implement the "gen_return_address" gdbarch method. */
2467 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2468 struct agent_expr
*ax
, struct axs_value
*value
,
2471 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2472 value
->kind
= axs_lvalue_register
;
2473 value
->u
.reg
= AARCH64_LR_REGNUM
;
2477 /* Return the pseudo register name corresponding to register regnum. */
2480 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2482 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2484 static const char *const q_name
[] =
2486 "q0", "q1", "q2", "q3",
2487 "q4", "q5", "q6", "q7",
2488 "q8", "q9", "q10", "q11",
2489 "q12", "q13", "q14", "q15",
2490 "q16", "q17", "q18", "q19",
2491 "q20", "q21", "q22", "q23",
2492 "q24", "q25", "q26", "q27",
2493 "q28", "q29", "q30", "q31",
2496 static const char *const d_name
[] =
2498 "d0", "d1", "d2", "d3",
2499 "d4", "d5", "d6", "d7",
2500 "d8", "d9", "d10", "d11",
2501 "d12", "d13", "d14", "d15",
2502 "d16", "d17", "d18", "d19",
2503 "d20", "d21", "d22", "d23",
2504 "d24", "d25", "d26", "d27",
2505 "d28", "d29", "d30", "d31",
2508 static const char *const s_name
[] =
2510 "s0", "s1", "s2", "s3",
2511 "s4", "s5", "s6", "s7",
2512 "s8", "s9", "s10", "s11",
2513 "s12", "s13", "s14", "s15",
2514 "s16", "s17", "s18", "s19",
2515 "s20", "s21", "s22", "s23",
2516 "s24", "s25", "s26", "s27",
2517 "s28", "s29", "s30", "s31",
2520 static const char *const h_name
[] =
2522 "h0", "h1", "h2", "h3",
2523 "h4", "h5", "h6", "h7",
2524 "h8", "h9", "h10", "h11",
2525 "h12", "h13", "h14", "h15",
2526 "h16", "h17", "h18", "h19",
2527 "h20", "h21", "h22", "h23",
2528 "h24", "h25", "h26", "h27",
2529 "h28", "h29", "h30", "h31",
2532 static const char *const b_name
[] =
2534 "b0", "b1", "b2", "b3",
2535 "b4", "b5", "b6", "b7",
2536 "b8", "b9", "b10", "b11",
2537 "b12", "b13", "b14", "b15",
2538 "b16", "b17", "b18", "b19",
2539 "b20", "b21", "b22", "b23",
2540 "b24", "b25", "b26", "b27",
2541 "b28", "b29", "b30", "b31",
2544 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2546 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2547 return q_name
[p_regnum
- AARCH64_Q0_REGNUM
];
2549 if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2550 return d_name
[p_regnum
- AARCH64_D0_REGNUM
];
2552 if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2553 return s_name
[p_regnum
- AARCH64_S0_REGNUM
];
2555 if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2556 return h_name
[p_regnum
- AARCH64_H0_REGNUM
];
2558 if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2559 return b_name
[p_regnum
- AARCH64_B0_REGNUM
];
2561 if (tdep
->has_sve ())
2563 static const char *const sve_v_name
[] =
2565 "v0", "v1", "v2", "v3",
2566 "v4", "v5", "v6", "v7",
2567 "v8", "v9", "v10", "v11",
2568 "v12", "v13", "v14", "v15",
2569 "v16", "v17", "v18", "v19",
2570 "v20", "v21", "v22", "v23",
2571 "v24", "v25", "v26", "v27",
2572 "v28", "v29", "v30", "v31",
2575 if (p_regnum
>= AARCH64_SVE_V0_REGNUM
2576 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2577 return sve_v_name
[p_regnum
- AARCH64_SVE_V0_REGNUM
];
2580 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2581 prevents it from being read by methods such as
2582 mi_cmd_trace_frame_collected. */
2583 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2586 internal_error (__FILE__
, __LINE__
,
2587 _("aarch64_pseudo_register_name: bad register number %d"),
2591 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2593 static struct type
*
2594 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2596 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2598 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2600 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2601 return aarch64_vnq_type (gdbarch
);
2603 if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2604 return aarch64_vnd_type (gdbarch
);
2606 if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2607 return aarch64_vns_type (gdbarch
);
2609 if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2610 return aarch64_vnh_type (gdbarch
);
2612 if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2613 return aarch64_vnb_type (gdbarch
);
2615 if (tdep
->has_sve () && p_regnum
>= AARCH64_SVE_V0_REGNUM
2616 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2617 return aarch64_vnv_type (gdbarch
);
2619 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2620 return builtin_type (gdbarch
)->builtin_uint64
;
2622 internal_error (__FILE__
, __LINE__
,
2623 _("aarch64_pseudo_register_type: bad register number %d"),
2627 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2630 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2631 struct reggroup
*group
)
2633 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2635 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2637 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2638 return group
== all_reggroup
|| group
== vector_reggroup
;
2639 else if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2640 return (group
== all_reggroup
|| group
== vector_reggroup
2641 || group
== float_reggroup
);
2642 else if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2643 return (group
== all_reggroup
|| group
== vector_reggroup
2644 || group
== float_reggroup
);
2645 else if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2646 return group
== all_reggroup
|| group
== vector_reggroup
;
2647 else if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2648 return group
== all_reggroup
|| group
== vector_reggroup
;
2649 else if (tdep
->has_sve () && p_regnum
>= AARCH64_SVE_V0_REGNUM
2650 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2651 return group
== all_reggroup
|| group
== vector_reggroup
;
2652 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2653 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2656 return group
== all_reggroup
;
2659 /* Helper for aarch64_pseudo_read_value. */
2661 static struct value
*
2662 aarch64_pseudo_read_value_1 (struct gdbarch
*gdbarch
,
2663 readable_regcache
*regcache
, int regnum_offset
,
2664 int regsize
, struct value
*result_value
)
2666 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2668 /* Enough space for a full vector register. */
2669 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2670 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2672 if (regcache
->raw_read (v_regnum
, reg_buf
) != REG_VALID
)
2673 mark_value_bytes_unavailable (result_value
, 0,
2674 TYPE_LENGTH (value_type (result_value
)));
2676 memcpy (value_contents_raw (result_value
), reg_buf
, regsize
);
2678 return result_value
;
2681 /* Implement the "pseudo_register_read_value" gdbarch method. */
2683 static struct value
*
2684 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
, readable_regcache
*regcache
,
2687 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2688 struct value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
2690 VALUE_LVAL (result_value
) = lval_register
;
2691 VALUE_REGNUM (result_value
) = regnum
;
2693 regnum
-= gdbarch_num_regs (gdbarch
);
2695 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2696 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2697 regnum
- AARCH64_Q0_REGNUM
,
2698 Q_REGISTER_SIZE
, result_value
);
2700 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2701 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2702 regnum
- AARCH64_D0_REGNUM
,
2703 D_REGISTER_SIZE
, result_value
);
2705 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2706 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2707 regnum
- AARCH64_S0_REGNUM
,
2708 S_REGISTER_SIZE
, result_value
);
2710 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2711 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2712 regnum
- AARCH64_H0_REGNUM
,
2713 H_REGISTER_SIZE
, result_value
);
2715 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2716 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2717 regnum
- AARCH64_B0_REGNUM
,
2718 B_REGISTER_SIZE
, result_value
);
2720 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2721 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2722 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2723 regnum
- AARCH64_SVE_V0_REGNUM
,
2724 V_REGISTER_SIZE
, result_value
);
2726 gdb_assert_not_reached ("regnum out of bound");
2729 /* Helper for aarch64_pseudo_write. */
2732 aarch64_pseudo_write_1 (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2733 int regnum_offset
, int regsize
, const gdb_byte
*buf
)
2735 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2737 /* Enough space for a full vector register. */
2738 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2739 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2741 /* Ensure the register buffer is zero, we want gdb writes of the
2742 various 'scalar' pseudo registers to behavior like architectural
2743 writes, register width bytes are written the remainder are set to
2745 memset (reg_buf
, 0, register_size (gdbarch
, AARCH64_V0_REGNUM
));
2747 memcpy (reg_buf
, buf
, regsize
);
2748 regcache
->raw_write (v_regnum
, reg_buf
);
2751 /* Implement the "pseudo_register_write" gdbarch method. */
2754 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2755 int regnum
, const gdb_byte
*buf
)
2757 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2758 regnum
-= gdbarch_num_regs (gdbarch
);
2760 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2761 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2762 regnum
- AARCH64_Q0_REGNUM
, Q_REGISTER_SIZE
,
2765 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2766 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2767 regnum
- AARCH64_D0_REGNUM
, D_REGISTER_SIZE
,
2770 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2771 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2772 regnum
- AARCH64_S0_REGNUM
, S_REGISTER_SIZE
,
2775 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2776 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2777 regnum
- AARCH64_H0_REGNUM
, H_REGISTER_SIZE
,
2780 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2781 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2782 regnum
- AARCH64_B0_REGNUM
, B_REGISTER_SIZE
,
2785 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2786 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2787 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2788 regnum
- AARCH64_SVE_V0_REGNUM
,
2789 V_REGISTER_SIZE
, buf
);
2791 gdb_assert_not_reached ("regnum out of bound");
2794 /* Callback function for user_reg_add. */
2796 static struct value
*
2797 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2799 const int *reg_p
= (const int *) baton
;
2801 return value_of_register (*reg_p
, frame
);
2805 /* Implement the "software_single_step" gdbarch method, needed to
2806 single step through atomic sequences on AArch64. */
2808 static std::vector
<CORE_ADDR
>
2809 aarch64_software_single_step (struct regcache
*regcache
)
2811 struct gdbarch
*gdbarch
= regcache
->arch ();
2812 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2813 const int insn_size
= 4;
2814 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2815 CORE_ADDR pc
= regcache_read_pc (regcache
);
2816 CORE_ADDR breaks
[2] = { CORE_ADDR_MAX
, CORE_ADDR_MAX
};
2818 CORE_ADDR closing_insn
= 0;
2819 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2820 byte_order_for_code
);
2823 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2824 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2827 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2830 /* Look for a Load Exclusive instruction which begins the sequence. */
2831 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2834 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2837 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2838 byte_order_for_code
);
2840 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2842 /* Check if the instruction is a conditional branch. */
2843 if (inst
.opcode
->iclass
== condbranch
)
2845 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2847 if (bc_insn_count
>= 1)
2850 /* It is, so we'll try to set a breakpoint at the destination. */
2851 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2857 /* Look for the Store Exclusive which closes the atomic sequence. */
2858 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2865 /* We didn't find a closing Store Exclusive instruction, fall back. */
2869 /* Insert breakpoint after the end of the atomic sequence. */
2870 breaks
[0] = loc
+ insn_size
;
2872 /* Check for duplicated breakpoints, and also check that the second
2873 breakpoint is not within the atomic sequence. */
2875 && (breaks
[1] == breaks
[0]
2876 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2877 last_breakpoint
= 0;
2879 std::vector
<CORE_ADDR
> next_pcs
;
2881 /* Insert the breakpoint at the end of the sequence, and one at the
2882 destination of the conditional branch, if it exists. */
2883 for (index
= 0; index
<= last_breakpoint
; index
++)
2884 next_pcs
.push_back (breaks
[index
]);
2889 struct aarch64_displaced_step_copy_insn_closure
2890 : public displaced_step_copy_insn_closure
2892 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2893 is being displaced stepping. */
2896 /* PC adjustment offset after displaced stepping. If 0, then we don't
2897 write the PC back, assuming the PC is already the right address. */
2898 int32_t pc_adjust
= 0;
2901 /* Data when visiting instructions for displaced stepping. */
2903 struct aarch64_displaced_step_data
2905 struct aarch64_insn_data base
;
2907 /* The address where the instruction will be executed at. */
2909 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2910 uint32_t insn_buf
[AARCH64_DISPLACED_MODIFIED_INSNS
];
2911 /* Number of instructions in INSN_BUF. */
2912 unsigned insn_count
;
2913 /* Registers when doing displaced stepping. */
2914 struct regcache
*regs
;
2916 aarch64_displaced_step_copy_insn_closure
*dsc
;
2919 /* Implementation of aarch64_insn_visitor method "b". */
2922 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2923 struct aarch64_insn_data
*data
)
2925 struct aarch64_displaced_step_data
*dsd
2926 = (struct aarch64_displaced_step_data
*) data
;
2927 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2929 if (can_encode_int32 (new_offset
, 28))
2931 /* Emit B rather than BL, because executing BL on a new address
2932 will get the wrong address into LR. In order to avoid this,
2933 we emit B, and update LR if the instruction is BL. */
2934 emit_b (dsd
->insn_buf
, 0, new_offset
);
2940 emit_nop (dsd
->insn_buf
);
2942 dsd
->dsc
->pc_adjust
= offset
;
2948 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2949 data
->insn_addr
+ 4);
2953 /* Implementation of aarch64_insn_visitor method "b_cond". */
2956 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2957 struct aarch64_insn_data
*data
)
2959 struct aarch64_displaced_step_data
*dsd
2960 = (struct aarch64_displaced_step_data
*) data
;
2962 /* GDB has to fix up PC after displaced step this instruction
2963 differently according to the condition is true or false. Instead
2964 of checking COND against conditional flags, we can use
2965 the following instructions, and GDB can tell how to fix up PC
2966 according to the PC value.
2968 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2974 emit_bcond (dsd
->insn_buf
, cond
, 8);
2975 dsd
->dsc
->cond
= true;
2976 dsd
->dsc
->pc_adjust
= offset
;
2977 dsd
->insn_count
= 1;
2980 /* Dynamically allocate a new register. If we know the register
2981 statically, we should make it a global as above instead of using this
2984 static struct aarch64_register
2985 aarch64_register (unsigned num
, int is64
)
2987 return (struct aarch64_register
) { num
, is64
};
2990 /* Implementation of aarch64_insn_visitor method "cb". */
2993 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2994 const unsigned rn
, int is64
,
2995 struct aarch64_insn_data
*data
)
2997 struct aarch64_displaced_step_data
*dsd
2998 = (struct aarch64_displaced_step_data
*) data
;
3000 /* The offset is out of range for a compare and branch
3001 instruction. We can use the following instructions instead:
3003 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3008 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
3009 dsd
->insn_count
= 1;
3010 dsd
->dsc
->cond
= true;
3011 dsd
->dsc
->pc_adjust
= offset
;
3014 /* Implementation of aarch64_insn_visitor method "tb". */
3017 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
3018 const unsigned rt
, unsigned bit
,
3019 struct aarch64_insn_data
*data
)
3021 struct aarch64_displaced_step_data
*dsd
3022 = (struct aarch64_displaced_step_data
*) data
;
3024 /* The offset is out of range for a test bit and branch
3025 instruction We can use the following instructions instead:
3027 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3033 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
3034 dsd
->insn_count
= 1;
3035 dsd
->dsc
->cond
= true;
3036 dsd
->dsc
->pc_adjust
= offset
;
3039 /* Implementation of aarch64_insn_visitor method "adr". */
3042 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
3043 const int is_adrp
, struct aarch64_insn_data
*data
)
3045 struct aarch64_displaced_step_data
*dsd
3046 = (struct aarch64_displaced_step_data
*) data
;
3047 /* We know exactly the address the ADR{P,} instruction will compute.
3048 We can just write it to the destination register. */
3049 CORE_ADDR address
= data
->insn_addr
+ offset
;
3053 /* Clear the lower 12 bits of the offset to get the 4K page. */
3054 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
3058 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
3061 dsd
->dsc
->pc_adjust
= 4;
3062 emit_nop (dsd
->insn_buf
);
3063 dsd
->insn_count
= 1;
3066 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3069 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
3070 const unsigned rt
, const int is64
,
3071 struct aarch64_insn_data
*data
)
3073 struct aarch64_displaced_step_data
*dsd
3074 = (struct aarch64_displaced_step_data
*) data
;
3075 CORE_ADDR address
= data
->insn_addr
+ offset
;
3076 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
3078 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
3082 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
3083 aarch64_register (rt
, 1), zero
);
3085 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
3086 aarch64_register (rt
, 1), zero
);
3088 dsd
->dsc
->pc_adjust
= 4;
3091 /* Implementation of aarch64_insn_visitor method "others". */
3094 aarch64_displaced_step_others (const uint32_t insn
,
3095 struct aarch64_insn_data
*data
)
3097 struct aarch64_displaced_step_data
*dsd
3098 = (struct aarch64_displaced_step_data
*) data
;
3100 aarch64_emit_insn (dsd
->insn_buf
, insn
);
3101 dsd
->insn_count
= 1;
3103 if ((insn
& 0xfffffc1f) == 0xd65f0000)
3106 dsd
->dsc
->pc_adjust
= 0;
3109 dsd
->dsc
->pc_adjust
= 4;
3112 static const struct aarch64_insn_visitor visitor
=
3114 aarch64_displaced_step_b
,
3115 aarch64_displaced_step_b_cond
,
3116 aarch64_displaced_step_cb
,
3117 aarch64_displaced_step_tb
,
3118 aarch64_displaced_step_adr
,
3119 aarch64_displaced_step_ldr_literal
,
3120 aarch64_displaced_step_others
,
3123 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3125 displaced_step_copy_insn_closure_up
3126 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
3127 CORE_ADDR from
, CORE_ADDR to
,
3128 struct regcache
*regs
)
3130 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3131 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
3132 struct aarch64_displaced_step_data dsd
;
3135 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
3138 /* Look for a Load Exclusive instruction which begins the sequence. */
3139 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
3141 /* We can't displaced step atomic sequences. */
3145 std::unique_ptr
<aarch64_displaced_step_copy_insn_closure
> dsc
3146 (new aarch64_displaced_step_copy_insn_closure
);
3147 dsd
.base
.insn_addr
= from
;
3150 dsd
.dsc
= dsc
.get ();
3152 aarch64_relocate_instruction (insn
, &visitor
,
3153 (struct aarch64_insn_data
*) &dsd
);
3154 gdb_assert (dsd
.insn_count
<= AARCH64_DISPLACED_MODIFIED_INSNS
);
3156 if (dsd
.insn_count
!= 0)
3160 /* Instruction can be relocated to scratch pad. Copy
3161 relocated instruction(s) there. */
3162 for (i
= 0; i
< dsd
.insn_count
; i
++)
3164 displaced_debug_printf ("writing insn %.8x at %s",
3166 paddress (gdbarch
, to
+ i
* 4));
3168 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
3169 (ULONGEST
) dsd
.insn_buf
[i
]);
3177 /* This is a work around for a problem with g++ 4.8. */
3178 return displaced_step_copy_insn_closure_up (dsc
.release ());
3181 /* Implement the "displaced_step_fixup" gdbarch method. */
3184 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
3185 struct displaced_step_copy_insn_closure
*dsc_
,
3186 CORE_ADDR from
, CORE_ADDR to
,
3187 struct regcache
*regs
)
3189 aarch64_displaced_step_copy_insn_closure
*dsc
3190 = (aarch64_displaced_step_copy_insn_closure
*) dsc_
;
3194 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
3196 displaced_debug_printf ("PC after stepping: %s (was %s).",
3197 paddress (gdbarch
, pc
), paddress (gdbarch
, to
));
3201 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3206 /* Condition is true. */
3208 else if (pc
- to
== 4)
3210 /* Condition is false. */
3214 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3216 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3220 displaced_debug_printf ("%s PC by %d",
3221 dsc
->pc_adjust
? "adjusting" : "not adjusting",
3224 if (dsc
->pc_adjust
!= 0)
3226 /* Make sure the previous instruction was executed (that is, the PC
3227 has changed). If the PC didn't change, then discard the adjustment
3228 offset. Otherwise we may skip an instruction before its execution
3232 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3236 displaced_debug_printf ("fixup: set PC to %s:%d",
3237 paddress (gdbarch
, from
), dsc
->pc_adjust
);
3239 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
3240 from
+ dsc
->pc_adjust
);
3244 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3247 aarch64_displaced_step_hw_singlestep (struct gdbarch
*gdbarch
)
3252 /* Get the correct target description for the given VQ value.
3253 If VQ is zero then it is assumed SVE is not supported.
3254 (It is not possible to set VQ to zero on an SVE system). */
3257 aarch64_read_description (uint64_t vq
, bool pauth_p
)
3259 if (vq
> AARCH64_MAX_SVE_VQ
)
3260 error (_("VQ is %" PRIu64
", maximum supported value is %d"), vq
,
3261 AARCH64_MAX_SVE_VQ
);
3263 struct target_desc
*tdesc
= tdesc_aarch64_list
[vq
][pauth_p
];
3267 tdesc
= aarch64_create_target_description (vq
, pauth_p
);
3268 tdesc_aarch64_list
[vq
][pauth_p
] = tdesc
;
3274 /* Return the VQ used when creating the target description TDESC. */
3277 aarch64_get_tdesc_vq (const struct target_desc
*tdesc
)
3279 const struct tdesc_feature
*feature_sve
;
3281 if (!tdesc_has_registers (tdesc
))
3284 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3286 if (feature_sve
== nullptr)
3289 uint64_t vl
= tdesc_register_bitsize (feature_sve
,
3290 aarch64_sve_register_names
[0]) / 8;
3291 return sve_vq_from_vl (vl
);
3294 /* Add all the expected register sets into GDBARCH. */
3297 aarch64_add_reggroups (struct gdbarch
*gdbarch
)
3299 reggroup_add (gdbarch
, general_reggroup
);
3300 reggroup_add (gdbarch
, float_reggroup
);
3301 reggroup_add (gdbarch
, system_reggroup
);
3302 reggroup_add (gdbarch
, vector_reggroup
);
3303 reggroup_add (gdbarch
, all_reggroup
);
3304 reggroup_add (gdbarch
, save_reggroup
);
3305 reggroup_add (gdbarch
, restore_reggroup
);
3308 /* Implement the "cannot_store_register" gdbarch method. */
3311 aarch64_cannot_store_register (struct gdbarch
*gdbarch
, int regnum
)
3313 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3315 if (!tdep
->has_pauth ())
3318 /* Pointer authentication registers are read-only. */
3319 return (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
3320 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
));
3323 /* Initialize the current architecture based on INFO. If possible,
3324 re-use an architecture from ARCHES, which is a list of
3325 architectures already created during this debugging session.
3327 Called e.g. at program startup, when reading a core file, and when
3328 reading a binary file. */
3330 static struct gdbarch
*
3331 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
3333 const struct tdesc_feature
*feature_core
, *feature_fpu
, *feature_sve
;
3334 const struct tdesc_feature
*feature_pauth
;
3335 bool valid_p
= true;
3336 int i
, num_regs
= 0, num_pseudo_regs
= 0;
3337 int first_pauth_regnum
= -1, pauth_ra_state_offset
= -1;
3339 /* Use the vector length passed via the target info. Here -1 is used for no
3340 SVE, and 0 is unset. If unset then use the vector length from the existing
3343 if (info
.id
== (int *) -1)
3345 else if (info
.id
!= 0)
3346 vq
= (uint64_t) info
.id
;
3348 vq
= aarch64_get_tdesc_vq (info
.target_desc
);
3350 if (vq
> AARCH64_MAX_SVE_VQ
)
3351 internal_error (__FILE__
, __LINE__
, _("VQ out of bounds: %s (max %d)"),
3352 pulongest (vq
), AARCH64_MAX_SVE_VQ
);
3354 /* If there is already a candidate, use it. */
3355 for (gdbarch_list
*best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
3356 best_arch
!= nullptr;
3357 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
3359 struct gdbarch_tdep
*tdep
= gdbarch_tdep (best_arch
->gdbarch
);
3360 if (tdep
&& tdep
->vq
== vq
)
3361 return best_arch
->gdbarch
;
3364 /* Ensure we always have a target descriptor, and that it is for the given VQ
3366 const struct target_desc
*tdesc
= info
.target_desc
;
3367 if (!tdesc_has_registers (tdesc
) || vq
!= aarch64_get_tdesc_vq (tdesc
))
3368 tdesc
= aarch64_read_description (vq
, false);
3371 feature_core
= tdesc_find_feature (tdesc
,"org.gnu.gdb.aarch64.core");
3372 feature_fpu
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
3373 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3374 feature_pauth
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.pauth");
3376 if (feature_core
== nullptr)
3379 tdesc_arch_data_up tdesc_data
= tdesc_data_alloc ();
3381 /* Validate the description provides the mandatory core R registers
3382 and allocate their numbers. */
3383 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
3384 valid_p
&= tdesc_numbered_register (feature_core
, tdesc_data
.get (),
3385 AARCH64_X0_REGNUM
+ i
,
3386 aarch64_r_register_names
[i
]);
3388 num_regs
= AARCH64_X0_REGNUM
+ i
;
3390 /* Add the V registers. */
3391 if (feature_fpu
!= nullptr)
3393 if (feature_sve
!= nullptr)
3394 error (_("Program contains both fpu and SVE features."));
3396 /* Validate the description provides the mandatory V registers
3397 and allocate their numbers. */
3398 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
3399 valid_p
&= tdesc_numbered_register (feature_fpu
, tdesc_data
.get (),
3400 AARCH64_V0_REGNUM
+ i
,
3401 aarch64_v_register_names
[i
]);
3403 num_regs
= AARCH64_V0_REGNUM
+ i
;
3406 /* Add the SVE registers. */
3407 if (feature_sve
!= nullptr)
3409 /* Validate the description provides the mandatory SVE registers
3410 and allocate their numbers. */
3411 for (i
= 0; i
< ARRAY_SIZE (aarch64_sve_register_names
); i
++)
3412 valid_p
&= tdesc_numbered_register (feature_sve
, tdesc_data
.get (),
3413 AARCH64_SVE_Z0_REGNUM
+ i
,
3414 aarch64_sve_register_names
[i
]);
3416 num_regs
= AARCH64_SVE_Z0_REGNUM
+ i
;
3417 num_pseudo_regs
+= 32; /* add the Vn register pseudos. */
3420 if (feature_fpu
!= nullptr || feature_sve
!= nullptr)
3422 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
3423 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
3424 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
3425 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
3426 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
3429 /* Add the pauth registers. */
3430 if (feature_pauth
!= NULL
)
3432 first_pauth_regnum
= num_regs
;
3433 pauth_ra_state_offset
= num_pseudo_regs
;
3434 /* Validate the descriptor provides the mandatory PAUTH registers and
3435 allocate their numbers. */
3436 for (i
= 0; i
< ARRAY_SIZE (aarch64_pauth_register_names
); i
++)
3437 valid_p
&= tdesc_numbered_register (feature_pauth
, tdesc_data
.get (),
3438 first_pauth_regnum
+ i
,
3439 aarch64_pauth_register_names
[i
]);
3442 num_pseudo_regs
+= 1; /* Count RA_STATE pseudo register. */
3448 /* AArch64 code is always little-endian. */
3449 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
3451 struct gdbarch_tdep
*tdep
= XCNEW (struct gdbarch_tdep
);
3452 struct gdbarch
*gdbarch
= gdbarch_alloc (&info
, tdep
);
3454 /* This should be low enough for everything. */
3455 tdep
->lowest_pc
= 0x20;
3456 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
3457 tdep
->jb_elt_size
= 8;
3459 tdep
->pauth_reg_base
= first_pauth_regnum
;
3460 tdep
->pauth_ra_state_regnum
= (feature_pauth
== NULL
) ? -1
3461 : pauth_ra_state_offset
+ num_regs
;
3463 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
3464 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
3466 /* Advance PC across function entry code. */
3467 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
3469 /* The stack grows downward. */
3470 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
3472 /* Breakpoint manipulation. */
3473 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
3474 aarch64_breakpoint::kind_from_pc
);
3475 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
3476 aarch64_breakpoint::bp_from_kind
);
3477 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
3478 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
3480 /* Information about registers, etc. */
3481 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
3482 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
3483 set_gdbarch_num_regs (gdbarch
, num_regs
);
3485 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
3486 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
3487 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
3488 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
3489 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
3490 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
3491 aarch64_pseudo_register_reggroup_p
);
3492 set_gdbarch_cannot_store_register (gdbarch
, aarch64_cannot_store_register
);
3495 set_gdbarch_short_bit (gdbarch
, 16);
3496 set_gdbarch_int_bit (gdbarch
, 32);
3497 set_gdbarch_float_bit (gdbarch
, 32);
3498 set_gdbarch_double_bit (gdbarch
, 64);
3499 set_gdbarch_long_double_bit (gdbarch
, 128);
3500 set_gdbarch_long_bit (gdbarch
, 64);
3501 set_gdbarch_long_long_bit (gdbarch
, 64);
3502 set_gdbarch_ptr_bit (gdbarch
, 64);
3503 set_gdbarch_char_signed (gdbarch
, 0);
3504 set_gdbarch_wchar_signed (gdbarch
, 0);
3505 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
3506 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
3507 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
3508 set_gdbarch_type_align (gdbarch
, aarch64_type_align
);
3510 /* Internal <-> external register number maps. */
3511 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3513 /* Returning results. */
3514 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3517 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3519 /* Virtual tables. */
3520 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3522 /* Register architecture. */
3523 aarch64_add_reggroups (gdbarch
);
3525 /* Hook in the ABI-specific overrides, if they have been registered. */
3526 info
.target_desc
= tdesc
;
3527 info
.tdesc_data
= tdesc_data
.get ();
3528 gdbarch_init_osabi (info
, gdbarch
);
3530 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3531 /* Register DWARF CFA vendor handler. */
3532 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch
,
3533 aarch64_execute_dwarf_cfa_vendor_op
);
3535 /* Permanent/Program breakpoint handling. */
3536 set_gdbarch_program_breakpoint_here_p (gdbarch
,
3537 aarch64_program_breakpoint_here_p
);
3539 /* Add some default predicates. */
3540 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3541 dwarf2_append_unwinders (gdbarch
);
3542 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3544 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3546 /* Now we have tuned the configuration, set a few final things,
3547 based on what the OS ABI has told us. */
3549 if (tdep
->jb_pc
>= 0)
3550 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3552 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3554 set_gdbarch_get_pc_address_flags (gdbarch
, aarch64_get_pc_address_flags
);
3556 tdesc_use_registers (gdbarch
, tdesc
, std::move (tdesc_data
));
3558 /* Add standard register aliases. */
3559 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3560 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3561 value_of_aarch64_user_reg
,
3562 &aarch64_register_aliases
[i
].regnum
);
3564 register_aarch64_ravenscar_ops (gdbarch
);
3570 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3572 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3577 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3578 paddress (gdbarch
, tdep
->lowest_pc
));
3584 static void aarch64_process_record_test (void);
3588 void _initialize_aarch64_tdep ();
3590 _initialize_aarch64_tdep ()
3592 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3595 /* Debug this file's internals. */
3596 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3597 Set AArch64 debugging."), _("\
3598 Show AArch64 debugging."), _("\
3599 When on, AArch64 specific debugging is enabled."),
3602 &setdebuglist
, &showdebuglist
);
3605 selftests::register_test ("aarch64-analyze-prologue",
3606 selftests::aarch64_analyze_prologue_test
);
3607 selftests::register_test ("aarch64-process-record",
3608 selftests::aarch64_process_record_test
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS (LENGTH uint32_t register numbers) and copy them in from
   RECORD_BUF.  No-op when LENGTH is zero.  Caller owns the allocation
   (freed by deallocate_reg_mem).
   Note: the original text was mojibake-corrupted ("&REGS" had been turned
   into the U+00AE sign); restored here.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
	      } \
	  } \
	while (0)

/* Allocate MEMS (LENGTH aarch64_mem_r records) and copy them in from
   RECORD_BUF.  No-op when LENGTH is zero.  Caller owns the allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
		memcpy (&MEMS->len, &RECORD_BUF[0], \
			sizeof (struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory write: ADDR/LEN describe the region whose old
   contents must be saved before the instruction is replayed.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Result codes returned by the per-class record handlers below.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Instruction decoded and recorded.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Recognized but cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN	/* Not recognized by the decoder.  */
};
3654 typedef struct insn_decode_record_t
3656 struct gdbarch
*gdbarch
;
3657 struct regcache
*regcache
;
3658 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3659 uint32_t aarch64_insn
; /* Insn to be recorded. */
3660 uint32_t mem_rec_count
; /* Count of memory records. */
3661 uint32_t reg_rec_count
; /* Count of register records. */
3662 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3663 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3664 } insn_decode_record
;
3666 /* Record handler for data processing - register instructions. */
3669 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3671 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3672 uint32_t record_buf
[4];
3674 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3675 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3676 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3678 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3682 /* Logical (shifted register). */
3683 if (insn_bits24_27
== 0x0a)
3684 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3686 else if (insn_bits24_27
== 0x0b)
3687 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3689 return AARCH64_RECORD_UNKNOWN
;
3691 record_buf
[0] = reg_rd
;
3692 aarch64_insn_r
->reg_rec_count
= 1;
3694 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3698 if (insn_bits24_27
== 0x0b)
3700 /* Data-processing (3 source). */
3701 record_buf
[0] = reg_rd
;
3702 aarch64_insn_r
->reg_rec_count
= 1;
3704 else if (insn_bits24_27
== 0x0a)
3706 if (insn_bits21_23
== 0x00)
3708 /* Add/subtract (with carry). */
3709 record_buf
[0] = reg_rd
;
3710 aarch64_insn_r
->reg_rec_count
= 1;
3711 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3713 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3714 aarch64_insn_r
->reg_rec_count
= 2;
3717 else if (insn_bits21_23
== 0x02)
3719 /* Conditional compare (register) and conditional compare
3720 (immediate) instructions. */
3721 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3722 aarch64_insn_r
->reg_rec_count
= 1;
3724 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3726 /* Conditional select. */
3727 /* Data-processing (2 source). */
3728 /* Data-processing (1 source). */
3729 record_buf
[0] = reg_rd
;
3730 aarch64_insn_r
->reg_rec_count
= 1;
3733 return AARCH64_RECORD_UNKNOWN
;
3737 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3739 return AARCH64_RECORD_SUCCESS
;
3742 /* Record handler for data processing - immediate instructions. */
3745 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3747 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3748 uint32_t record_buf
[4];
3750 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3751 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3752 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3754 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3755 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3756 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3758 record_buf
[0] = reg_rd
;
3759 aarch64_insn_r
->reg_rec_count
= 1;
3761 else if (insn_bits24_27
== 0x01)
3763 /* Add/Subtract (immediate). */
3764 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3765 record_buf
[0] = reg_rd
;
3766 aarch64_insn_r
->reg_rec_count
= 1;
3768 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3770 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3772 /* Logical (immediate). */
3773 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3774 record_buf
[0] = reg_rd
;
3775 aarch64_insn_r
->reg_rec_count
= 1;
3777 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3780 return AARCH64_RECORD_UNKNOWN
;
3782 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3784 return AARCH64_RECORD_SUCCESS
;
3787 /* Record handler for branch, exception generation and system instructions. */
3790 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3792 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3793 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3794 uint32_t record_buf
[4];
3796 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3797 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3798 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3800 if (insn_bits28_31
== 0x0d)
3802 /* Exception generation instructions. */
3803 if (insn_bits24_27
== 0x04)
3805 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3806 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3807 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3809 ULONGEST svc_number
;
3811 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3813 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3817 return AARCH64_RECORD_UNSUPPORTED
;
3819 /* System instructions. */
3820 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3822 uint32_t reg_rt
, reg_crn
;
3824 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3825 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3827 /* Record rt in case of sysl and mrs instructions. */
3828 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3830 record_buf
[0] = reg_rt
;
3831 aarch64_insn_r
->reg_rec_count
= 1;
3833 /* Record cpsr for hint and msr(immediate) instructions. */
3834 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3836 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3837 aarch64_insn_r
->reg_rec_count
= 1;
3840 /* Unconditional branch (register). */
3841 else if((insn_bits24_27
& 0x0e) == 0x06)
3843 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3844 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3845 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3848 return AARCH64_RECORD_UNKNOWN
;
3850 /* Unconditional branch (immediate). */
3851 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3853 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3854 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3855 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3858 /* Compare & branch (immediate), Test & branch (immediate) and
3859 Conditional branch (immediate). */
3860 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3862 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3864 return AARCH64_RECORD_SUCCESS
;
3867 /* Record handler for advanced SIMD load and store instructions. */
3870 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3873 uint64_t addr_offset
= 0;
3874 uint32_t record_buf
[24];
3875 uint64_t record_buf_mem
[24];
3876 uint32_t reg_rn
, reg_rt
;
3877 uint32_t reg_index
= 0, mem_index
= 0;
3878 uint8_t opcode_bits
, size_bits
;
3880 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3881 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3882 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3883 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3884 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3887 debug_printf ("Process record: Advanced SIMD load/store\n");
3889 /* Load/store single structure. */
3890 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3892 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3893 scale
= opcode_bits
>> 2;
3894 selem
= ((opcode_bits
& 0x02) |
3895 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3899 if (size_bits
& 0x01)
3900 return AARCH64_RECORD_UNKNOWN
;
3903 if ((size_bits
>> 1) & 0x01)
3904 return AARCH64_RECORD_UNKNOWN
;
3905 if (size_bits
& 0x01)
3907 if (!((opcode_bits
>> 1) & 0x01))
3910 return AARCH64_RECORD_UNKNOWN
;
3914 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3921 return AARCH64_RECORD_UNKNOWN
;
3927 for (sindex
= 0; sindex
< selem
; sindex
++)
3929 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3930 reg_rt
= (reg_rt
+ 1) % 32;
3934 for (sindex
= 0; sindex
< selem
; sindex
++)
3936 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3937 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3940 record_buf_mem
[mem_index
++] = esize
/ 8;
3941 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3943 addr_offset
= addr_offset
+ (esize
/ 8);
3944 reg_rt
= (reg_rt
+ 1) % 32;
3948 /* Load/store multiple structure. */
3951 uint8_t selem
, esize
, rpt
, elements
;
3952 uint8_t eindex
, rindex
;
3954 esize
= 8 << size_bits
;
3955 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3956 elements
= 128 / esize
;
3958 elements
= 64 / esize
;
3960 switch (opcode_bits
)
3962 /*LD/ST4 (4 Registers). */
3967 /*LD/ST1 (4 Registers). */
3972 /*LD/ST3 (3 Registers). */
3977 /*LD/ST1 (3 Registers). */
3982 /*LD/ST1 (1 Register). */
3987 /*LD/ST2 (2 Registers). */
3992 /*LD/ST1 (2 Registers). */
3998 return AARCH64_RECORD_UNSUPPORTED
;
4001 for (rindex
= 0; rindex
< rpt
; rindex
++)
4002 for (eindex
= 0; eindex
< elements
; eindex
++)
4004 uint8_t reg_tt
, sindex
;
4005 reg_tt
= (reg_rt
+ rindex
) % 32;
4006 for (sindex
= 0; sindex
< selem
; sindex
++)
4008 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
4009 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
4012 record_buf_mem
[mem_index
++] = esize
/ 8;
4013 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
4015 addr_offset
= addr_offset
+ (esize
/ 8);
4016 reg_tt
= (reg_tt
+ 1) % 32;
4021 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
4022 record_buf
[reg_index
++] = reg_rn
;
4024 aarch64_insn_r
->reg_rec_count
= reg_index
;
4025 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
4026 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
4028 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4030 return AARCH64_RECORD_SUCCESS
;
4033 /* Record handler for load and store instructions. */
4036 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
4038 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
4039 uint8_t insn_bit23
, insn_bit21
;
4040 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
4041 uint32_t reg_rn
, reg_rt
, reg_rt2
;
4042 uint64_t datasize
, offset
;
4043 uint32_t record_buf
[8];
4044 uint64_t record_buf_mem
[8];
4047 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
4048 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
4049 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
4050 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
4051 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
4052 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
4053 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4054 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
4055 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
4056 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
4057 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
4059 /* Load/store exclusive. */
4060 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
4063 debug_printf ("Process record: load/store exclusive\n");
4067 record_buf
[0] = reg_rt
;
4068 aarch64_insn_r
->reg_rec_count
= 1;
4071 record_buf
[1] = reg_rt2
;
4072 aarch64_insn_r
->reg_rec_count
= 2;
4078 datasize
= (8 << size_bits
) * 2;
4080 datasize
= (8 << size_bits
);
4081 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4083 record_buf_mem
[0] = datasize
/ 8;
4084 record_buf_mem
[1] = address
;
4085 aarch64_insn_r
->mem_rec_count
= 1;
4088 /* Save register rs. */
4089 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
4090 aarch64_insn_r
->reg_rec_count
= 1;
4094 /* Load register (literal) instructions decoding. */
4095 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
4098 debug_printf ("Process record: load register (literal)\n");
4100 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4102 record_buf
[0] = reg_rt
;
4103 aarch64_insn_r
->reg_rec_count
= 1;
4105 /* All types of load/store pair instructions decoding. */
4106 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
4109 debug_printf ("Process record: load/store pair\n");
4115 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4116 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
4120 record_buf
[0] = reg_rt
;
4121 record_buf
[1] = reg_rt2
;
4123 aarch64_insn_r
->reg_rec_count
= 2;
4128 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
4130 size_bits
= size_bits
>> 1;
4131 datasize
= 8 << (2 + size_bits
);
4132 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
4133 offset
= offset
<< (2 + size_bits
);
4134 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4136 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
4138 if (imm7_off
& 0x40)
4139 address
= address
- offset
;
4141 address
= address
+ offset
;
4144 record_buf_mem
[0] = datasize
/ 8;
4145 record_buf_mem
[1] = address
;
4146 record_buf_mem
[2] = datasize
/ 8;
4147 record_buf_mem
[3] = address
+ (datasize
/ 8);
4148 aarch64_insn_r
->mem_rec_count
= 2;
4150 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
4151 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
4153 /* Load/store register (unsigned immediate) instructions. */
4154 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
4156 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4166 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
4168 /* PRFM (immediate) */
4169 return AARCH64_RECORD_SUCCESS
;
4171 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
4173 /* LDRSW (immediate) */
4187 debug_printf ("Process record: load/store (unsigned immediate):"
4188 " size %x V %d opc %x\n", size_bits
, vector_flag
,
4194 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
4195 datasize
= 8 << size_bits
;
4196 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4198 offset
= offset
<< size_bits
;
4199 address
= address
+ offset
;
4201 record_buf_mem
[0] = datasize
>> 3;
4202 record_buf_mem
[1] = address
;
4203 aarch64_insn_r
->mem_rec_count
= 1;
4208 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4210 record_buf
[0] = reg_rt
;
4211 aarch64_insn_r
->reg_rec_count
= 1;
4214 /* Load/store register (register offset) instructions. */
4215 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4216 && insn_bits10_11
== 0x02 && insn_bit21
)
4219 debug_printf ("Process record: load/store (register offset)\n");
4220 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4227 if (size_bits
!= 0x03)
4230 return AARCH64_RECORD_UNKNOWN
;
4234 ULONGEST reg_rm_val
;
4236 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
4237 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
4238 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
4239 offset
= reg_rm_val
<< size_bits
;
4241 offset
= reg_rm_val
;
4242 datasize
= 8 << size_bits
;
4243 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4245 address
= address
+ offset
;
4246 record_buf_mem
[0] = datasize
>> 3;
4247 record_buf_mem
[1] = address
;
4248 aarch64_insn_r
->mem_rec_count
= 1;
4253 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4255 record_buf
[0] = reg_rt
;
4256 aarch64_insn_r
->reg_rec_count
= 1;
4259 /* Load/store register (immediate and unprivileged) instructions. */
4260 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4265 debug_printf ("Process record: load/store "
4266 "(immediate and unprivileged)\n");
4268 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4275 if (size_bits
!= 0x03)
4278 return AARCH64_RECORD_UNKNOWN
;
4283 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
4284 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
4285 datasize
= 8 << size_bits
;
4286 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4288 if (insn_bits10_11
!= 0x01)
4290 if (imm9_off
& 0x0100)
4291 address
= address
- offset
;
4293 address
= address
+ offset
;
4295 record_buf_mem
[0] = datasize
>> 3;
4296 record_buf_mem
[1] = address
;
4297 aarch64_insn_r
->mem_rec_count
= 1;
4302 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4304 record_buf
[0] = reg_rt
;
4305 aarch64_insn_r
->reg_rec_count
= 1;
4307 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
4308 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
4310 /* Advanced SIMD load/store instructions. */
4312 return aarch64_record_asimd_load_store (aarch64_insn_r
);
4314 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
4316 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4318 return AARCH64_RECORD_SUCCESS
;
4321 /* Record handler for data processing SIMD and floating point instructions. */
4324 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
4326 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
4327 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
4328 uint8_t insn_bits11_14
;
4329 uint32_t record_buf
[2];
4331 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
4332 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
4333 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
4334 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
4335 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
4336 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
4337 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
4338 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
4339 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
4342 debug_printf ("Process record: data processing SIMD/FP: ");
4344 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
4346 /* Floating point - fixed point conversion instructions. */
4350 debug_printf ("FP - fixed point conversion");
4352 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
4353 record_buf
[0] = reg_rd
;
4355 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4357 /* Floating point - conditional compare instructions. */
4358 else if (insn_bits10_11
== 0x01)
4361 debug_printf ("FP - conditional compare");
4363 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4365 /* Floating point - data processing (2-source) and
4366 conditional select instructions. */
4367 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
4370 debug_printf ("FP - DP (2-source)");
4372 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4374 else if (insn_bits10_11
== 0x00)
4376 /* Floating point - immediate instructions. */
4377 if ((insn_bits12_15
& 0x01) == 0x01
4378 || (insn_bits12_15
& 0x07) == 0x04)
4381 debug_printf ("FP - immediate");
4382 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4384 /* Floating point - compare instructions. */
4385 else if ((insn_bits12_15
& 0x03) == 0x02)
4388 debug_printf ("FP - immediate");
4389 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4391 /* Floating point - integer conversions instructions. */
4392 else if (insn_bits12_15
== 0x00)
4394 /* Convert float to integer instruction. */
4395 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4398 debug_printf ("float to int conversion");
4400 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4402 /* Convert integer to float instruction. */
4403 else if ((opcode
>> 1) == 0x01 && !rmode
)
4406 debug_printf ("int to float conversion");
4408 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4410 /* Move float to integer instruction. */
4411 else if ((opcode
>> 1) == 0x03)
4414 debug_printf ("move float to int");
4416 if (!(opcode
& 0x01))
4417 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4419 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4422 return AARCH64_RECORD_UNKNOWN
;
4425 return AARCH64_RECORD_UNKNOWN
;
4428 return AARCH64_RECORD_UNKNOWN
;
4430 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4433 debug_printf ("SIMD copy");
4435 /* Advanced SIMD copy instructions. */
4436 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4437 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4438 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4440 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4441 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4443 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4446 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4448 /* All remaining floating point or advanced SIMD instructions. */
4452 debug_printf ("all remain");
4454 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4458 debug_printf ("\n");
4460 /* Record the V/X register. */
4461 aarch64_insn_r
->reg_rec_count
++;
4463 /* Some of these instructions may set bits in the FPSR, so record it
4465 record_buf
[1] = AARCH64_FPSR_REGNUM
;
4466 aarch64_insn_r
->reg_rec_count
++;
4468 gdb_assert (aarch64_insn_r
->reg_rec_count
== 2);
4469 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4471 return AARCH64_RECORD_SUCCESS
;
4474 /* Decodes insns type and invokes its record handler. */
4477 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4479 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4481 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4482 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4483 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4484 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4486 /* Data processing - immediate instructions. */
4487 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4488 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4490 /* Branch, exception generation and system instructions. */
4491 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4492 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4494 /* Load and store instructions. */
4495 if (!ins_bit25
&& ins_bit27
)
4496 return aarch64_record_load_store (aarch64_insn_r
);
4498 /* Data processing - register instructions. */
4499 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4500 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4502 /* Data processing - SIMD and floating point instructions. */
4503 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4504 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4506 return AARCH64_RECORD_UNSUPPORTED
;
4509 /* Cleans up local record registers and memory allocations. */
4512 deallocate_reg_mem (insn_decode_record
*record
)
4514 xfree (record
->aarch64_regs
);
4515 xfree (record
->aarch64_mems
);
#if GDB_SELF_TEST
namespace selftests
{

/* Self test: run the record decoder on a PRFM instruction, which must
   succeed while recording no registers and no memory.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4553 /* Parse the current instruction and record the values of the registers and
4554 memory that will be changed in current instruction to record_arch_list
4555 return -1 if something is wrong. */
4558 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4559 CORE_ADDR insn_addr
)
4561 uint32_t rec_no
= 0;
4562 uint8_t insn_size
= 4;
4564 gdb_byte buf
[insn_size
];
4565 insn_decode_record aarch64_record
;
4567 memset (&buf
[0], 0, insn_size
);
4568 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4569 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4570 aarch64_record
.aarch64_insn
4571 = (uint32_t) extract_unsigned_integer (&buf
[0],
4573 gdbarch_byte_order (gdbarch
));
4574 aarch64_record
.regcache
= regcache
;
4575 aarch64_record
.this_addr
= insn_addr
;
4576 aarch64_record
.gdbarch
= gdbarch
;
4578 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4579 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4581 printf_unfiltered (_("Process record does not support instruction "
4582 "0x%0x at address %s.\n"),
4583 aarch64_record
.aarch64_insn
,
4584 paddress (gdbarch
, insn_addr
));
4590 /* Record registers. */
4591 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4593 /* Always record register CPSR. */
4594 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4595 AARCH64_CPSR_REGNUM
);
4596 if (aarch64_record
.aarch64_regs
)
4597 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4598 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4599 aarch64_record
.aarch64_regs
[rec_no
]))
4602 /* Record memories. */
4603 if (aarch64_record
.aarch64_mems
)
4604 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4605 if (record_full_arch_list_add_mem
4606 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4607 aarch64_record
.aarch64_mems
[rec_no
].len
))
4610 if (record_full_arch_list_add_end ())
4614 deallocate_reg_mem (&aarch64_record
);