/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"

#include "frame.h"
#include "language.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4
60 /* All possible aarch64 target descriptors. */
61 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1][2/*pauth*/];
63 /* The standard register names, and all the valid aliases for them. */
66 const char *const name
;
68 } aarch64_register_aliases
[] =
70 /* 64-bit register names. */
71 {"fp", AARCH64_FP_REGNUM
},
72 {"lr", AARCH64_LR_REGNUM
},
73 {"sp", AARCH64_SP_REGNUM
},
75 /* 32-bit register names. */
76 {"w0", AARCH64_X0_REGNUM
+ 0},
77 {"w1", AARCH64_X0_REGNUM
+ 1},
78 {"w2", AARCH64_X0_REGNUM
+ 2},
79 {"w3", AARCH64_X0_REGNUM
+ 3},
80 {"w4", AARCH64_X0_REGNUM
+ 4},
81 {"w5", AARCH64_X0_REGNUM
+ 5},
82 {"w6", AARCH64_X0_REGNUM
+ 6},
83 {"w7", AARCH64_X0_REGNUM
+ 7},
84 {"w8", AARCH64_X0_REGNUM
+ 8},
85 {"w9", AARCH64_X0_REGNUM
+ 9},
86 {"w10", AARCH64_X0_REGNUM
+ 10},
87 {"w11", AARCH64_X0_REGNUM
+ 11},
88 {"w12", AARCH64_X0_REGNUM
+ 12},
89 {"w13", AARCH64_X0_REGNUM
+ 13},
90 {"w14", AARCH64_X0_REGNUM
+ 14},
91 {"w15", AARCH64_X0_REGNUM
+ 15},
92 {"w16", AARCH64_X0_REGNUM
+ 16},
93 {"w17", AARCH64_X0_REGNUM
+ 17},
94 {"w18", AARCH64_X0_REGNUM
+ 18},
95 {"w19", AARCH64_X0_REGNUM
+ 19},
96 {"w20", AARCH64_X0_REGNUM
+ 20},
97 {"w21", AARCH64_X0_REGNUM
+ 21},
98 {"w22", AARCH64_X0_REGNUM
+ 22},
99 {"w23", AARCH64_X0_REGNUM
+ 23},
100 {"w24", AARCH64_X0_REGNUM
+ 24},
101 {"w25", AARCH64_X0_REGNUM
+ 25},
102 {"w26", AARCH64_X0_REGNUM
+ 26},
103 {"w27", AARCH64_X0_REGNUM
+ 27},
104 {"w28", AARCH64_X0_REGNUM
+ 28},
105 {"w29", AARCH64_X0_REGNUM
+ 29},
106 {"w30", AARCH64_X0_REGNUM
+ 30},
109 {"ip0", AARCH64_X0_REGNUM
+ 16},
110 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};
175 /* AArch64 prologue cache structure. */
176 struct aarch64_prologue_cache
178 /* The program counter at the start of the function. It is used to
179 identify this frame as a prologue frame. */
182 /* The program counter at the time this frame was created; i.e. where
183 this function was called from. It is used to identify this frame as a
187 /* The stack pointer at the time this frame was created; i.e. the
188 caller's stack pointer when this function was called. It is used
189 to identify this frame. */
192 /* Is the target available to read from? */
195 /* The frame base for this frame is just prev_sp - frame size.
196 FRAMESIZE is the distance from the frame pointer to the
197 initial stack pointer. */
200 /* The register used to hold the frame pointer for this frame. */
203 /* Saved register offsets. */
204 trad_frame_saved_reg
*saved_regs
;
208 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
209 struct cmd_list_element
*c
, const char *value
)
211 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
216 /* Abstract instruction reader. */
218 class abstract_instruction_reader
221 /* Read in one instruction. */
222 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
223 enum bfd_endian byte_order
) = 0;
226 /* Instruction reader from real target. */
228 class instruction_reader
: public abstract_instruction_reader
231 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
234 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
240 /* If address signing is enabled, mask off the signature bits from the link
241 register, which is passed by value in ADDR, using the register values in
245 aarch64_frame_unmask_lr (struct gdbarch_tdep
*tdep
,
246 struct frame_info
*this_frame
, CORE_ADDR addr
)
248 if (tdep
->has_pauth ()
249 && frame_unwind_register_unsigned (this_frame
,
250 tdep
->pauth_ra_state_regnum
))
252 int cmask_num
= AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
);
253 CORE_ADDR cmask
= frame_unwind_register_unsigned (this_frame
, cmask_num
);
254 addr
= addr
& ~cmask
;
256 /* Record in the frame that the link register required unmasking. */
257 set_frame_previous_pc_masked (this_frame
);
263 /* Implement the "get_pc_address_flags" gdbarch method. */
266 aarch64_get_pc_address_flags (frame_info
*frame
, CORE_ADDR pc
)
268 if (pc
!= 0 && get_frame_pc_masked (frame
))
274 /* Analyze a prologue, looking for a recognizable stack frame
275 and frame pointer. Scan until we encounter a store that could
276 clobber the stack frame unexpectedly, or an unknown instruction. */
279 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
280 CORE_ADDR start
, CORE_ADDR limit
,
281 struct aarch64_prologue_cache
*cache
,
282 abstract_instruction_reader
& reader
)
284 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
287 /* Whether the stack has been set. This should be true when we notice a SP
288 to FP move or if we are using the SP as the base register for storing
289 data, in case the FP is ommitted. */
290 bool seen_stack_set
= false;
292 /* Track X registers and D registers in prologue. */
293 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
295 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
296 regs
[i
] = pv_register (i
, 0);
297 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
299 for (; start
< limit
; start
+= 4)
304 insn
= reader
.read (start
, 4, byte_order_for_code
);
306 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
309 if (inst
.opcode
->iclass
== addsub_imm
310 && (inst
.opcode
->op
== OP_ADD
311 || strcmp ("sub", inst
.opcode
->name
) == 0))
313 unsigned rd
= inst
.operands
[0].reg
.regno
;
314 unsigned rn
= inst
.operands
[1].reg
.regno
;
316 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
317 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
318 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
319 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
321 if (inst
.opcode
->op
== OP_ADD
)
323 regs
[rd
] = pv_add_constant (regs
[rn
],
324 inst
.operands
[2].imm
.value
);
328 regs
[rd
] = pv_add_constant (regs
[rn
],
329 -inst
.operands
[2].imm
.value
);
332 /* Did we move SP to FP? */
333 if (rn
== AARCH64_SP_REGNUM
&& rd
== AARCH64_FP_REGNUM
)
334 seen_stack_set
= true;
336 else if (inst
.opcode
->iclass
== pcreladdr
337 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
339 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
340 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
342 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
344 else if (inst
.opcode
->iclass
== branch_imm
)
346 /* Stop analysis on branch. */
349 else if (inst
.opcode
->iclass
== condbranch
)
351 /* Stop analysis on branch. */
354 else if (inst
.opcode
->iclass
== branch_reg
)
356 /* Stop analysis on branch. */
359 else if (inst
.opcode
->iclass
== compbranch
)
361 /* Stop analysis on branch. */
364 else if (inst
.opcode
->op
== OP_MOVZ
)
366 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
368 /* If this shows up before we set the stack, keep going. Otherwise
369 stop the analysis. */
373 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
375 else if (inst
.opcode
->iclass
== log_shift
376 && strcmp (inst
.opcode
->name
, "orr") == 0)
378 unsigned rd
= inst
.operands
[0].reg
.regno
;
379 unsigned rn
= inst
.operands
[1].reg
.regno
;
380 unsigned rm
= inst
.operands
[2].reg
.regno
;
382 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
383 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
384 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
386 if (inst
.operands
[2].shifter
.amount
== 0
387 && rn
== AARCH64_SP_REGNUM
)
393 debug_printf ("aarch64: prologue analysis gave up "
394 "addr=%s opcode=0x%x (orr x register)\n",
395 core_addr_to_string_nz (start
), insn
);
400 else if (inst
.opcode
->op
== OP_STUR
)
402 unsigned rt
= inst
.operands
[0].reg
.regno
;
403 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
404 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
406 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
407 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
408 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
409 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
412 (pv_add_constant (regs
[rn
], inst
.operands
[1].addr
.offset
.imm
),
415 /* Are we storing with SP as a base? */
416 if (rn
== AARCH64_SP_REGNUM
)
417 seen_stack_set
= true;
419 else if ((inst
.opcode
->iclass
== ldstpair_off
420 || (inst
.opcode
->iclass
== ldstpair_indexed
421 && inst
.operands
[2].addr
.preind
))
422 && strcmp ("stp", inst
.opcode
->name
) == 0)
424 /* STP with addressing mode Pre-indexed and Base register. */
427 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
428 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
429 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
431 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
432 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
433 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
434 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
435 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
436 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
438 /* If recording this store would invalidate the store area
439 (perhaps because rn is not known) then we should abandon
440 further prologue analysis. */
441 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
444 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
447 rt1
= inst
.operands
[0].reg
.regno
;
448 rt2
= inst
.operands
[1].reg
.regno
;
449 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
451 rt1
+= AARCH64_X_REGISTER_COUNT
;
452 rt2
+= AARCH64_X_REGISTER_COUNT
;
455 stack
.store (pv_add_constant (regs
[rn
], imm
), size
, regs
[rt1
]);
456 stack
.store (pv_add_constant (regs
[rn
], imm
+ size
), size
, regs
[rt2
]);
458 if (inst
.operands
[2].addr
.writeback
)
459 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
461 /* Ignore the instruction that allocates stack space and sets
463 if (rn
== AARCH64_SP_REGNUM
&& !inst
.operands
[2].addr
.writeback
)
464 seen_stack_set
= true;
466 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
467 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
468 && (inst
.opcode
->op
== OP_STR_POS
469 || inst
.opcode
->op
== OP_STRF_POS
)))
470 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
471 && strcmp ("str", inst
.opcode
->name
) == 0)
473 /* STR (immediate) */
474 unsigned int rt
= inst
.operands
[0].reg
.regno
;
475 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
476 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
477 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
478 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
479 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
481 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
482 rt
+= AARCH64_X_REGISTER_COUNT
;
484 stack
.store (pv_add_constant (regs
[rn
], imm
), size
, regs
[rt
]);
485 if (inst
.operands
[1].addr
.writeback
)
486 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
488 /* Are we storing with SP as a base? */
489 if (rn
== AARCH64_SP_REGNUM
)
490 seen_stack_set
= true;
492 else if (inst
.opcode
->iclass
== testbranch
)
494 /* Stop analysis on branch. */
497 else if (inst
.opcode
->iclass
== ic_system
)
499 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
500 int ra_state_val
= 0;
502 if (insn
== 0xd503233f /* paciasp. */
503 || insn
== 0xd503237f /* pacibsp. */)
505 /* Return addresses are mangled. */
508 else if (insn
== 0xd50323bf /* autiasp. */
509 || insn
== 0xd50323ff /* autibsp. */)
511 /* Return addresses are not mangled. */
517 debug_printf ("aarch64: prologue analysis gave up addr=%s"
518 " opcode=0x%x (iclass)\n",
519 core_addr_to_string_nz (start
), insn
);
523 if (tdep
->has_pauth () && cache
!= nullptr)
524 trad_frame_set_value (cache
->saved_regs
,
525 tdep
->pauth_ra_state_regnum
,
532 debug_printf ("aarch64: prologue analysis gave up addr=%s"
534 core_addr_to_string_nz (start
), insn
);
543 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
545 /* Frame pointer is fp. Frame size is constant. */
546 cache
->framereg
= AARCH64_FP_REGNUM
;
547 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
549 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
551 /* Try the stack pointer. */
552 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
553 cache
->framereg
= AARCH64_SP_REGNUM
;
557 /* We're just out of luck. We don't know where the frame is. */
558 cache
->framereg
= -1;
559 cache
->framesize
= 0;
562 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
566 if (stack
.find_reg (gdbarch
, i
, &offset
))
567 cache
->saved_regs
[i
].set_addr (offset
);
570 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
572 int regnum
= gdbarch_num_regs (gdbarch
);
575 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
577 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].set_addr (offset
);
584 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
585 CORE_ADDR start
, CORE_ADDR limit
,
586 struct aarch64_prologue_cache
*cache
)
588 instruction_reader reader
;
590 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
596 namespace selftests
{
598 /* Instruction reader from manually cooked instruction sequences. */
600 class instruction_reader_test
: public abstract_instruction_reader
603 template<size_t SIZE
>
604 explicit instruction_reader_test (const uint32_t (&insns
)[SIZE
])
605 : m_insns (insns
), m_insns_size (SIZE
)
608 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
611 SELF_CHECK (len
== 4);
612 SELF_CHECK (memaddr
% 4 == 0);
613 SELF_CHECK (memaddr
/ 4 < m_insns_size
);
615 return m_insns
[memaddr
/ 4];
619 const uint32_t *m_insns
;
624 aarch64_analyze_prologue_test (void)
626 struct gdbarch_info info
;
628 gdbarch_info_init (&info
);
629 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
631 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
632 SELF_CHECK (gdbarch
!= NULL
);
634 struct aarch64_prologue_cache cache
;
635 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
637 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
639 /* Test the simple prologue in which frame pointer is used. */
641 static const uint32_t insns
[] = {
642 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
643 0x910003fd, /* mov x29, sp */
644 0x97ffffe6, /* bl 0x400580 */
646 instruction_reader_test
reader (insns
);
648 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
649 SELF_CHECK (end
== 4 * 2);
651 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
652 SELF_CHECK (cache
.framesize
== 272);
654 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
656 if (i
== AARCH64_FP_REGNUM
)
657 SELF_CHECK (cache
.saved_regs
[i
].addr () == -272);
658 else if (i
== AARCH64_LR_REGNUM
)
659 SELF_CHECK (cache
.saved_regs
[i
].addr () == -264);
661 SELF_CHECK (cache
.saved_regs
[i
].is_realreg ());
664 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
666 int regnum
= gdbarch_num_regs (gdbarch
);
668 SELF_CHECK (cache
.saved_regs
[i
+ regnum
669 + AARCH64_D0_REGNUM
].is_realreg ());
673 /* Test a prologue in which STR is used and frame pointer is not
676 static const uint32_t insns
[] = {
677 0xf81d0ff3, /* str x19, [sp, #-48]! */
678 0xb9002fe0, /* str w0, [sp, #44] */
679 0xf90013e1, /* str x1, [sp, #32]*/
680 0xfd000fe0, /* str d0, [sp, #24] */
681 0xaa0203f3, /* mov x19, x2 */
682 0xf94013e0, /* ldr x0, [sp, #32] */
684 instruction_reader_test
reader (insns
);
686 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
687 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
689 SELF_CHECK (end
== 4 * 5);
691 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
692 SELF_CHECK (cache
.framesize
== 48);
694 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
697 SELF_CHECK (cache
.saved_regs
[i
].addr () == -16);
699 SELF_CHECK (cache
.saved_regs
[i
].addr () == -48);
701 SELF_CHECK (cache
.saved_regs
[i
].is_realreg ());
704 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
706 int regnum
= gdbarch_num_regs (gdbarch
);
709 SELF_CHECK (cache
.saved_regs
[i
+ regnum
710 + AARCH64_D0_REGNUM
].addr ()
713 SELF_CHECK (cache
.saved_regs
[i
+ regnum
714 + AARCH64_D0_REGNUM
].is_realreg ());
718 /* Test handling of movz before setting the frame pointer. */
720 static const uint32_t insns
[] = {
721 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
722 0x52800020, /* mov w0, #0x1 */
723 0x910003fd, /* mov x29, sp */
724 0x528000a2, /* mov w2, #0x5 */
725 0x97fffff8, /* bl 6e4 */
728 instruction_reader_test
reader (insns
);
730 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
731 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
733 /* We should stop at the 4th instruction. */
734 SELF_CHECK (end
== (4 - 1) * 4);
735 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
736 SELF_CHECK (cache
.framesize
== 16);
739 /* Test handling of movz/stp when using the stack pointer as frame
742 static const uint32_t insns
[] = {
743 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
744 0x52800020, /* mov w0, #0x1 */
745 0x290207e0, /* stp w0, w1, [sp, #16] */
746 0xa9018fe2, /* stp x2, x3, [sp, #24] */
747 0x528000a2, /* mov w2, #0x5 */
748 0x97fffff8, /* bl 6e4 */
751 instruction_reader_test
reader (insns
);
753 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
754 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
756 /* We should stop at the 5th instruction. */
757 SELF_CHECK (end
== (5 - 1) * 4);
758 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
759 SELF_CHECK (cache
.framesize
== 64);
762 /* Test handling of movz/str when using the stack pointer as frame
765 static const uint32_t insns
[] = {
766 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
767 0x52800020, /* mov w0, #0x1 */
768 0xb9002be4, /* str w4, [sp, #40] */
769 0xf9001be5, /* str x5, [sp, #48] */
770 0x528000a2, /* mov w2, #0x5 */
771 0x97fffff8, /* bl 6e4 */
774 instruction_reader_test
reader (insns
);
776 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
777 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
779 /* We should stop at the 5th instruction. */
780 SELF_CHECK (end
== (5 - 1) * 4);
781 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
782 SELF_CHECK (cache
.framesize
== 64);
785 /* Test handling of movz/stur when using the stack pointer as frame
788 static const uint32_t insns
[] = {
789 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */
790 0x52800020, /* mov w0, #0x1 */
791 0xb80343e6, /* stur w6, [sp, #52] */
792 0xf80383e7, /* stur x7, [sp, #56] */
793 0x528000a2, /* mov w2, #0x5 */
794 0x97fffff8, /* bl 6e4 */
797 instruction_reader_test
reader (insns
);
799 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
800 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
802 /* We should stop at the 5th instruction. */
803 SELF_CHECK (end
== (5 - 1) * 4);
804 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
805 SELF_CHECK (cache
.framesize
== 64);
808 /* Test handling of movz when there is no frame pointer set or no stack
811 static const uint32_t insns
[] = {
812 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */
813 0x52800020, /* mov w0, #0x1 */
814 0x528000a2, /* mov w2, #0x5 */
815 0x97fffff8, /* bl 6e4 */
818 instruction_reader_test
reader (insns
);
820 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
821 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
823 /* We should stop at the 4th instruction. */
824 SELF_CHECK (end
== (4 - 1) * 4);
825 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
826 SELF_CHECK (cache
.framesize
== 16);
829 /* Test a prologue in which there is a return address signing instruction. */
830 if (tdep
->has_pauth ())
832 static const uint32_t insns
[] = {
833 0xd503233f, /* paciasp */
834 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
835 0x910003fd, /* mov x29, sp */
836 0xf801c3f3, /* str x19, [sp, #28] */
837 0xb9401fa0, /* ldr x19, [x29, #28] */
839 instruction_reader_test
reader (insns
);
841 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
842 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
,
845 SELF_CHECK (end
== 4 * 4);
846 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
847 SELF_CHECK (cache
.framesize
== 48);
849 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
852 SELF_CHECK (cache
.saved_regs
[i
].addr () == -20);
853 else if (i
== AARCH64_FP_REGNUM
)
854 SELF_CHECK (cache
.saved_regs
[i
].addr () == -48);
855 else if (i
== AARCH64_LR_REGNUM
)
856 SELF_CHECK (cache
.saved_regs
[i
].addr () == -40);
858 SELF_CHECK (cache
.saved_regs
[i
].is_realreg ());
861 if (tdep
->has_pauth ())
863 SELF_CHECK (trad_frame_value_p (cache
.saved_regs
,
864 tdep
->pauth_ra_state_regnum
));
865 SELF_CHECK (cache
.saved_regs
[tdep
->pauth_ra_state_regnum
].addr ()
870 } // namespace selftests
871 #endif /* GDB_SELF_TEST */
873 /* Implement the "skip_prologue" gdbarch method. */
876 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
878 CORE_ADDR func_addr
, limit_pc
;
880 /* See if we can determine the end of the prologue via the symbol
881 table. If so, then return either PC, or the PC after the
882 prologue, whichever is greater. */
883 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
885 CORE_ADDR post_prologue_pc
886 = skip_prologue_using_sal (gdbarch
, func_addr
);
888 if (post_prologue_pc
!= 0)
889 return std::max (pc
, post_prologue_pc
);
892 /* Can't determine prologue from the symbol table, need to examine
895 /* Find an upper limit on the function prologue using the debug
896 information. If the debug information could not be used to
897 provide that bound, then use an arbitrary large number as the
899 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
901 limit_pc
= pc
+ 128; /* Magic. */
903 /* Try disassembling prologue. */
904 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
907 /* Scan the function prologue for THIS_FRAME and populate the prologue
911 aarch64_scan_prologue (struct frame_info
*this_frame
,
912 struct aarch64_prologue_cache
*cache
)
914 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
915 CORE_ADDR prologue_start
;
916 CORE_ADDR prologue_end
;
917 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
918 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
920 cache
->prev_pc
= prev_pc
;
922 /* Assume we do not find a frame. */
923 cache
->framereg
= -1;
924 cache
->framesize
= 0;
926 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
929 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
933 /* No line info so use the current PC. */
934 prologue_end
= prev_pc
;
936 else if (sal
.end
< prologue_end
)
938 /* The next line begins after the function end. */
939 prologue_end
= sal
.end
;
942 prologue_end
= std::min (prologue_end
, prev_pc
);
943 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
949 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
953 cache
->framereg
= AARCH64_FP_REGNUM
;
954 cache
->framesize
= 16;
955 cache
->saved_regs
[29].set_addr (0);
956 cache
->saved_regs
[30].set_addr (8);
960 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
961 function may throw an exception if the inferior's registers or memory is
965 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
966 struct aarch64_prologue_cache
*cache
)
968 CORE_ADDR unwound_fp
;
971 aarch64_scan_prologue (this_frame
, cache
);
973 if (cache
->framereg
== -1)
976 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
980 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
982 /* Calculate actual addresses of saved registers using offsets
983 determined by aarch64_analyze_prologue. */
984 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
985 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
986 cache
->saved_regs
[reg
].set_addr (cache
->saved_regs
[reg
].addr ()
989 cache
->func
= get_frame_func (this_frame
);
991 cache
->available_p
= 1;
994 /* Allocate and fill in *THIS_CACHE with information about the prologue of
995 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
996 Return a pointer to the current aarch64_prologue_cache in
999 static struct aarch64_prologue_cache
*
1000 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
1002 struct aarch64_prologue_cache
*cache
;
1004 if (*this_cache
!= NULL
)
1005 return (struct aarch64_prologue_cache
*) *this_cache
;
1007 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1008 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1009 *this_cache
= cache
;
1013 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1015 catch (const gdb_exception_error
&ex
)
1017 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1024 /* Implement the "stop_reason" frame_unwind method. */
1026 static enum unwind_stop_reason
1027 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1030 struct aarch64_prologue_cache
*cache
1031 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1033 if (!cache
->available_p
)
1034 return UNWIND_UNAVAILABLE
;
1036 /* Halt the backtrace at "_start". */
1037 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1038 return UNWIND_OUTERMOST
;
1040 /* We've hit a wall, stop. */
1041 if (cache
->prev_sp
== 0)
1042 return UNWIND_OUTERMOST
;
1044 return UNWIND_NO_REASON
;
1047 /* Our frame ID for a normal frame is the current function's starting
1048 PC and the caller's SP when we were called. */
1051 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1052 void **this_cache
, struct frame_id
*this_id
)
1054 struct aarch64_prologue_cache
*cache
1055 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1057 if (!cache
->available_p
)
1058 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1060 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1063 /* Implement the "prev_register" frame_unwind method. */
1065 static struct value
*
1066 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1067 void **this_cache
, int prev_regnum
)
1069 struct aarch64_prologue_cache
*cache
1070 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1072 /* If we are asked to unwind the PC, then we need to return the LR
1073 instead. The prologue may save PC, but it will point into this
1074 frame's prologue, not the next frame's resume location. */
1075 if (prev_regnum
== AARCH64_PC_REGNUM
)
1078 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1079 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1081 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1083 if (tdep
->has_pauth ()
1084 && trad_frame_value_p (cache
->saved_regs
,
1085 tdep
->pauth_ra_state_regnum
))
1086 lr
= aarch64_frame_unmask_lr (tdep
, this_frame
, lr
);
1088 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1091 /* SP is generally not saved to the stack, but this frame is
1092 identified by the next frame's stack pointer at the time of the
1093 call. The value was already reconstructed into PREV_SP. */
1099 | | | <- Previous SP
1102 +--| saved fp |<- FP
1106 if (prev_regnum
== AARCH64_SP_REGNUM
)
1107 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1110 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1114 /* AArch64 prologue unwinder. */
1115 struct frame_unwind aarch64_prologue_unwind
=
1118 aarch64_prologue_frame_unwind_stop_reason
,
1119 aarch64_prologue_this_id
,
1120 aarch64_prologue_prev_register
,
1122 default_frame_sniffer
1125 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1126 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1127 Return a pointer to the current aarch64_prologue_cache in
1130 static struct aarch64_prologue_cache
*
1131 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1133 struct aarch64_prologue_cache
*cache
;
1135 if (*this_cache
!= NULL
)
1136 return (struct aarch64_prologue_cache
*) *this_cache
;
1138 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1139 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1140 *this_cache
= cache
;
1144 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1146 cache
->prev_pc
= get_frame_pc (this_frame
);
1147 cache
->available_p
= 1;
1149 catch (const gdb_exception_error
&ex
)
1151 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1158 /* Implement the "stop_reason" frame_unwind method. */
1160 static enum unwind_stop_reason
1161 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1164 struct aarch64_prologue_cache
*cache
1165 = aarch64_make_stub_cache (this_frame
, this_cache
);
1167 if (!cache
->available_p
)
1168 return UNWIND_UNAVAILABLE
;
1170 return UNWIND_NO_REASON
;
1173 /* Our frame ID for a stub frame is the current SP and LR. */
1176 aarch64_stub_this_id (struct frame_info
*this_frame
,
1177 void **this_cache
, struct frame_id
*this_id
)
1179 struct aarch64_prologue_cache
*cache
1180 = aarch64_make_stub_cache (this_frame
, this_cache
);
1182 if (cache
->available_p
)
1183 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1185 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
1188 /* Implement the "sniffer" frame_unwind method. */
1191 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1192 struct frame_info
*this_frame
,
1193 void **this_prologue_cache
)
1195 CORE_ADDR addr_in_block
;
1198 addr_in_block
= get_frame_address_in_block (this_frame
);
1199 if (in_plt_section (addr_in_block
)
1200 /* We also use the stub winder if the target memory is unreadable
1201 to avoid having the prologue unwinder trying to read it. */
1202 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1208 /* AArch64 stub unwinder. */
/* NOTE(review): initializer damaged in extraction; the frame type slot
   (normally NORMAL_FRAME) and the NULL dealloc_cache slot are not
   visible here -- confirm against the full file.  */
1209 struct frame_unwind aarch64_stub_unwind
=
/* stop_reason callback.  */
1212 aarch64_stub_frame_unwind_stop_reason
,
/* this_id callback.  */
1213 aarch64_stub_this_id
,
/* prev_register: shared with the prologue unwinder.  */
1214 aarch64_prologue_prev_register
,
/* Sniffer: selects this unwinder for PLT stubs and unreadable code.  */
1216 aarch64_stub_unwind_sniffer
1219 /* Return the frame base address of *THIS_FRAME. */
1222 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1224 struct aarch64_prologue_cache
*cache
1225 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1227 return cache
->prev_sp
- cache
->framesize
;
1230 /* AArch64 default frame base information. */
/* NOTE(review): initializer damaged in extraction -- the same callback
   serves as this_base, this_args and this_locals; confirm against the
   full file.  */
1231 struct frame_base aarch64_normal_base
=
1233 &aarch64_prologue_unwind
,
1234 aarch64_normal_frame_base
,
1235 aarch64_normal_frame_base
,
1236 aarch64_normal_frame_base
1239 /* Return the value of the REGNUM register in the previous frame of
1242 static struct value
*
1243 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1244 void **this_cache
, int regnum
)
1246 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_frame_arch (this_frame
));
1251 case AARCH64_PC_REGNUM
:
1252 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1253 lr
= aarch64_frame_unmask_lr (tdep
, this_frame
, lr
);
1254 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1257 internal_error (__FILE__
, __LINE__
,
1258 _("Unexpected register %d"), regnum
);
/* One-byte DWARF expressions used as saved-value expressions for the
   pauth RA_STATE pseudo register: op_lit0 means "LR not signed",
   op_lit1 means "LR signed" (toggled by the vendor CFA op below).  */
1262 static const unsigned char op_lit0
= DW_OP_lit0
;
1263 static const unsigned char op_lit1
= DW_OP_lit1
;
1265 /* Implement the "init_reg" dwarf2_frame_ops method. */
1268 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1269 struct dwarf2_frame_state_reg
*reg
,
1270 struct frame_info
*this_frame
)
1272 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1276 case AARCH64_PC_REGNUM
:
1277 reg
->how
= DWARF2_FRAME_REG_FN
;
1278 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1281 case AARCH64_SP_REGNUM
:
1282 reg
->how
= DWARF2_FRAME_REG_CFA
;
1286 /* Init pauth registers. */
1287 if (tdep
->has_pauth ())
1289 if (regnum
== tdep
->pauth_ra_state_regnum
)
1291 /* Initialize RA_STATE to zero. */
1292 reg
->how
= DWARF2_FRAME_REG_SAVED_VAL_EXP
;
1293 reg
->loc
.exp
.start
= &op_lit0
;
1294 reg
->loc
.exp
.len
= 1;
1297 else if (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
1298 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
))
1300 reg
->how
= DWARF2_FRAME_REG_SAME_VALUE
;
1306 /* Implement the execute_dwarf_cfa_vendor_op method. */
1309 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch
*gdbarch
, gdb_byte op
,
1310 struct dwarf2_frame_state
*fs
)
1312 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1313 struct dwarf2_frame_state_reg
*ra_state
;
1315 if (op
== DW_CFA_AARCH64_negate_ra_state
)
1317 /* On systems without pauth, treat as a nop. */
1318 if (!tdep
->has_pauth ())
1321 /* Allocate RA_STATE column if it's not allocated yet. */
1322 fs
->regs
.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE
+ 1);
1324 /* Toggle the status of RA_STATE between 0 and 1. */
1325 ra_state
= &(fs
->regs
.reg
[AARCH64_DWARF_PAUTH_RA_STATE
]);
1326 ra_state
->how
= DWARF2_FRAME_REG_SAVED_VAL_EXP
;
1328 if (ra_state
->loc
.exp
.start
== nullptr
1329 || ra_state
->loc
.exp
.start
== &op_lit0
)
1330 ra_state
->loc
.exp
.start
= &op_lit1
;
1332 ra_state
->loc
.exp
.start
= &op_lit0
;
1334 ra_state
->loc
.exp
.len
= 1;
1342 /* Used for matching BRK instructions for AArch64. */
/* Masking an instruction with BRK_INSN_MASK clears the 16-bit immediate
   field; the remainder equals BRK_INSN_BASE for any BRK #imm16.  */
1343 static constexpr uint32_t BRK_INSN_MASK
= 0xffe0001f;
1344 static constexpr uint32_t BRK_INSN_BASE
= 0xd4200000;
1346 /* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
1349 aarch64_program_breakpoint_here_p (gdbarch
*gdbarch
, CORE_ADDR address
)
1351 const uint32_t insn_len
= 4;
1352 gdb_byte target_mem
[4];
1354 /* Enable the automatic memory restoration from breakpoints while
1355 we read the memory. Otherwise we may find temporary breakpoints, ones
1356 inserted by GDB, and flag them as permanent breakpoints. */
1357 scoped_restore restore_memory
1358 = make_scoped_restore_show_memory_breakpoints (0);
1360 if (target_read_memory (address
, target_mem
, insn_len
) == 0)
1363 (uint32_t) extract_unsigned_integer (target_mem
, insn_len
,
1364 gdbarch_byte_order_for_code (gdbarch
));
1366 /* Check if INSN is a BRK instruction pattern. There are multiple choices
1367 of such instructions with different immediate values. Different OS'
1368 may use a different variation, but they have the same outcome. */
1369 return ((insn
& BRK_INSN_MASK
) == BRK_INSN_BASE
);
1375 /* When arguments must be pushed onto the stack, they go on in reverse
1376 order. The code below implements a FILO (stack) to do this. */
/* NOTE(review): the struct header and trailing "int len;" member of
   stack_item_t were lost in extraction -- confirm against the full
   file.  */
1380 /* Value to pass on stack. It can be NULL if this item is for stack
1382 const gdb_byte
*data
;
1384 /* Size in bytes of value to pass on stack. */
1388 /* Implement the gdbarch type alignment method, overrides the generic
1389 alignment algorithm for anything that is aarch64 specific. */
1392 aarch64_type_align (gdbarch
*gdbarch
, struct type
*t
)
1394 t
= check_typedef (t
);
1395 if (t
->code () == TYPE_CODE_ARRAY
&& t
->is_vector ())
1397 /* Use the natural alignment for vector types (the same for
1398 scalar type), but the maximum alignment is 128-bit. */
1399 if (TYPE_LENGTH (t
) > 16)
1402 return TYPE_LENGTH (t
);
1405 /* Allow the common code to calculate the alignment. */
1409 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1411 Return the number of register required, or -1 on failure.
1413 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1414 to the element, else fail if the type of this element does not match the
1418 aapcs_is_vfp_call_or_return_candidate_1 (struct type
*type
,
1419 struct type
**fundamental_type
)
1421 if (type
== nullptr)
1424 switch (type
->code ())
1427 if (TYPE_LENGTH (type
) > 16)
1430 if (*fundamental_type
== nullptr)
1431 *fundamental_type
= type
;
1432 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1433 || type
->code () != (*fundamental_type
)->code ())
1438 case TYPE_CODE_COMPLEX
:
1440 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1441 if (TYPE_LENGTH (target_type
) > 16)
1444 if (*fundamental_type
== nullptr)
1445 *fundamental_type
= target_type
;
1446 else if (TYPE_LENGTH (target_type
) != TYPE_LENGTH (*fundamental_type
)
1447 || target_type
->code () != (*fundamental_type
)->code ())
1453 case TYPE_CODE_ARRAY
:
1455 if (type
->is_vector ())
1457 if (TYPE_LENGTH (type
) != 8 && TYPE_LENGTH (type
) != 16)
1460 if (*fundamental_type
== nullptr)
1461 *fundamental_type
= type
;
1462 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1463 || type
->code () != (*fundamental_type
)->code ())
1470 struct type
*target_type
= TYPE_TARGET_TYPE (type
);
1471 int count
= aapcs_is_vfp_call_or_return_candidate_1
1472 (target_type
, fundamental_type
);
1477 count
*= (TYPE_LENGTH (type
) / TYPE_LENGTH (target_type
));
1482 case TYPE_CODE_STRUCT
:
1483 case TYPE_CODE_UNION
:
1487 for (int i
= 0; i
< type
->num_fields (); i
++)
1489 /* Ignore any static fields. */
1490 if (field_is_static (&type
->field (i
)))
1493 struct type
*member
= check_typedef (type
->field (i
).type ());
1495 int sub_count
= aapcs_is_vfp_call_or_return_candidate_1
1496 (member
, fundamental_type
);
1497 if (sub_count
== -1)
1502 /* Ensure there is no padding between the fields (allowing for empty
1503 zero length structs) */
1504 int ftype_length
= (*fundamental_type
== nullptr)
1505 ? 0 : TYPE_LENGTH (*fundamental_type
);
1506 if (count
* ftype_length
!= TYPE_LENGTH (type
))
1519 /* Return true if an argument, whose type is described by TYPE, can be passed or
1520 returned in simd/fp registers, providing enough parameter passing registers
1521 are available. This is as described in the AAPCS64.
1523 Upon successful return, *COUNT returns the number of needed registers,
1524 *FUNDAMENTAL_TYPE contains the type of those registers.
1526 Candidate as per the AAPCS64 5.4.2.C is either a:
1529 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1530 all the members are floats and has at most 4 members.
1531 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1532 all the members are short vectors and has at most 4 members.
1535 Note that HFAs and HVAs can include nested structures and arrays. */
1538 aapcs_is_vfp_call_or_return_candidate (struct type
*type
, int *count
,
1539 struct type
**fundamental_type
)
1541 if (type
== nullptr)
1544 *fundamental_type
= nullptr;
1546 int ag_count
= aapcs_is_vfp_call_or_return_candidate_1 (type
,
1549 if (ag_count
> 0 && ag_count
<= HA_MAX_NUM_FLDS
)
1558 /* AArch64 function call information structure. */
/* NOTE(review): the struct header/braces and the ngrn/nsrn/nsaa member
   declarations were lost in extraction; only their descriptive comments
   remain -- confirm against the full file.  */
1559 struct aarch64_call_info
1561 /* the current argument number. */
1562 unsigned argnum
= 0;
1564 /* The next general purpose register number, equivalent to NGRN as
1565 described in the AArch64 Procedure Call Standard. */
1568 /* The next SIMD and floating point register number, equivalent to
1569 NSRN as described in the AArch64 Procedure Call Standard. */
1572 /* The next stacked argument address, equivalent to NSAA as
1573 described in the AArch64 Procedure Call Standard. */
1576 /* Stack item vector. */
1577 std::vector
<stack_item_t
> si
;
1580 /* Pass a value in a sequence of consecutive X registers. The caller
1581 is responsible for ensuring sufficient registers are available. */
1584 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1585 struct aarch64_call_info
*info
, struct type
*type
,
1588 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1589 int len
= TYPE_LENGTH (type
);
1590 enum type_code typecode
= type
->code ();
1591 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1592 const bfd_byte
*buf
= value_contents (arg
);
1598 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1599 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1603 /* Adjust sub-word struct/union args when big-endian. */
1604 if (byte_order
== BFD_ENDIAN_BIG
1605 && partial_len
< X_REGISTER_SIZE
1606 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1607 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1611 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1612 gdbarch_register_name (gdbarch
, regnum
),
1613 phex (regval
, X_REGISTER_SIZE
));
1615 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1622 /* Attempt to marshall a value in a V register. Return 1 if
1623 successful, or 0 if insufficient registers are available. This
1624 function, unlike the equivalent pass_in_x() function does not
1625 handle arguments spread across multiple registers. */
1628 pass_in_v (struct gdbarch
*gdbarch
,
1629 struct regcache
*regcache
,
1630 struct aarch64_call_info
*info
,
1631 int len
, const bfd_byte
*buf
)
1635 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1636 /* Enough space for a full vector register. */
1637 gdb_byte reg
[register_size (gdbarch
, regnum
)];
1638 gdb_assert (len
<= sizeof (reg
));
1643 memset (reg
, 0, sizeof (reg
));
1644 /* PCS C.1, the argument is allocated to the least significant
1645 bits of V register. */
1646 memcpy (reg
, buf
, len
);
1647 regcache
->cooked_write (regnum
, reg
);
1651 debug_printf ("arg %d in %s\n", info
->argnum
,
1652 gdbarch_register_name (gdbarch
, regnum
));
1660 /* Marshall an argument onto the stack. */
1663 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1666 const bfd_byte
*buf
= value_contents (arg
);
1667 int len
= TYPE_LENGTH (type
);
1673 align
= type_align (type
);
1675 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1676 Natural alignment of the argument's type. */
1677 align
= align_up (align
, 8);
1679 /* The AArch64 PCS requires at most doubleword alignment. */
1685 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1691 info
->si
.push_back (item
);
1694 if (info
->nsaa
& (align
- 1))
1696 /* Push stack alignment padding. */
1697 int pad
= align
- (info
->nsaa
& (align
- 1));
1702 info
->si
.push_back (item
);
1707 /* Marshall an argument into a sequence of one or more consecutive X
1708 registers or, if insufficient X registers are available then onto
1712 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1713 struct aarch64_call_info
*info
, struct type
*type
,
1716 int len
= TYPE_LENGTH (type
);
1717 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1719 /* PCS C.13 - Pass in registers if we have enough spare */
1720 if (info
->ngrn
+ nregs
<= 8)
1722 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1723 info
->ngrn
+= nregs
;
1728 pass_on_stack (info
, type
, arg
);
1732 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1733 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1734 registers. A return value of false is an error state as the value will have
1735 been partially passed to the stack. */
1737 pass_in_v_vfp_candidate (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1738 struct aarch64_call_info
*info
, struct type
*arg_type
,
1741 switch (arg_type
->code ())
1744 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1745 value_contents (arg
));
1748 case TYPE_CODE_COMPLEX
:
1750 const bfd_byte
*buf
= value_contents (arg
);
1751 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (arg_type
));
1753 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1757 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1758 buf
+ TYPE_LENGTH (target_type
));
1761 case TYPE_CODE_ARRAY
:
1762 if (arg_type
->is_vector ())
1763 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1764 value_contents (arg
));
1767 case TYPE_CODE_STRUCT
:
1768 case TYPE_CODE_UNION
:
1769 for (int i
= 0; i
< arg_type
->num_fields (); i
++)
1771 /* Don't include static fields. */
1772 if (field_is_static (&arg_type
->field (i
)))
1775 struct value
*field
= value_primitive_field (arg
, 0, i
, arg_type
);
1776 struct type
*field_type
= check_typedef (value_type (field
));
1778 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, info
, field_type
,
1789 /* Implement the "push_dummy_call" gdbarch method. */
1792 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1793 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1795 struct value
**args
, CORE_ADDR sp
,
1796 function_call_return_method return_method
,
1797 CORE_ADDR struct_addr
)
1800 struct aarch64_call_info info
;
1802 /* We need to know what the type of the called function is in order
1803 to determine the number of named/anonymous arguments for the
1804 actual argument placement, and the return type in order to handle
1805 return value correctly.
1807 The generic code above us views the decision of return in memory
1808 or return in registers as a two stage processes. The language
1809 handler is consulted first and may decide to return in memory (eg
1810 class with copy constructor returned by value), this will cause
1811 the generic code to allocate space AND insert an initial leading
1814 If the language code does not decide to pass in memory then the
1815 target code is consulted.
1817 If the language code decides to pass in memory we want to move
1818 the pointer inserted as the initial argument from the argument
1819 list and into X8, the conventional AArch64 struct return pointer
1822 /* Set the return address. For the AArch64, the return breakpoint
1823 is always at BP_ADDR. */
1824 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1826 /* If we were given an initial argument for the return slot, lose it. */
1827 if (return_method
== return_method_hidden_param
)
1833 /* The struct_return pointer occupies X8. */
1834 if (return_method
!= return_method_normal
)
1838 debug_printf ("struct return in %s = 0x%s\n",
1839 gdbarch_register_name (gdbarch
,
1840 AARCH64_STRUCT_RETURN_REGNUM
),
1841 paddress (gdbarch
, struct_addr
));
1843 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1847 for (argnum
= 0; argnum
< nargs
; argnum
++)
1849 struct value
*arg
= args
[argnum
];
1850 struct type
*arg_type
, *fundamental_type
;
1853 arg_type
= check_typedef (value_type (arg
));
1854 len
= TYPE_LENGTH (arg_type
);
1856 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1857 if there are enough spare registers. */
1858 if (aapcs_is_vfp_call_or_return_candidate (arg_type
, &elements
,
1861 if (info
.nsrn
+ elements
<= 8)
1863 /* We know that we have sufficient registers available therefore
1864 this will never need to fallback to the stack. */
1865 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, &info
, arg_type
,
1867 gdb_assert_not_reached ("Failed to push args");
1872 pass_on_stack (&info
, arg_type
, arg
);
1877 switch (arg_type
->code ())
1880 case TYPE_CODE_BOOL
:
1881 case TYPE_CODE_CHAR
:
1882 case TYPE_CODE_RANGE
:
1883 case TYPE_CODE_ENUM
:
1886 /* Promote to 32 bit integer. */
1887 if (arg_type
->is_unsigned ())
1888 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1890 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1891 arg
= value_cast (arg_type
, arg
);
1893 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1896 case TYPE_CODE_STRUCT
:
1897 case TYPE_CODE_ARRAY
:
1898 case TYPE_CODE_UNION
:
1901 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1902 invisible reference. */
1904 /* Allocate aligned storage. */
1905 sp
= align_down (sp
- len
, 16);
1907 /* Write the real data into the stack. */
1908 write_memory (sp
, value_contents (arg
), len
);
1910 /* Construct the indirection. */
1911 arg_type
= lookup_pointer_type (arg_type
);
1912 arg
= value_from_pointer (arg_type
, sp
);
1913 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1916 /* PCS C.15 / C.18 multiple values pass. */
1917 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1921 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1926 /* Make sure stack retains 16 byte alignment. */
1928 sp
-= 16 - (info
.nsaa
& 15);
1930 while (!info
.si
.empty ())
1932 const stack_item_t
&si
= info
.si
.back ();
1935 if (si
.data
!= NULL
)
1936 write_memory (sp
, si
.data
, si
.len
);
1937 info
.si
.pop_back ();
1940 /* Finally, update the SP register. */
1941 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1946 /* Implement the "frame_align" gdbarch method. */
1949 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1951 /* Align the stack to sixteen bytes. */
1952 return sp
& ~(CORE_ADDR
) 15;
1955 /* Return the type for an AdvSISD Q register. */
1957 static struct type
*
1958 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1960 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1962 if (tdep
->vnq_type
== NULL
)
1967 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1970 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1971 append_composite_type_field (t
, "u", elem
);
1973 elem
= builtin_type (gdbarch
)->builtin_int128
;
1974 append_composite_type_field (t
, "s", elem
);
1979 return tdep
->vnq_type
;
1982 /* Return the type for an AdvSISD D register. */
1984 static struct type
*
1985 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1987 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1989 if (tdep
->vnd_type
== NULL
)
1994 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1997 elem
= builtin_type (gdbarch
)->builtin_double
;
1998 append_composite_type_field (t
, "f", elem
);
2000 elem
= builtin_type (gdbarch
)->builtin_uint64
;
2001 append_composite_type_field (t
, "u", elem
);
2003 elem
= builtin_type (gdbarch
)->builtin_int64
;
2004 append_composite_type_field (t
, "s", elem
);
2009 return tdep
->vnd_type
;
2012 /* Return the type for an AdvSISD S register. */
2014 static struct type
*
2015 aarch64_vns_type (struct gdbarch
*gdbarch
)
2017 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2019 if (tdep
->vns_type
== NULL
)
2024 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
2027 elem
= builtin_type (gdbarch
)->builtin_float
;
2028 append_composite_type_field (t
, "f", elem
);
2030 elem
= builtin_type (gdbarch
)->builtin_uint32
;
2031 append_composite_type_field (t
, "u", elem
);
2033 elem
= builtin_type (gdbarch
)->builtin_int32
;
2034 append_composite_type_field (t
, "s", elem
);
2039 return tdep
->vns_type
;
2042 /* Return the type for an AdvSISD H register. */
2044 static struct type
*
2045 aarch64_vnh_type (struct gdbarch
*gdbarch
)
2047 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2049 if (tdep
->vnh_type
== NULL
)
2054 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
2057 elem
= builtin_type (gdbarch
)->builtin_half
;
2058 append_composite_type_field (t
, "f", elem
);
2060 elem
= builtin_type (gdbarch
)->builtin_uint16
;
2061 append_composite_type_field (t
, "u", elem
);
2063 elem
= builtin_type (gdbarch
)->builtin_int16
;
2064 append_composite_type_field (t
, "s", elem
);
2069 return tdep
->vnh_type
;
2072 /* Return the type for an AdvSISD B register. */
2074 static struct type
*
2075 aarch64_vnb_type (struct gdbarch
*gdbarch
)
2077 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2079 if (tdep
->vnb_type
== NULL
)
2084 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
2087 elem
= builtin_type (gdbarch
)->builtin_uint8
;
2088 append_composite_type_field (t
, "u", elem
);
2090 elem
= builtin_type (gdbarch
)->builtin_int8
;
2091 append_composite_type_field (t
, "s", elem
);
2096 return tdep
->vnb_type
;
2099 /* Return the type for an AdvSISD V register. */
2101 static struct type
*
2102 aarch64_vnv_type (struct gdbarch
*gdbarch
)
2104 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2106 if (tdep
->vnv_type
== NULL
)
2108 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2109 slice from the non-pseudo vector registers. However NEON V registers
2110 are always vector registers, and need constructing as such. */
2111 const struct builtin_type
*bt
= builtin_type (gdbarch
);
2113 struct type
*t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnv",
2116 struct type
*sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
2118 append_composite_type_field (sub
, "f",
2119 init_vector_type (bt
->builtin_double
, 2));
2120 append_composite_type_field (sub
, "u",
2121 init_vector_type (bt
->builtin_uint64
, 2));
2122 append_composite_type_field (sub
, "s",
2123 init_vector_type (bt
->builtin_int64
, 2));
2124 append_composite_type_field (t
, "d", sub
);
2126 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
2128 append_composite_type_field (sub
, "f",
2129 init_vector_type (bt
->builtin_float
, 4));
2130 append_composite_type_field (sub
, "u",
2131 init_vector_type (bt
->builtin_uint32
, 4));
2132 append_composite_type_field (sub
, "s",
2133 init_vector_type (bt
->builtin_int32
, 4));
2134 append_composite_type_field (t
, "s", sub
);
2136 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
2138 append_composite_type_field (sub
, "f",
2139 init_vector_type (bt
->builtin_half
, 8));
2140 append_composite_type_field (sub
, "u",
2141 init_vector_type (bt
->builtin_uint16
, 8));
2142 append_composite_type_field (sub
, "s",
2143 init_vector_type (bt
->builtin_int16
, 8));
2144 append_composite_type_field (t
, "h", sub
);
2146 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
2148 append_composite_type_field (sub
, "u",
2149 init_vector_type (bt
->builtin_uint8
, 16));
2150 append_composite_type_field (sub
, "s",
2151 init_vector_type (bt
->builtin_int8
, 16));
2152 append_composite_type_field (t
, "b", sub
);
2154 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
2156 append_composite_type_field (sub
, "u",
2157 init_vector_type (bt
->builtin_uint128
, 1));
2158 append_composite_type_field (sub
, "s",
2159 init_vector_type (bt
->builtin_int128
, 1));
2160 append_composite_type_field (t
, "q", sub
);
2165 return tdep
->vnv_type
;
2168 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2171 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
2173 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2175 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
2176 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
2178 if (reg
== AARCH64_DWARF_SP
)
2179 return AARCH64_SP_REGNUM
;
2181 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
2182 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
2184 if (reg
== AARCH64_DWARF_SVE_VG
)
2185 return AARCH64_SVE_VG_REGNUM
;
2187 if (reg
== AARCH64_DWARF_SVE_FFR
)
2188 return AARCH64_SVE_FFR_REGNUM
;
2190 if (reg
>= AARCH64_DWARF_SVE_P0
&& reg
<= AARCH64_DWARF_SVE_P0
+ 15)
2191 return AARCH64_SVE_P0_REGNUM
+ reg
- AARCH64_DWARF_SVE_P0
;
2193 if (reg
>= AARCH64_DWARF_SVE_Z0
&& reg
<= AARCH64_DWARF_SVE_Z0
+ 15)
2194 return AARCH64_SVE_Z0_REGNUM
+ reg
- AARCH64_DWARF_SVE_Z0
;
2196 if (tdep
->has_pauth ())
2198 if (reg
>= AARCH64_DWARF_PAUTH_DMASK
&& reg
<= AARCH64_DWARF_PAUTH_CMASK
)
2199 return tdep
->pauth_reg_base
+ reg
- AARCH64_DWARF_PAUTH_DMASK
;
2201 if (reg
== AARCH64_DWARF_PAUTH_RA_STATE
)
2202 return tdep
->pauth_ra_state_regnum
;
2208 /* Implement the "print_insn" gdbarch method. */
2211 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
2213 info
->symbols
= NULL
;
2214 return default_print_insn (memaddr
, info
);
2217 /* AArch64 BRK software debug mode instruction.
2218 Note that AArch64 code is always little-endian.
2219 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
/* Byte sequence for BRK #0 in memory order (little-endian).  */
2220 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
/* Generates the kind/bytes helpers used by the gdbarch breakpoint
   hooks.  */
2222 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
2224 /* Extract from an array REGS containing the (raw) register state a
2225 function return value of type TYPE, and copy that, in virtual
2226 format, into VALBUF. */
2229 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
2232 struct gdbarch
*gdbarch
= regs
->arch ();
2233 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2235 struct type
*fundamental_type
;
2237 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2240 int len
= TYPE_LENGTH (fundamental_type
);
2242 for (int i
= 0; i
< elements
; i
++)
2244 int regno
= AARCH64_V0_REGNUM
+ i
;
2245 /* Enough space for a full vector register. */
2246 gdb_byte buf
[register_size (gdbarch
, regno
)];
2247 gdb_assert (len
<= sizeof (buf
));
2251 debug_printf ("read HFA or HVA return value element %d from %s\n",
2253 gdbarch_register_name (gdbarch
, regno
));
2255 regs
->cooked_read (regno
, buf
);
2257 memcpy (valbuf
, buf
, len
);
2261 else if (type
->code () == TYPE_CODE_INT
2262 || type
->code () == TYPE_CODE_CHAR
2263 || type
->code () == TYPE_CODE_BOOL
2264 || type
->code () == TYPE_CODE_PTR
2265 || TYPE_IS_REFERENCE (type
)
2266 || type
->code () == TYPE_CODE_ENUM
)
2268 /* If the type is a plain integer, then the access is
2269 straight-forward. Otherwise we have to play around a bit
2271 int len
= TYPE_LENGTH (type
);
2272 int regno
= AARCH64_X0_REGNUM
;
2277 /* By using store_unsigned_integer we avoid having to do
2278 anything special for small big-endian values. */
2279 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2280 store_unsigned_integer (valbuf
,
2281 (len
> X_REGISTER_SIZE
2282 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2283 len
-= X_REGISTER_SIZE
;
2284 valbuf
+= X_REGISTER_SIZE
;
2289 /* For a structure or union the behaviour is as if the value had
2290 been stored to word-aligned memory and then loaded into
2291 registers with 64-bit load instruction(s). */
2292 int len
= TYPE_LENGTH (type
);
2293 int regno
= AARCH64_X0_REGNUM
;
2294 bfd_byte buf
[X_REGISTER_SIZE
];
2298 regs
->cooked_read (regno
++, buf
);
2299 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2300 len
-= X_REGISTER_SIZE
;
2301 valbuf
+= X_REGISTER_SIZE
;
2307 /* Will a function return an aggregate type in memory or in a
2308 register? Return 0 if an aggregate type can be returned in a
2309 register, 1 if it must be returned in memory. */
2312 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
2314 type
= check_typedef (type
);
2316 struct type
*fundamental_type
;
2318 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2321 /* v0-v7 are used to return values and one register is allocated
2322 for one member. However, HFA or HVA has at most four members. */
2326 if (TYPE_LENGTH (type
) > 16)
2328 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2329 invisible reference. */
2337 /* Write into appropriate registers a function return value of type
2338 TYPE, given in virtual format. */
2341 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2342 const gdb_byte
*valbuf
)
2344 struct gdbarch
*gdbarch
= regs
->arch ();
2345 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2347 struct type
*fundamental_type
;
2349 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2352 int len
= TYPE_LENGTH (fundamental_type
);
2354 for (int i
= 0; i
< elements
; i
++)
2356 int regno
= AARCH64_V0_REGNUM
+ i
;
2357 /* Enough space for a full vector register. */
2358 gdb_byte tmpbuf
[register_size (gdbarch
, regno
)];
2359 gdb_assert (len
<= sizeof (tmpbuf
));
2363 debug_printf ("write HFA or HVA return value element %d to %s\n",
2365 gdbarch_register_name (gdbarch
, regno
));
2368 memcpy (tmpbuf
, valbuf
,
2369 len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2370 regs
->cooked_write (regno
, tmpbuf
);
2374 else if (type
->code () == TYPE_CODE_INT
2375 || type
->code () == TYPE_CODE_CHAR
2376 || type
->code () == TYPE_CODE_BOOL
2377 || type
->code () == TYPE_CODE_PTR
2378 || TYPE_IS_REFERENCE (type
)
2379 || type
->code () == TYPE_CODE_ENUM
)
2381 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2383 /* Values of one word or less are zero/sign-extended and
2385 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2386 LONGEST val
= unpack_long (type
, valbuf
);
2388 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2389 regs
->cooked_write (AARCH64_X0_REGNUM
, tmpbuf
);
2393 /* Integral values greater than one word are stored in
2394 consecutive registers starting with r0. This will always
2395 be a multiple of the regiser size. */
2396 int len
= TYPE_LENGTH (type
);
2397 int regno
= AARCH64_X0_REGNUM
;
2401 regs
->cooked_write (regno
++, valbuf
);
2402 len
-= X_REGISTER_SIZE
;
2403 valbuf
+= X_REGISTER_SIZE
;
2409 /* For a structure or union the behaviour is as if the value had
2410 been stored to word-aligned memory and then loaded into
2411 registers with 64-bit load instruction(s). */
2412 int len
= TYPE_LENGTH (type
);
2413 int regno
= AARCH64_X0_REGNUM
;
2414 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2418 memcpy (tmpbuf
, valbuf
,
2419 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2420 regs
->cooked_write (regno
++, tmpbuf
);
2421 len
-= X_REGISTER_SIZE
;
2422 valbuf
+= X_REGISTER_SIZE
;
2427 /* Implement the "return_value" gdbarch method. */
2429 static enum return_value_convention
2430 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2431 struct type
*valtype
, struct regcache
*regcache
,
2432 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2435 if (valtype
->code () == TYPE_CODE_STRUCT
2436 || valtype
->code () == TYPE_CODE_UNION
2437 || valtype
->code () == TYPE_CODE_ARRAY
)
2439 if (aarch64_return_in_memory (gdbarch
, valtype
))
2442 debug_printf ("return value in memory\n");
2443 return RETURN_VALUE_STRUCT_CONVENTION
;
2448 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2451 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2454 debug_printf ("return value in registers\n");
2456 return RETURN_VALUE_REGISTER_CONVENTION
;
2459 /* Implement the "get_longjmp_target" gdbarch method. */
2462 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2465 gdb_byte buf
[X_REGISTER_SIZE
];
2466 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2467 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2468 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2470 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2472 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2476 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2480 /* Implement the "gen_return_address" gdbarch method. */
2483 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2484 struct agent_expr
*ax
, struct axs_value
*value
,
2487 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2488 value
->kind
= axs_lvalue_register
;
2489 value
->u
.reg
= AARCH64_LR_REGNUM
;
2493 /* Return the pseudo register name corresponding to register regnum. */
2496 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2498 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2500 static const char *const q_name
[] =
2502 "q0", "q1", "q2", "q3",
2503 "q4", "q5", "q6", "q7",
2504 "q8", "q9", "q10", "q11",
2505 "q12", "q13", "q14", "q15",
2506 "q16", "q17", "q18", "q19",
2507 "q20", "q21", "q22", "q23",
2508 "q24", "q25", "q26", "q27",
2509 "q28", "q29", "q30", "q31",
2512 static const char *const d_name
[] =
2514 "d0", "d1", "d2", "d3",
2515 "d4", "d5", "d6", "d7",
2516 "d8", "d9", "d10", "d11",
2517 "d12", "d13", "d14", "d15",
2518 "d16", "d17", "d18", "d19",
2519 "d20", "d21", "d22", "d23",
2520 "d24", "d25", "d26", "d27",
2521 "d28", "d29", "d30", "d31",
2524 static const char *const s_name
[] =
2526 "s0", "s1", "s2", "s3",
2527 "s4", "s5", "s6", "s7",
2528 "s8", "s9", "s10", "s11",
2529 "s12", "s13", "s14", "s15",
2530 "s16", "s17", "s18", "s19",
2531 "s20", "s21", "s22", "s23",
2532 "s24", "s25", "s26", "s27",
2533 "s28", "s29", "s30", "s31",
2536 static const char *const h_name
[] =
2538 "h0", "h1", "h2", "h3",
2539 "h4", "h5", "h6", "h7",
2540 "h8", "h9", "h10", "h11",
2541 "h12", "h13", "h14", "h15",
2542 "h16", "h17", "h18", "h19",
2543 "h20", "h21", "h22", "h23",
2544 "h24", "h25", "h26", "h27",
2545 "h28", "h29", "h30", "h31",
2548 static const char *const b_name
[] =
2550 "b0", "b1", "b2", "b3",
2551 "b4", "b5", "b6", "b7",
2552 "b8", "b9", "b10", "b11",
2553 "b12", "b13", "b14", "b15",
2554 "b16", "b17", "b18", "b19",
2555 "b20", "b21", "b22", "b23",
2556 "b24", "b25", "b26", "b27",
2557 "b28", "b29", "b30", "b31",
2560 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2562 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2563 return q_name
[p_regnum
- AARCH64_Q0_REGNUM
];
2565 if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2566 return d_name
[p_regnum
- AARCH64_D0_REGNUM
];
2568 if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2569 return s_name
[p_regnum
- AARCH64_S0_REGNUM
];
2571 if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2572 return h_name
[p_regnum
- AARCH64_H0_REGNUM
];
2574 if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2575 return b_name
[p_regnum
- AARCH64_B0_REGNUM
];
2577 if (tdep
->has_sve ())
2579 static const char *const sve_v_name
[] =
2581 "v0", "v1", "v2", "v3",
2582 "v4", "v5", "v6", "v7",
2583 "v8", "v9", "v10", "v11",
2584 "v12", "v13", "v14", "v15",
2585 "v16", "v17", "v18", "v19",
2586 "v20", "v21", "v22", "v23",
2587 "v24", "v25", "v26", "v27",
2588 "v28", "v29", "v30", "v31",
2591 if (p_regnum
>= AARCH64_SVE_V0_REGNUM
2592 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2593 return sve_v_name
[p_regnum
- AARCH64_SVE_V0_REGNUM
];
2596 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2597 prevents it from being read by methods such as
2598 mi_cmd_trace_frame_collected. */
2599 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2602 internal_error (__FILE__
, __LINE__
,
2603 _("aarch64_pseudo_register_name: bad register number %d"),
2607 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2609 static struct type
*
2610 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2612 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2614 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2616 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2617 return aarch64_vnq_type (gdbarch
);
2619 if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2620 return aarch64_vnd_type (gdbarch
);
2622 if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2623 return aarch64_vns_type (gdbarch
);
2625 if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2626 return aarch64_vnh_type (gdbarch
);
2628 if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2629 return aarch64_vnb_type (gdbarch
);
2631 if (tdep
->has_sve () && p_regnum
>= AARCH64_SVE_V0_REGNUM
2632 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2633 return aarch64_vnv_type (gdbarch
);
2635 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2636 return builtin_type (gdbarch
)->builtin_uint64
;
2638 internal_error (__FILE__
, __LINE__
,
2639 _("aarch64_pseudo_register_type: bad register number %d"),
2643 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2646 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2647 struct reggroup
*group
)
2649 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2651 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2653 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2654 return group
== all_reggroup
|| group
== vector_reggroup
;
2655 else if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2656 return (group
== all_reggroup
|| group
== vector_reggroup
2657 || group
== float_reggroup
);
2658 else if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2659 return (group
== all_reggroup
|| group
== vector_reggroup
2660 || group
== float_reggroup
);
2661 else if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2662 return group
== all_reggroup
|| group
== vector_reggroup
;
2663 else if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2664 return group
== all_reggroup
|| group
== vector_reggroup
;
2665 else if (tdep
->has_sve () && p_regnum
>= AARCH64_SVE_V0_REGNUM
2666 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2667 return group
== all_reggroup
|| group
== vector_reggroup
;
2668 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2669 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2672 return group
== all_reggroup
;
2675 /* Helper for aarch64_pseudo_read_value. */
2677 static struct value
*
2678 aarch64_pseudo_read_value_1 (struct gdbarch
*gdbarch
,
2679 readable_regcache
*regcache
, int regnum_offset
,
2680 int regsize
, struct value
*result_value
)
2682 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2684 /* Enough space for a full vector register. */
2685 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2686 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2688 if (regcache
->raw_read (v_regnum
, reg_buf
) != REG_VALID
)
2689 mark_value_bytes_unavailable (result_value
, 0,
2690 TYPE_LENGTH (value_type (result_value
)));
2692 memcpy (value_contents_raw (result_value
), reg_buf
, regsize
);
2694 return result_value
;
2697 /* Implement the "pseudo_register_read_value" gdbarch method. */
2699 static struct value
*
2700 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
, readable_regcache
*regcache
,
2703 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2704 struct value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
2706 VALUE_LVAL (result_value
) = lval_register
;
2707 VALUE_REGNUM (result_value
) = regnum
;
2709 regnum
-= gdbarch_num_regs (gdbarch
);
2711 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2712 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2713 regnum
- AARCH64_Q0_REGNUM
,
2714 Q_REGISTER_SIZE
, result_value
);
2716 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2717 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2718 regnum
- AARCH64_D0_REGNUM
,
2719 D_REGISTER_SIZE
, result_value
);
2721 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2722 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2723 regnum
- AARCH64_S0_REGNUM
,
2724 S_REGISTER_SIZE
, result_value
);
2726 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2727 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2728 regnum
- AARCH64_H0_REGNUM
,
2729 H_REGISTER_SIZE
, result_value
);
2731 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2732 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2733 regnum
- AARCH64_B0_REGNUM
,
2734 B_REGISTER_SIZE
, result_value
);
2736 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2737 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2738 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2739 regnum
- AARCH64_SVE_V0_REGNUM
,
2740 V_REGISTER_SIZE
, result_value
);
2742 gdb_assert_not_reached ("regnum out of bound");
2745 /* Helper for aarch64_pseudo_write. */
2748 aarch64_pseudo_write_1 (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2749 int regnum_offset
, int regsize
, const gdb_byte
*buf
)
2751 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2753 /* Enough space for a full vector register. */
2754 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2755 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2757 /* Ensure the register buffer is zero, we want gdb writes of the
2758 various 'scalar' pseudo registers to behavior like architectural
2759 writes, register width bytes are written the remainder are set to
2761 memset (reg_buf
, 0, register_size (gdbarch
, AARCH64_V0_REGNUM
));
2763 memcpy (reg_buf
, buf
, regsize
);
2764 regcache
->raw_write (v_regnum
, reg_buf
);
2767 /* Implement the "pseudo_register_write" gdbarch method. */
2770 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2771 int regnum
, const gdb_byte
*buf
)
2773 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2774 regnum
-= gdbarch_num_regs (gdbarch
);
2776 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2777 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2778 regnum
- AARCH64_Q0_REGNUM
, Q_REGISTER_SIZE
,
2781 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2782 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2783 regnum
- AARCH64_D0_REGNUM
, D_REGISTER_SIZE
,
2786 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2787 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2788 regnum
- AARCH64_S0_REGNUM
, S_REGISTER_SIZE
,
2791 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2792 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2793 regnum
- AARCH64_H0_REGNUM
, H_REGISTER_SIZE
,
2796 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2797 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2798 regnum
- AARCH64_B0_REGNUM
, B_REGISTER_SIZE
,
2801 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2802 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2803 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2804 regnum
- AARCH64_SVE_V0_REGNUM
,
2805 V_REGISTER_SIZE
, buf
);
2807 gdb_assert_not_reached ("regnum out of bound");
2810 /* Callback function for user_reg_add. */
2812 static struct value
*
2813 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2815 const int *reg_p
= (const int *) baton
;
2817 return value_of_register (*reg_p
, frame
);
2821 /* Implement the "software_single_step" gdbarch method, needed to
2822 single step through atomic sequences on AArch64. */
2824 static std::vector
<CORE_ADDR
>
2825 aarch64_software_single_step (struct regcache
*regcache
)
2827 struct gdbarch
*gdbarch
= regcache
->arch ();
2828 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2829 const int insn_size
= 4;
2830 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2831 CORE_ADDR pc
= regcache_read_pc (regcache
);
2832 CORE_ADDR breaks
[2] = { CORE_ADDR_MAX
, CORE_ADDR_MAX
};
2834 CORE_ADDR closing_insn
= 0;
2835 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2836 byte_order_for_code
);
2839 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2840 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2843 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2846 /* Look for a Load Exclusive instruction which begins the sequence. */
2847 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2850 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2853 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2854 byte_order_for_code
);
2856 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2858 /* Check if the instruction is a conditional branch. */
2859 if (inst
.opcode
->iclass
== condbranch
)
2861 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2863 if (bc_insn_count
>= 1)
2866 /* It is, so we'll try to set a breakpoint at the destination. */
2867 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2873 /* Look for the Store Exclusive which closes the atomic sequence. */
2874 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2881 /* We didn't find a closing Store Exclusive instruction, fall back. */
2885 /* Insert breakpoint after the end of the atomic sequence. */
2886 breaks
[0] = loc
+ insn_size
;
2888 /* Check for duplicated breakpoints, and also check that the second
2889 breakpoint is not within the atomic sequence. */
2891 && (breaks
[1] == breaks
[0]
2892 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2893 last_breakpoint
= 0;
2895 std::vector
<CORE_ADDR
> next_pcs
;
2897 /* Insert the breakpoint at the end of the sequence, and one at the
2898 destination of the conditional branch, if it exists. */
2899 for (index
= 0; index
<= last_breakpoint
; index
++)
2900 next_pcs
.push_back (breaks
[index
]);
2905 struct aarch64_displaced_step_copy_insn_closure
2906 : public displaced_step_copy_insn_closure
2908 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2909 is being displaced stepping. */
2912 /* PC adjustment offset after displaced stepping. If 0, then we don't
2913 write the PC back, assuming the PC is already the right address. */
2914 int32_t pc_adjust
= 0;
2917 /* Data when visiting instructions for displaced stepping. */
2919 struct aarch64_displaced_step_data
2921 struct aarch64_insn_data base
;
2923 /* The address where the instruction will be executed at. */
2925 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2926 uint32_t insn_buf
[AARCH64_DISPLACED_MODIFIED_INSNS
];
2927 /* Number of instructions in INSN_BUF. */
2928 unsigned insn_count
;
2929 /* Registers when doing displaced stepping. */
2930 struct regcache
*regs
;
2932 aarch64_displaced_step_copy_insn_closure
*dsc
;
2935 /* Implementation of aarch64_insn_visitor method "b". */
2938 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2939 struct aarch64_insn_data
*data
)
2941 struct aarch64_displaced_step_data
*dsd
2942 = (struct aarch64_displaced_step_data
*) data
;
2943 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2945 if (can_encode_int32 (new_offset
, 28))
2947 /* Emit B rather than BL, because executing BL on a new address
2948 will get the wrong address into LR. In order to avoid this,
2949 we emit B, and update LR if the instruction is BL. */
2950 emit_b (dsd
->insn_buf
, 0, new_offset
);
2956 emit_nop (dsd
->insn_buf
);
2958 dsd
->dsc
->pc_adjust
= offset
;
2964 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2965 data
->insn_addr
+ 4);
2969 /* Implementation of aarch64_insn_visitor method "b_cond". */
2972 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2973 struct aarch64_insn_data
*data
)
2975 struct aarch64_displaced_step_data
*dsd
2976 = (struct aarch64_displaced_step_data
*) data
;
2978 /* GDB has to fix up PC after displaced step this instruction
2979 differently according to the condition is true or false. Instead
2980 of checking COND against conditional flags, we can use
2981 the following instructions, and GDB can tell how to fix up PC
2982 according to the PC value.
2984 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2990 emit_bcond (dsd
->insn_buf
, cond
, 8);
2991 dsd
->dsc
->cond
= true;
2992 dsd
->dsc
->pc_adjust
= offset
;
2993 dsd
->insn_count
= 1;
2996 /* Dynamically allocate a new register. If we know the register
2997 statically, we should make it a global as above instead of using this
3000 static struct aarch64_register
3001 aarch64_register (unsigned num
, int is64
)
3003 return (struct aarch64_register
) { num
, is64
};
3006 /* Implementation of aarch64_insn_visitor method "cb". */
3009 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
3010 const unsigned rn
, int is64
,
3011 struct aarch64_insn_data
*data
)
3013 struct aarch64_displaced_step_data
*dsd
3014 = (struct aarch64_displaced_step_data
*) data
;
3016 /* The offset is out of range for a compare and branch
3017 instruction. We can use the following instructions instead:
3019 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3024 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
3025 dsd
->insn_count
= 1;
3026 dsd
->dsc
->cond
= true;
3027 dsd
->dsc
->pc_adjust
= offset
;
3030 /* Implementation of aarch64_insn_visitor method "tb". */
3033 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
3034 const unsigned rt
, unsigned bit
,
3035 struct aarch64_insn_data
*data
)
3037 struct aarch64_displaced_step_data
*dsd
3038 = (struct aarch64_displaced_step_data
*) data
;
3040 /* The offset is out of range for a test bit and branch
3041 instruction We can use the following instructions instead:
3043 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3049 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
3050 dsd
->insn_count
= 1;
3051 dsd
->dsc
->cond
= true;
3052 dsd
->dsc
->pc_adjust
= offset
;
3055 /* Implementation of aarch64_insn_visitor method "adr". */
3058 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
3059 const int is_adrp
, struct aarch64_insn_data
*data
)
3061 struct aarch64_displaced_step_data
*dsd
3062 = (struct aarch64_displaced_step_data
*) data
;
3063 /* We know exactly the address the ADR{P,} instruction will compute.
3064 We can just write it to the destination register. */
3065 CORE_ADDR address
= data
->insn_addr
+ offset
;
3069 /* Clear the lower 12 bits of the offset to get the 4K page. */
3070 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
3074 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
3077 dsd
->dsc
->pc_adjust
= 4;
3078 emit_nop (dsd
->insn_buf
);
3079 dsd
->insn_count
= 1;
3082 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3085 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
3086 const unsigned rt
, const int is64
,
3087 struct aarch64_insn_data
*data
)
3089 struct aarch64_displaced_step_data
*dsd
3090 = (struct aarch64_displaced_step_data
*) data
;
3091 CORE_ADDR address
= data
->insn_addr
+ offset
;
3092 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
3094 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
3098 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
3099 aarch64_register (rt
, 1), zero
);
3101 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
3102 aarch64_register (rt
, 1), zero
);
3104 dsd
->dsc
->pc_adjust
= 4;
3107 /* Implementation of aarch64_insn_visitor method "others". */
3110 aarch64_displaced_step_others (const uint32_t insn
,
3111 struct aarch64_insn_data
*data
)
3113 struct aarch64_displaced_step_data
*dsd
3114 = (struct aarch64_displaced_step_data
*) data
;
3116 aarch64_emit_insn (dsd
->insn_buf
, insn
);
3117 dsd
->insn_count
= 1;
3119 if ((insn
& 0xfffffc1f) == 0xd65f0000)
3122 dsd
->dsc
->pc_adjust
= 0;
3125 dsd
->dsc
->pc_adjust
= 4;
3128 static const struct aarch64_insn_visitor visitor
=
3130 aarch64_displaced_step_b
,
3131 aarch64_displaced_step_b_cond
,
3132 aarch64_displaced_step_cb
,
3133 aarch64_displaced_step_tb
,
3134 aarch64_displaced_step_adr
,
3135 aarch64_displaced_step_ldr_literal
,
3136 aarch64_displaced_step_others
,
3139 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3141 displaced_step_copy_insn_closure_up
3142 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
3143 CORE_ADDR from
, CORE_ADDR to
,
3144 struct regcache
*regs
)
3146 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
3147 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
3148 struct aarch64_displaced_step_data dsd
;
3151 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
3154 /* Look for a Load Exclusive instruction which begins the sequence. */
3155 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
3157 /* We can't displaced step atomic sequences. */
3161 std::unique_ptr
<aarch64_displaced_step_copy_insn_closure
> dsc
3162 (new aarch64_displaced_step_copy_insn_closure
);
3163 dsd
.base
.insn_addr
= from
;
3166 dsd
.dsc
= dsc
.get ();
3168 aarch64_relocate_instruction (insn
, &visitor
,
3169 (struct aarch64_insn_data
*) &dsd
);
3170 gdb_assert (dsd
.insn_count
<= AARCH64_DISPLACED_MODIFIED_INSNS
);
3172 if (dsd
.insn_count
!= 0)
3176 /* Instruction can be relocated to scratch pad. Copy
3177 relocated instruction(s) there. */
3178 for (i
= 0; i
< dsd
.insn_count
; i
++)
3180 displaced_debug_printf ("writing insn %.8x at %s",
3182 paddress (gdbarch
, to
+ i
* 4));
3184 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
3185 (ULONGEST
) dsd
.insn_buf
[i
]);
3193 /* This is a work around for a problem with g++ 4.8. */
3194 return displaced_step_copy_insn_closure_up (dsc
.release ());
3197 /* Implement the "displaced_step_fixup" gdbarch method. */
3200 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
3201 struct displaced_step_copy_insn_closure
*dsc_
,
3202 CORE_ADDR from
, CORE_ADDR to
,
3203 struct regcache
*regs
)
3205 aarch64_displaced_step_copy_insn_closure
*dsc
3206 = (aarch64_displaced_step_copy_insn_closure
*) dsc_
;
3210 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
3212 displaced_debug_printf ("PC after stepping: %s (was %s).",
3213 paddress (gdbarch
, pc
), paddress (gdbarch
, to
));
3217 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3222 /* Condition is true. */
3224 else if (pc
- to
== 4)
3226 /* Condition is false. */
3230 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3232 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3236 displaced_debug_printf ("%s PC by %d",
3237 dsc
->pc_adjust
? "adjusting" : "not adjusting",
3240 if (dsc
->pc_adjust
!= 0)
3242 /* Make sure the previous instruction was executed (that is, the PC
3243 has changed). If the PC didn't change, then discard the adjustment
3244 offset. Otherwise we may skip an instruction before its execution
3248 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3252 displaced_debug_printf ("fixup: set PC to %s:%d",
3253 paddress (gdbarch
, from
), dsc
->pc_adjust
);
3255 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
3256 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   Displaced stepping on AArch64 always uses hardware single-step to
   execute the relocated instruction in the scratch pad.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
3268 /* Get the correct target description for the given VQ value.
3269 If VQ is zero then it is assumed SVE is not supported.
3270 (It is not possible to set VQ to zero on an SVE system). */
3273 aarch64_read_description (uint64_t vq
, bool pauth_p
)
3275 if (vq
> AARCH64_MAX_SVE_VQ
)
3276 error (_("VQ is %" PRIu64
", maximum supported value is %d"), vq
,
3277 AARCH64_MAX_SVE_VQ
);
3279 struct target_desc
*tdesc
= tdesc_aarch64_list
[vq
][pauth_p
];
3283 tdesc
= aarch64_create_target_description (vq
, pauth_p
);
3284 tdesc_aarch64_list
[vq
][pauth_p
] = tdesc
;
3290 /* Return the VQ used when creating the target description TDESC. */
3293 aarch64_get_tdesc_vq (const struct target_desc
*tdesc
)
3295 const struct tdesc_feature
*feature_sve
;
3297 if (!tdesc_has_registers (tdesc
))
3300 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3302 if (feature_sve
== nullptr)
3305 uint64_t vl
= tdesc_register_bitsize (feature_sve
,
3306 aarch64_sve_register_names
[0]) / 8;
3307 return sve_vq_from_vl (vl
);
3310 /* Add all the expected register sets into GDBARCH. */
3313 aarch64_add_reggroups (struct gdbarch
*gdbarch
)
3315 reggroup_add (gdbarch
, general_reggroup
);
3316 reggroup_add (gdbarch
, float_reggroup
);
3317 reggroup_add (gdbarch
, system_reggroup
);
3318 reggroup_add (gdbarch
, vector_reggroup
);
3319 reggroup_add (gdbarch
, all_reggroup
);
3320 reggroup_add (gdbarch
, save_reggroup
);
3321 reggroup_add (gdbarch
, restore_reggroup
);
3324 /* Implement the "cannot_store_register" gdbarch method. */
3327 aarch64_cannot_store_register (struct gdbarch
*gdbarch
, int regnum
)
3329 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3331 if (!tdep
->has_pauth ())
3334 /* Pointer authentication registers are read-only. */
3335 return (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
3336 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
));
3339 /* Initialize the current architecture based on INFO. If possible,
3340 re-use an architecture from ARCHES, which is a list of
3341 architectures already created during this debugging session.
3343 Called e.g. at program startup, when reading a core file, and when
3344 reading a binary file. */
3346 static struct gdbarch
*
3347 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
3349 const struct tdesc_feature
*feature_core
, *feature_fpu
, *feature_sve
;
3350 const struct tdesc_feature
*feature_pauth
;
3351 bool valid_p
= true;
3352 int i
, num_regs
= 0, num_pseudo_regs
= 0;
3353 int first_pauth_regnum
= -1, pauth_ra_state_offset
= -1;
3355 /* Use the vector length passed via the target info. Here -1 is used for no
3356 SVE, and 0 is unset. If unset then use the vector length from the existing
3359 if (info
.id
== (int *) -1)
3361 else if (info
.id
!= 0)
3362 vq
= (uint64_t) info
.id
;
3364 vq
= aarch64_get_tdesc_vq (info
.target_desc
);
3366 if (vq
> AARCH64_MAX_SVE_VQ
)
3367 internal_error (__FILE__
, __LINE__
, _("VQ out of bounds: %s (max %d)"),
3368 pulongest (vq
), AARCH64_MAX_SVE_VQ
);
3370 /* If there is already a candidate, use it. */
3371 for (gdbarch_list
*best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
3372 best_arch
!= nullptr;
3373 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
3375 struct gdbarch_tdep
*tdep
= gdbarch_tdep (best_arch
->gdbarch
);
3376 if (tdep
&& tdep
->vq
== vq
)
3377 return best_arch
->gdbarch
;
3380 /* Ensure we always have a target descriptor, and that it is for the given VQ
3382 const struct target_desc
*tdesc
= info
.target_desc
;
3383 if (!tdesc_has_registers (tdesc
) || vq
!= aarch64_get_tdesc_vq (tdesc
))
3384 tdesc
= aarch64_read_description (vq
, false);
3387 feature_core
= tdesc_find_feature (tdesc
,"org.gnu.gdb.aarch64.core");
3388 feature_fpu
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
3389 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3390 feature_pauth
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.pauth");
3392 if (feature_core
== nullptr)
3395 tdesc_arch_data_up tdesc_data
= tdesc_data_alloc ();
3397 /* Validate the description provides the mandatory core R registers
3398 and allocate their numbers. */
3399 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
3400 valid_p
&= tdesc_numbered_register (feature_core
, tdesc_data
.get (),
3401 AARCH64_X0_REGNUM
+ i
,
3402 aarch64_r_register_names
[i
]);
3404 num_regs
= AARCH64_X0_REGNUM
+ i
;
3406 /* Add the V registers. */
3407 if (feature_fpu
!= nullptr)
3409 if (feature_sve
!= nullptr)
3410 error (_("Program contains both fpu and SVE features."));
3412 /* Validate the description provides the mandatory V registers
3413 and allocate their numbers. */
3414 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
3415 valid_p
&= tdesc_numbered_register (feature_fpu
, tdesc_data
.get (),
3416 AARCH64_V0_REGNUM
+ i
,
3417 aarch64_v_register_names
[i
]);
3419 num_regs
= AARCH64_V0_REGNUM
+ i
;
3422 /* Add the SVE registers. */
3423 if (feature_sve
!= nullptr)
3425 /* Validate the description provides the mandatory SVE registers
3426 and allocate their numbers. */
3427 for (i
= 0; i
< ARRAY_SIZE (aarch64_sve_register_names
); i
++)
3428 valid_p
&= tdesc_numbered_register (feature_sve
, tdesc_data
.get (),
3429 AARCH64_SVE_Z0_REGNUM
+ i
,
3430 aarch64_sve_register_names
[i
]);
3432 num_regs
= AARCH64_SVE_Z0_REGNUM
+ i
;
3433 num_pseudo_regs
+= 32; /* add the Vn register pseudos. */
3436 if (feature_fpu
!= nullptr || feature_sve
!= nullptr)
3438 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
3439 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
3440 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
3441 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
3442 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
3445 /* Add the pauth registers. */
3446 if (feature_pauth
!= NULL
)
3448 first_pauth_regnum
= num_regs
;
3449 pauth_ra_state_offset
= num_pseudo_regs
;
3450 /* Validate the descriptor provides the mandatory PAUTH registers and
3451 allocate their numbers. */
3452 for (i
= 0; i
< ARRAY_SIZE (aarch64_pauth_register_names
); i
++)
3453 valid_p
&= tdesc_numbered_register (feature_pauth
, tdesc_data
.get (),
3454 first_pauth_regnum
+ i
,
3455 aarch64_pauth_register_names
[i
]);
3458 num_pseudo_regs
+= 1; /* Count RA_STATE pseudo register. */
3464 /* AArch64 code is always little-endian. */
3465 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
3467 struct gdbarch_tdep
*tdep
= XCNEW (struct gdbarch_tdep
);
3468 struct gdbarch
*gdbarch
= gdbarch_alloc (&info
, tdep
);
3470 /* This should be low enough for everything. */
3471 tdep
->lowest_pc
= 0x20;
3472 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
3473 tdep
->jb_elt_size
= 8;
3475 tdep
->pauth_reg_base
= first_pauth_regnum
;
3476 tdep
->pauth_ra_state_regnum
= (feature_pauth
== NULL
) ? -1
3477 : pauth_ra_state_offset
+ num_regs
;
3479 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
3480 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
3482 /* Advance PC across function entry code. */
3483 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
3485 /* The stack grows downward. */
3486 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
3488 /* Breakpoint manipulation. */
3489 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
3490 aarch64_breakpoint::kind_from_pc
);
3491 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
3492 aarch64_breakpoint::bp_from_kind
);
3493 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
3494 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
3496 /* Information about registers, etc. */
3497 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
3498 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
3499 set_gdbarch_num_regs (gdbarch
, num_regs
);
3501 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
3502 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
3503 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
3504 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
3505 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
3506 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
3507 aarch64_pseudo_register_reggroup_p
);
3508 set_gdbarch_cannot_store_register (gdbarch
, aarch64_cannot_store_register
);
3511 set_gdbarch_short_bit (gdbarch
, 16);
3512 set_gdbarch_int_bit (gdbarch
, 32);
3513 set_gdbarch_float_bit (gdbarch
, 32);
3514 set_gdbarch_double_bit (gdbarch
, 64);
3515 set_gdbarch_long_double_bit (gdbarch
, 128);
3516 set_gdbarch_long_bit (gdbarch
, 64);
3517 set_gdbarch_long_long_bit (gdbarch
, 64);
3518 set_gdbarch_ptr_bit (gdbarch
, 64);
3519 set_gdbarch_char_signed (gdbarch
, 0);
3520 set_gdbarch_wchar_signed (gdbarch
, 0);
3521 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
3522 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
3523 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
3524 set_gdbarch_type_align (gdbarch
, aarch64_type_align
);
3526 /* Internal <-> external register number maps. */
3527 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3529 /* Returning results. */
3530 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3533 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3535 /* Virtual tables. */
3536 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3538 /* Register architecture. */
3539 aarch64_add_reggroups (gdbarch
);
3541 /* Hook in the ABI-specific overrides, if they have been registered. */
3542 info
.target_desc
= tdesc
;
3543 info
.tdesc_data
= tdesc_data
.get ();
3544 gdbarch_init_osabi (info
, gdbarch
);
3546 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3547 /* Register DWARF CFA vendor handler. */
3548 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch
,
3549 aarch64_execute_dwarf_cfa_vendor_op
);
3551 /* Permanent/Program breakpoint handling. */
3552 set_gdbarch_program_breakpoint_here_p (gdbarch
,
3553 aarch64_program_breakpoint_here_p
);
3555 /* Add some default predicates. */
3556 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3557 dwarf2_append_unwinders (gdbarch
);
3558 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3560 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3562 /* Now we have tuned the configuration, set a few final things,
3563 based on what the OS ABI has told us. */
3565 if (tdep
->jb_pc
>= 0)
3566 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3568 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3570 set_gdbarch_get_pc_address_flags (gdbarch
, aarch64_get_pc_address_flags
);
3572 tdesc_use_registers (gdbarch
, tdesc
, std::move (tdesc_data
));
3574 /* Add standard register aliases. */
3575 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3576 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3577 value_of_aarch64_user_reg
,
3578 &aarch64_register_aliases
[i
].regnum
);
3580 register_aarch64_ravenscar_ops (gdbarch
);
3586 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3588 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3593 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3594 paddress (gdbarch
, tdep
->lowest_pc
));
3600 static void aarch64_process_record_test (void);
3604 void _initialize_aarch64_tdep ();
3606 _initialize_aarch64_tdep ()
3608 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3611 /* Debug this file's internals. */
3612 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3613 Set AArch64 debugging."), _("\
3614 Show AArch64 debugging."), _("\
3615 When on, AArch64 specific debugging is enabled."),
3618 &setdebuglist
, &showdebuglist
);
3621 selftests::register_test ("aarch64-analyze-prologue",
3622 selftests::aarch64_analyze_prologue_test
);
3623 selftests::register_test ("aarch64-process-record",
3624 selftests::aarch64_process_record_test
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS (an array of uint32_t register numbers) and copy LENGTH
   entries into it from RECORD_BUF.  No-op when LENGTH is zero.
   NOTE: LENGTH is expanded twice, so it must be free of side effects.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
	      } \
	  } \
	while (0)

/* Allocate MEMS (an array of struct aarch64_mem_r) and copy LENGTH
   records into it from RECORD_BUF.  No-op when LENGTH is zero.
   NOTE: LENGTH is expanded twice, so it must be free of side effects.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
		memcpy (&MEMS->len, &RECORD_BUF[0], \
			sizeof (struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory write: how many bytes were touched and where.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Outcome of decoding one instruction for process record.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,      /* Decoded and recorded.  */
  AARCH64_RECORD_UNSUPPORTED,  /* Recognized but not supported.  */
  AARCH64_RECORD_UNKNOWN       /* Could not decode.  */
};
3670 typedef struct insn_decode_record_t
3672 struct gdbarch
*gdbarch
;
3673 struct regcache
*regcache
;
3674 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3675 uint32_t aarch64_insn
; /* Insn to be recorded. */
3676 uint32_t mem_rec_count
; /* Count of memory records. */
3677 uint32_t reg_rec_count
; /* Count of register records. */
3678 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3679 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3680 } insn_decode_record
;
3682 /* Record handler for data processing - register instructions. */
3685 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3687 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3688 uint32_t record_buf
[4];
3690 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3691 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3692 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3694 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3698 /* Logical (shifted register). */
3699 if (insn_bits24_27
== 0x0a)
3700 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3702 else if (insn_bits24_27
== 0x0b)
3703 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3705 return AARCH64_RECORD_UNKNOWN
;
3707 record_buf
[0] = reg_rd
;
3708 aarch64_insn_r
->reg_rec_count
= 1;
3710 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3714 if (insn_bits24_27
== 0x0b)
3716 /* Data-processing (3 source). */
3717 record_buf
[0] = reg_rd
;
3718 aarch64_insn_r
->reg_rec_count
= 1;
3720 else if (insn_bits24_27
== 0x0a)
3722 if (insn_bits21_23
== 0x00)
3724 /* Add/subtract (with carry). */
3725 record_buf
[0] = reg_rd
;
3726 aarch64_insn_r
->reg_rec_count
= 1;
3727 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3729 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3730 aarch64_insn_r
->reg_rec_count
= 2;
3733 else if (insn_bits21_23
== 0x02)
3735 /* Conditional compare (register) and conditional compare
3736 (immediate) instructions. */
3737 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3738 aarch64_insn_r
->reg_rec_count
= 1;
3740 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3742 /* Conditional select. */
3743 /* Data-processing (2 source). */
3744 /* Data-processing (1 source). */
3745 record_buf
[0] = reg_rd
;
3746 aarch64_insn_r
->reg_rec_count
= 1;
3749 return AARCH64_RECORD_UNKNOWN
;
3753 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3755 return AARCH64_RECORD_SUCCESS
;
3758 /* Record handler for data processing - immediate instructions. */
3761 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3763 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3764 uint32_t record_buf
[4];
3766 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3767 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3768 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3770 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3771 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3772 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3774 record_buf
[0] = reg_rd
;
3775 aarch64_insn_r
->reg_rec_count
= 1;
3777 else if (insn_bits24_27
== 0x01)
3779 /* Add/Subtract (immediate). */
3780 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3781 record_buf
[0] = reg_rd
;
3782 aarch64_insn_r
->reg_rec_count
= 1;
3784 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3786 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3788 /* Logical (immediate). */
3789 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3790 record_buf
[0] = reg_rd
;
3791 aarch64_insn_r
->reg_rec_count
= 1;
3793 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3796 return AARCH64_RECORD_UNKNOWN
;
3798 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3800 return AARCH64_RECORD_SUCCESS
;
3803 /* Record handler for branch, exception generation and system instructions. */
3806 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3808 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3809 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3810 uint32_t record_buf
[4];
3812 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3813 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3814 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3816 if (insn_bits28_31
== 0x0d)
3818 /* Exception generation instructions. */
3819 if (insn_bits24_27
== 0x04)
3821 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3822 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3823 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3825 ULONGEST svc_number
;
3827 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3829 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3833 return AARCH64_RECORD_UNSUPPORTED
;
3835 /* System instructions. */
3836 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3838 uint32_t reg_rt
, reg_crn
;
3840 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3841 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3843 /* Record rt in case of sysl and mrs instructions. */
3844 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3846 record_buf
[0] = reg_rt
;
3847 aarch64_insn_r
->reg_rec_count
= 1;
3849 /* Record cpsr for hint and msr(immediate) instructions. */
3850 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3852 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3853 aarch64_insn_r
->reg_rec_count
= 1;
3856 /* Unconditional branch (register). */
3857 else if((insn_bits24_27
& 0x0e) == 0x06)
3859 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3860 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3861 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3864 return AARCH64_RECORD_UNKNOWN
;
3866 /* Unconditional branch (immediate). */
3867 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3869 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3870 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3871 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3874 /* Compare & branch (immediate), Test & branch (immediate) and
3875 Conditional branch (immediate). */
3876 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3878 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3880 return AARCH64_RECORD_SUCCESS
;
3883 /* Record handler for advanced SIMD load and store instructions. */
3886 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3889 uint64_t addr_offset
= 0;
3890 uint32_t record_buf
[24];
3891 uint64_t record_buf_mem
[24];
3892 uint32_t reg_rn
, reg_rt
;
3893 uint32_t reg_index
= 0, mem_index
= 0;
3894 uint8_t opcode_bits
, size_bits
;
3896 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3897 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3898 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3899 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3900 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3903 debug_printf ("Process record: Advanced SIMD load/store\n");
3905 /* Load/store single structure. */
3906 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3908 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3909 scale
= opcode_bits
>> 2;
3910 selem
= ((opcode_bits
& 0x02) |
3911 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3915 if (size_bits
& 0x01)
3916 return AARCH64_RECORD_UNKNOWN
;
3919 if ((size_bits
>> 1) & 0x01)
3920 return AARCH64_RECORD_UNKNOWN
;
3921 if (size_bits
& 0x01)
3923 if (!((opcode_bits
>> 1) & 0x01))
3926 return AARCH64_RECORD_UNKNOWN
;
3930 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3937 return AARCH64_RECORD_UNKNOWN
;
3943 for (sindex
= 0; sindex
< selem
; sindex
++)
3945 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3946 reg_rt
= (reg_rt
+ 1) % 32;
3950 for (sindex
= 0; sindex
< selem
; sindex
++)
3952 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3953 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3956 record_buf_mem
[mem_index
++] = esize
/ 8;
3957 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3959 addr_offset
= addr_offset
+ (esize
/ 8);
3960 reg_rt
= (reg_rt
+ 1) % 32;
3964 /* Load/store multiple structure. */
3967 uint8_t selem
, esize
, rpt
, elements
;
3968 uint8_t eindex
, rindex
;
3970 esize
= 8 << size_bits
;
3971 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3972 elements
= 128 / esize
;
3974 elements
= 64 / esize
;
3976 switch (opcode_bits
)
3978 /*LD/ST4 (4 Registers). */
3983 /*LD/ST1 (4 Registers). */
3988 /*LD/ST3 (3 Registers). */
3993 /*LD/ST1 (3 Registers). */
3998 /*LD/ST1 (1 Register). */
4003 /*LD/ST2 (2 Registers). */
4008 /*LD/ST1 (2 Registers). */
4014 return AARCH64_RECORD_UNSUPPORTED
;
4017 for (rindex
= 0; rindex
< rpt
; rindex
++)
4018 for (eindex
= 0; eindex
< elements
; eindex
++)
4020 uint8_t reg_tt
, sindex
;
4021 reg_tt
= (reg_rt
+ rindex
) % 32;
4022 for (sindex
= 0; sindex
< selem
; sindex
++)
4024 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
4025 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
4028 record_buf_mem
[mem_index
++] = esize
/ 8;
4029 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
4031 addr_offset
= addr_offset
+ (esize
/ 8);
4032 reg_tt
= (reg_tt
+ 1) % 32;
4037 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
4038 record_buf
[reg_index
++] = reg_rn
;
4040 aarch64_insn_r
->reg_rec_count
= reg_index
;
4041 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
4042 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
4044 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4046 return AARCH64_RECORD_SUCCESS
;
4049 /* Record handler for load and store instructions. */
4052 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
4054 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
4055 uint8_t insn_bit23
, insn_bit21
;
4056 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
4057 uint32_t reg_rn
, reg_rt
, reg_rt2
;
4058 uint64_t datasize
, offset
;
4059 uint32_t record_buf
[8];
4060 uint64_t record_buf_mem
[8];
4063 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
4064 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
4065 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
4066 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
4067 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
4068 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
4069 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4070 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
4071 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
4072 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
4073 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
4075 /* Load/store exclusive. */
4076 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
4079 debug_printf ("Process record: load/store exclusive\n");
4083 record_buf
[0] = reg_rt
;
4084 aarch64_insn_r
->reg_rec_count
= 1;
4087 record_buf
[1] = reg_rt2
;
4088 aarch64_insn_r
->reg_rec_count
= 2;
4094 datasize
= (8 << size_bits
) * 2;
4096 datasize
= (8 << size_bits
);
4097 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4099 record_buf_mem
[0] = datasize
/ 8;
4100 record_buf_mem
[1] = address
;
4101 aarch64_insn_r
->mem_rec_count
= 1;
4104 /* Save register rs. */
4105 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
4106 aarch64_insn_r
->reg_rec_count
= 1;
4110 /* Load register (literal) instructions decoding. */
4111 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
4114 debug_printf ("Process record: load register (literal)\n");
4116 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4118 record_buf
[0] = reg_rt
;
4119 aarch64_insn_r
->reg_rec_count
= 1;
4121 /* All types of load/store pair instructions decoding. */
4122 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
4125 debug_printf ("Process record: load/store pair\n");
4131 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4132 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
4136 record_buf
[0] = reg_rt
;
4137 record_buf
[1] = reg_rt2
;
4139 aarch64_insn_r
->reg_rec_count
= 2;
4144 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
4146 size_bits
= size_bits
>> 1;
4147 datasize
= 8 << (2 + size_bits
);
4148 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
4149 offset
= offset
<< (2 + size_bits
);
4150 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4152 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
4154 if (imm7_off
& 0x40)
4155 address
= address
- offset
;
4157 address
= address
+ offset
;
4160 record_buf_mem
[0] = datasize
/ 8;
4161 record_buf_mem
[1] = address
;
4162 record_buf_mem
[2] = datasize
/ 8;
4163 record_buf_mem
[3] = address
+ (datasize
/ 8);
4164 aarch64_insn_r
->mem_rec_count
= 2;
4166 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
4167 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
4169 /* Load/store register (unsigned immediate) instructions. */
4170 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
4172 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4182 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
4184 /* PRFM (immediate) */
4185 return AARCH64_RECORD_SUCCESS
;
4187 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
4189 /* LDRSW (immediate) */
4203 debug_printf ("Process record: load/store (unsigned immediate):"
4204 " size %x V %d opc %x\n", size_bits
, vector_flag
,
4210 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
4211 datasize
= 8 << size_bits
;
4212 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4214 offset
= offset
<< size_bits
;
4215 address
= address
+ offset
;
4217 record_buf_mem
[0] = datasize
>> 3;
4218 record_buf_mem
[1] = address
;
4219 aarch64_insn_r
->mem_rec_count
= 1;
4224 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4226 record_buf
[0] = reg_rt
;
4227 aarch64_insn_r
->reg_rec_count
= 1;
4230 /* Load/store register (register offset) instructions. */
4231 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4232 && insn_bits10_11
== 0x02 && insn_bit21
)
4235 debug_printf ("Process record: load/store (register offset)\n");
4236 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4243 if (size_bits
!= 0x03)
4246 return AARCH64_RECORD_UNKNOWN
;
4250 ULONGEST reg_rm_val
;
4252 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
4253 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
4254 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
4255 offset
= reg_rm_val
<< size_bits
;
4257 offset
= reg_rm_val
;
4258 datasize
= 8 << size_bits
;
4259 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4261 address
= address
+ offset
;
4262 record_buf_mem
[0] = datasize
>> 3;
4263 record_buf_mem
[1] = address
;
4264 aarch64_insn_r
->mem_rec_count
= 1;
4269 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4271 record_buf
[0] = reg_rt
;
4272 aarch64_insn_r
->reg_rec_count
= 1;
4275 /* Load/store register (immediate and unprivileged) instructions. */
4276 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4281 debug_printf ("Process record: load/store "
4282 "(immediate and unprivileged)\n");
4284 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4291 if (size_bits
!= 0x03)
4294 return AARCH64_RECORD_UNKNOWN
;
4299 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
4300 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
4301 datasize
= 8 << size_bits
;
4302 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4304 if (insn_bits10_11
!= 0x01)
4306 if (imm9_off
& 0x0100)
4307 address
= address
- offset
;
4309 address
= address
+ offset
;
4311 record_buf_mem
[0] = datasize
>> 3;
4312 record_buf_mem
[1] = address
;
4313 aarch64_insn_r
->mem_rec_count
= 1;
4318 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4320 record_buf
[0] = reg_rt
;
4321 aarch64_insn_r
->reg_rec_count
= 1;
4323 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
4324 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
4326 /* Advanced SIMD load/store instructions. */
4328 return aarch64_record_asimd_load_store (aarch64_insn_r
);
4330 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
4332 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4334 return AARCH64_RECORD_SUCCESS
;
4337 /* Record handler for data processing SIMD and floating point instructions. */
4340 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
4342 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
4343 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
4344 uint8_t insn_bits11_14
;
4345 uint32_t record_buf
[2];
4347 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
4348 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
4349 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
4350 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
4351 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
4352 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
4353 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
4354 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
4355 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
4358 debug_printf ("Process record: data processing SIMD/FP: ");
4360 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
4362 /* Floating point - fixed point conversion instructions. */
4366 debug_printf ("FP - fixed point conversion");
4368 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
4369 record_buf
[0] = reg_rd
;
4371 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4373 /* Floating point - conditional compare instructions. */
4374 else if (insn_bits10_11
== 0x01)
4377 debug_printf ("FP - conditional compare");
4379 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4381 /* Floating point - data processing (2-source) and
4382 conditional select instructions. */
4383 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
4386 debug_printf ("FP - DP (2-source)");
4388 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4390 else if (insn_bits10_11
== 0x00)
4392 /* Floating point - immediate instructions. */
4393 if ((insn_bits12_15
& 0x01) == 0x01
4394 || (insn_bits12_15
& 0x07) == 0x04)
4397 debug_printf ("FP - immediate");
4398 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4400 /* Floating point - compare instructions. */
4401 else if ((insn_bits12_15
& 0x03) == 0x02)
4404 debug_printf ("FP - immediate");
4405 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4407 /* Floating point - integer conversions instructions. */
4408 else if (insn_bits12_15
== 0x00)
4410 /* Convert float to integer instruction. */
4411 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4414 debug_printf ("float to int conversion");
4416 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4418 /* Convert integer to float instruction. */
4419 else if ((opcode
>> 1) == 0x01 && !rmode
)
4422 debug_printf ("int to float conversion");
4424 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4426 /* Move float to integer instruction. */
4427 else if ((opcode
>> 1) == 0x03)
4430 debug_printf ("move float to int");
4432 if (!(opcode
& 0x01))
4433 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4435 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4438 return AARCH64_RECORD_UNKNOWN
;
4441 return AARCH64_RECORD_UNKNOWN
;
4444 return AARCH64_RECORD_UNKNOWN
;
4446 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4449 debug_printf ("SIMD copy");
4451 /* Advanced SIMD copy instructions. */
4452 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4453 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4454 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4456 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4457 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4459 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4462 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4464 /* All remaining floating point or advanced SIMD instructions. */
4468 debug_printf ("all remain");
4470 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4474 debug_printf ("\n");
4476 /* Record the V/X register. */
4477 aarch64_insn_r
->reg_rec_count
++;
4479 /* Some of these instructions may set bits in the FPSR, so record it
4481 record_buf
[1] = AARCH64_FPSR_REGNUM
;
4482 aarch64_insn_r
->reg_rec_count
++;
4484 gdb_assert (aarch64_insn_r
->reg_rec_count
== 2);
4485 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4487 return AARCH64_RECORD_SUCCESS
;
4490 /* Decodes insns type and invokes its record handler. */
4493 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4495 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4497 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4498 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4499 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4500 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4502 /* Data processing - immediate instructions. */
4503 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4504 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4506 /* Branch, exception generation and system instructions. */
4507 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4508 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4510 /* Load and store instructions. */
4511 if (!ins_bit25
&& ins_bit27
)
4512 return aarch64_record_load_store (aarch64_insn_r
);
4514 /* Data processing - register instructions. */
4515 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4516 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4518 /* Data processing - SIMD and floating point instructions. */
4519 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4520 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4522 return AARCH64_RECORD_UNSUPPORTED
;
4525 /* Cleans up local record registers and memory allocations. */
4528 deallocate_reg_mem (insn_decode_record
*record
)
4530 xfree (record
->aarch64_regs
);
4531 xfree (record
->aarch64_mems
);
4535 namespace selftests
{
4538 aarch64_process_record_test (void)
4540 struct gdbarch_info info
;
4543 gdbarch_info_init (&info
);
4544 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
4546 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
4547 SELF_CHECK (gdbarch
!= NULL
);
4549 insn_decode_record aarch64_record
;
4551 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4552 aarch64_record
.regcache
= NULL
;
4553 aarch64_record
.this_addr
= 0;
4554 aarch64_record
.gdbarch
= gdbarch
;
4556 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4557 aarch64_record
.aarch64_insn
= 0xf9800020;
4558 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4559 SELF_CHECK (ret
== AARCH64_RECORD_SUCCESS
);
4560 SELF_CHECK (aarch64_record
.reg_rec_count
== 0);
4561 SELF_CHECK (aarch64_record
.mem_rec_count
== 0);
4563 deallocate_reg_mem (&aarch64_record
);
4566 } // namespace selftests
4567 #endif /* GDB_SELF_TEST */
4569 /* Parse the current instruction and record the values of the registers and
4570 memory that will be changed in current instruction to record_arch_list
4571 return -1 if something is wrong. */
4574 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4575 CORE_ADDR insn_addr
)
4577 uint32_t rec_no
= 0;
4578 uint8_t insn_size
= 4;
4580 gdb_byte buf
[insn_size
];
4581 insn_decode_record aarch64_record
;
4583 memset (&buf
[0], 0, insn_size
);
4584 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4585 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4586 aarch64_record
.aarch64_insn
4587 = (uint32_t) extract_unsigned_integer (&buf
[0],
4589 gdbarch_byte_order (gdbarch
));
4590 aarch64_record
.regcache
= regcache
;
4591 aarch64_record
.this_addr
= insn_addr
;
4592 aarch64_record
.gdbarch
= gdbarch
;
4594 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4595 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4597 printf_unfiltered (_("Process record does not support instruction "
4598 "0x%0x at address %s.\n"),
4599 aarch64_record
.aarch64_insn
,
4600 paddress (gdbarch
, insn_addr
));
4606 /* Record registers. */
4607 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4609 /* Always record register CPSR. */
4610 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4611 AARCH64_CPSR_REGNUM
);
4612 if (aarch64_record
.aarch64_regs
)
4613 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4614 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4615 aarch64_record
.aarch64_regs
[rec_no
]))
4618 /* Record memories. */
4619 if (aarch64_record
.aarch64_mems
)
4620 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4621 if (record_full_arch_list_add_mem
4622 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4623 aarch64_record
.aarch64_mems
[rec_no
].len
))
4626 if (record_full_arch_list_add_end ())
4630 deallocate_reg_mem (&aarch64_record
);