1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2020 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
28 #include "reggroups.h"
30 #include "arch-utils.h"
32 #include "frame-unwind.h"
33 #include "frame-base.h"
34 #include "trad-frame.h"
37 #include "dwarf2/frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
43 #include "gdbsupport/selftest.h"
45 #include "aarch64-tdep.h"
46 #include "aarch64-ravenscar-thread.h"
49 #include "record-full.h"
50 #include "arch/aarch64-insn.h"
53 #include "opcode/aarch64.h"
56 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
58 #define HA_MAX_NUM_FLDS 4
60 /* All possible aarch64 target descriptors. */
61 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1][2/*pauth*/];
63 /* The standard register names, and all the valid aliases for them. */
66 const char *const name
;
68 } aarch64_register_aliases
[] =
70 /* 64-bit register names. */
71 {"fp", AARCH64_FP_REGNUM
},
72 {"lr", AARCH64_LR_REGNUM
},
73 {"sp", AARCH64_SP_REGNUM
},
75 /* 32-bit register names. */
76 {"w0", AARCH64_X0_REGNUM
+ 0},
77 {"w1", AARCH64_X0_REGNUM
+ 1},
78 {"w2", AARCH64_X0_REGNUM
+ 2},
79 {"w3", AARCH64_X0_REGNUM
+ 3},
80 {"w4", AARCH64_X0_REGNUM
+ 4},
81 {"w5", AARCH64_X0_REGNUM
+ 5},
82 {"w6", AARCH64_X0_REGNUM
+ 6},
83 {"w7", AARCH64_X0_REGNUM
+ 7},
84 {"w8", AARCH64_X0_REGNUM
+ 8},
85 {"w9", AARCH64_X0_REGNUM
+ 9},
86 {"w10", AARCH64_X0_REGNUM
+ 10},
87 {"w11", AARCH64_X0_REGNUM
+ 11},
88 {"w12", AARCH64_X0_REGNUM
+ 12},
89 {"w13", AARCH64_X0_REGNUM
+ 13},
90 {"w14", AARCH64_X0_REGNUM
+ 14},
91 {"w15", AARCH64_X0_REGNUM
+ 15},
92 {"w16", AARCH64_X0_REGNUM
+ 16},
93 {"w17", AARCH64_X0_REGNUM
+ 17},
94 {"w18", AARCH64_X0_REGNUM
+ 18},
95 {"w19", AARCH64_X0_REGNUM
+ 19},
96 {"w20", AARCH64_X0_REGNUM
+ 20},
97 {"w21", AARCH64_X0_REGNUM
+ 21},
98 {"w22", AARCH64_X0_REGNUM
+ 22},
99 {"w23", AARCH64_X0_REGNUM
+ 23},
100 {"w24", AARCH64_X0_REGNUM
+ 24},
101 {"w25", AARCH64_X0_REGNUM
+ 25},
102 {"w26", AARCH64_X0_REGNUM
+ 26},
103 {"w27", AARCH64_X0_REGNUM
+ 27},
104 {"w28", AARCH64_X0_REGNUM
+ 28},
105 {"w29", AARCH64_X0_REGNUM
+ 29},
106 {"w30", AARCH64_X0_REGNUM
+ 30},
109 {"ip0", AARCH64_X0_REGNUM
+ 16},
110 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM! */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
};
/* The pointer-authentication mask registers.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};
175 /* AArch64 prologue cache structure. */
176 struct aarch64_prologue_cache
178 /* The program counter at the start of the function. It is used to
179 identify this frame as a prologue frame. */
182 /* The program counter at the time this frame was created; i.e. where
183 this function was called from. It is used to identify this frame as a
187 /* The stack pointer at the time this frame was created; i.e. the
188 caller's stack pointer when this function was called. It is used
189 to identify this frame. */
192 /* Is the target available to read from? */
195 /* The frame base for this frame is just prev_sp - frame size.
196 FRAMESIZE is the distance from the frame pointer to the
197 initial stack pointer. */
200 /* The register used to hold the frame pointer for this frame. */
203 /* Saved register offsets. */
204 struct trad_frame_saved_reg
*saved_regs
;
208 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
209 struct cmd_list_element
*c
, const char *value
)
211 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
216 /* Abstract instruction reader. */
218 class abstract_instruction_reader
221 /* Read in one instruction. */
222 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
223 enum bfd_endian byte_order
) = 0;
226 /* Instruction reader from real target. */
228 class instruction_reader
: public abstract_instruction_reader
231 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
234 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
240 /* If address signing is enabled, mask off the signature bits from the link
241 register, which is passed by value in ADDR, using the register values in
245 aarch64_frame_unmask_lr (struct gdbarch_tdep
*tdep
,
246 struct frame_info
*this_frame
, CORE_ADDR addr
)
248 if (tdep
->has_pauth ()
249 && frame_unwind_register_unsigned (this_frame
,
250 tdep
->pauth_ra_state_regnum
))
252 int cmask_num
= AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
);
253 CORE_ADDR cmask
= frame_unwind_register_unsigned (this_frame
, cmask_num
);
254 addr
= addr
& ~cmask
;
256 /* Record in the frame that the link register required unmasking. */
257 set_frame_previous_pc_masked (this_frame
);
263 /* Implement the "get_pc_address_flags" gdbarch method. */
266 aarch64_get_pc_address_flags (frame_info
*frame
, CORE_ADDR pc
)
268 if (pc
!= 0 && get_frame_pc_masked (frame
))
274 /* Analyze a prologue, looking for a recognizable stack frame
275 and frame pointer. Scan until we encounter a store that could
276 clobber the stack frame unexpectedly, or an unknown instruction. */
279 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
280 CORE_ADDR start
, CORE_ADDR limit
,
281 struct aarch64_prologue_cache
*cache
,
282 abstract_instruction_reader
& reader
)
284 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
287 /* Whether the stack has been set. This should be true when we notice a SP
288 to FP move or if we are using the SP as the base register for storing
289 data, in case the FP is ommitted. */
290 bool seen_stack_set
= false;
292 /* Track X registers and D registers in prologue. */
293 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
295 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
296 regs
[i
] = pv_register (i
, 0);
297 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
299 for (; start
< limit
; start
+= 4)
304 insn
= reader
.read (start
, 4, byte_order_for_code
);
306 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
309 if (inst
.opcode
->iclass
== addsub_imm
310 && (inst
.opcode
->op
== OP_ADD
311 || strcmp ("sub", inst
.opcode
->name
) == 0))
313 unsigned rd
= inst
.operands
[0].reg
.regno
;
314 unsigned rn
= inst
.operands
[1].reg
.regno
;
316 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
317 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
318 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
319 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
321 if (inst
.opcode
->op
== OP_ADD
)
323 regs
[rd
] = pv_add_constant (regs
[rn
],
324 inst
.operands
[2].imm
.value
);
328 regs
[rd
] = pv_add_constant (regs
[rn
],
329 -inst
.operands
[2].imm
.value
);
332 /* Did we move SP to FP? */
333 if (rn
== AARCH64_SP_REGNUM
&& rd
== AARCH64_FP_REGNUM
)
334 seen_stack_set
= true;
336 else if (inst
.opcode
->iclass
== pcreladdr
337 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
339 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
340 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
342 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
344 else if (inst
.opcode
->iclass
== branch_imm
)
346 /* Stop analysis on branch. */
349 else if (inst
.opcode
->iclass
== condbranch
)
351 /* Stop analysis on branch. */
354 else if (inst
.opcode
->iclass
== branch_reg
)
356 /* Stop analysis on branch. */
359 else if (inst
.opcode
->iclass
== compbranch
)
361 /* Stop analysis on branch. */
364 else if (inst
.opcode
->op
== OP_MOVZ
)
366 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
368 /* If this shows up before we set the stack, keep going. Otherwise
369 stop the analysis. */
373 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
375 else if (inst
.opcode
->iclass
== log_shift
376 && strcmp (inst
.opcode
->name
, "orr") == 0)
378 unsigned rd
= inst
.operands
[0].reg
.regno
;
379 unsigned rn
= inst
.operands
[1].reg
.regno
;
380 unsigned rm
= inst
.operands
[2].reg
.regno
;
382 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
383 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
384 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
386 if (inst
.operands
[2].shifter
.amount
== 0
387 && rn
== AARCH64_SP_REGNUM
)
393 debug_printf ("aarch64: prologue analysis gave up "
394 "addr=%s opcode=0x%x (orr x register)\n",
395 core_addr_to_string_nz (start
), insn
);
400 else if (inst
.opcode
->op
== OP_STUR
)
402 unsigned rt
= inst
.operands
[0].reg
.regno
;
403 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
404 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
406 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
407 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
408 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
409 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
412 (pv_add_constant (regs
[rn
], inst
.operands
[1].addr
.offset
.imm
),
415 /* Are we storing with SP as a base? */
416 if (rn
== AARCH64_SP_REGNUM
)
417 seen_stack_set
= true;
419 else if ((inst
.opcode
->iclass
== ldstpair_off
420 || (inst
.opcode
->iclass
== ldstpair_indexed
421 && inst
.operands
[2].addr
.preind
))
422 && strcmp ("stp", inst
.opcode
->name
) == 0)
424 /* STP with addressing mode Pre-indexed and Base register. */
427 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
428 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
429 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
431 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
432 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
433 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
434 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
435 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
436 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
438 /* If recording this store would invalidate the store area
439 (perhaps because rn is not known) then we should abandon
440 further prologue analysis. */
441 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
444 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
447 rt1
= inst
.operands
[0].reg
.regno
;
448 rt2
= inst
.operands
[1].reg
.regno
;
449 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
451 rt1
+= AARCH64_X_REGISTER_COUNT
;
452 rt2
+= AARCH64_X_REGISTER_COUNT
;
455 stack
.store (pv_add_constant (regs
[rn
], imm
), size
, regs
[rt1
]);
456 stack
.store (pv_add_constant (regs
[rn
], imm
+ size
), size
, regs
[rt2
]);
458 if (inst
.operands
[2].addr
.writeback
)
459 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
461 /* Ignore the instruction that allocates stack space and sets
463 if (rn
== AARCH64_SP_REGNUM
&& !inst
.operands
[2].addr
.writeback
)
464 seen_stack_set
= true;
466 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
467 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
468 && (inst
.opcode
->op
== OP_STR_POS
469 || inst
.opcode
->op
== OP_STRF_POS
)))
470 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
471 && strcmp ("str", inst
.opcode
->name
) == 0)
473 /* STR (immediate) */
474 unsigned int rt
= inst
.operands
[0].reg
.regno
;
475 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
476 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
477 int size
= aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
);
478 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
479 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
481 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
482 rt
+= AARCH64_X_REGISTER_COUNT
;
484 stack
.store (pv_add_constant (regs
[rn
], imm
), size
, regs
[rt
]);
485 if (inst
.operands
[1].addr
.writeback
)
486 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
488 /* Are we storing with SP as a base? */
489 if (rn
== AARCH64_SP_REGNUM
)
490 seen_stack_set
= true;
492 else if (inst
.opcode
->iclass
== testbranch
)
494 /* Stop analysis on branch. */
497 else if (inst
.opcode
->iclass
== ic_system
)
499 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
500 int ra_state_val
= 0;
502 if (insn
== 0xd503233f /* paciasp. */
503 || insn
== 0xd503237f /* pacibsp. */)
505 /* Return addresses are mangled. */
508 else if (insn
== 0xd50323bf /* autiasp. */
509 || insn
== 0xd50323ff /* autibsp. */)
511 /* Return addresses are not mangled. */
517 debug_printf ("aarch64: prologue analysis gave up addr=%s"
518 " opcode=0x%x (iclass)\n",
519 core_addr_to_string_nz (start
), insn
);
523 if (tdep
->has_pauth () && cache
!= nullptr)
524 trad_frame_set_value (cache
->saved_regs
,
525 tdep
->pauth_ra_state_regnum
,
532 debug_printf ("aarch64: prologue analysis gave up addr=%s"
534 core_addr_to_string_nz (start
), insn
);
543 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
545 /* Frame pointer is fp. Frame size is constant. */
546 cache
->framereg
= AARCH64_FP_REGNUM
;
547 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
549 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
551 /* Try the stack pointer. */
552 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
553 cache
->framereg
= AARCH64_SP_REGNUM
;
557 /* We're just out of luck. We don't know where the frame is. */
558 cache
->framereg
= -1;
559 cache
->framesize
= 0;
562 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
566 if (stack
.find_reg (gdbarch
, i
, &offset
))
567 cache
->saved_regs
[i
].addr
= offset
;
570 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
572 int regnum
= gdbarch_num_regs (gdbarch
);
575 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
577 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
584 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
585 CORE_ADDR start
, CORE_ADDR limit
,
586 struct aarch64_prologue_cache
*cache
)
588 instruction_reader reader
;
590 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

/* Exercise aarch64_analyze_prologue against canned instruction
   sequences covering the frame-pointer, SP-based, movz and pointer
   authentication cases.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     buffers.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].addr == -1);
	}

      if (tdep->has_pauth ())
	{
	  SELF_CHECK (trad_frame_value_p (cache.saved_regs,
					  tdep->pauth_ra_state_regnum));
	  SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
	}
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
871 /* Implement the "skip_prologue" gdbarch method. */
874 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
876 CORE_ADDR func_addr
, limit_pc
;
878 /* See if we can determine the end of the prologue via the symbol
879 table. If so, then return either PC, or the PC after the
880 prologue, whichever is greater. */
881 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
883 CORE_ADDR post_prologue_pc
884 = skip_prologue_using_sal (gdbarch
, func_addr
);
886 if (post_prologue_pc
!= 0)
887 return std::max (pc
, post_prologue_pc
);
890 /* Can't determine prologue from the symbol table, need to examine
893 /* Find an upper limit on the function prologue using the debug
894 information. If the debug information could not be used to
895 provide that bound, then use an arbitrary large number as the
897 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
899 limit_pc
= pc
+ 128; /* Magic. */
901 /* Try disassembling prologue. */
902 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
905 /* Scan the function prologue for THIS_FRAME and populate the prologue
909 aarch64_scan_prologue (struct frame_info
*this_frame
,
910 struct aarch64_prologue_cache
*cache
)
912 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
913 CORE_ADDR prologue_start
;
914 CORE_ADDR prologue_end
;
915 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
916 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
918 cache
->prev_pc
= prev_pc
;
920 /* Assume we do not find a frame. */
921 cache
->framereg
= -1;
922 cache
->framesize
= 0;
924 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
927 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
931 /* No line info so use the current PC. */
932 prologue_end
= prev_pc
;
934 else if (sal
.end
< prologue_end
)
936 /* The next line begins after the function end. */
937 prologue_end
= sal
.end
;
940 prologue_end
= std::min (prologue_end
, prev_pc
);
941 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
947 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
951 cache
->framereg
= AARCH64_FP_REGNUM
;
952 cache
->framesize
= 16;
953 cache
->saved_regs
[29].addr
= 0;
954 cache
->saved_regs
[30].addr
= 8;
958 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
959 function may throw an exception if the inferior's registers or memory is
963 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
964 struct aarch64_prologue_cache
*cache
)
966 CORE_ADDR unwound_fp
;
969 aarch64_scan_prologue (this_frame
, cache
);
971 if (cache
->framereg
== -1)
974 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
978 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
980 /* Calculate actual addresses of saved registers using offsets
981 determined by aarch64_analyze_prologue. */
982 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
983 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
984 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
986 cache
->func
= get_frame_func (this_frame
);
988 cache
->available_p
= 1;
991 /* Allocate and fill in *THIS_CACHE with information about the prologue of
992 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
993 Return a pointer to the current aarch64_prologue_cache in
996 static struct aarch64_prologue_cache
*
997 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
999 struct aarch64_prologue_cache
*cache
;
1001 if (*this_cache
!= NULL
)
1002 return (struct aarch64_prologue_cache
*) *this_cache
;
1004 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1005 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1006 *this_cache
= cache
;
1010 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1012 catch (const gdb_exception_error
&ex
)
1014 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1021 /* Implement the "stop_reason" frame_unwind method. */
1023 static enum unwind_stop_reason
1024 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1027 struct aarch64_prologue_cache
*cache
1028 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1030 if (!cache
->available_p
)
1031 return UNWIND_UNAVAILABLE
;
1033 /* Halt the backtrace at "_start". */
1034 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1035 return UNWIND_OUTERMOST
;
1037 /* We've hit a wall, stop. */
1038 if (cache
->prev_sp
== 0)
1039 return UNWIND_OUTERMOST
;
1041 return UNWIND_NO_REASON
;
1044 /* Our frame ID for a normal frame is the current function's starting
1045 PC and the caller's SP when we were called. */
1048 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1049 void **this_cache
, struct frame_id
*this_id
)
1051 struct aarch64_prologue_cache
*cache
1052 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1054 if (!cache
->available_p
)
1055 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1057 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1060 /* Implement the "prev_register" frame_unwind method. */
1062 static struct value
*
1063 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1064 void **this_cache
, int prev_regnum
)
1066 struct aarch64_prologue_cache
*cache
1067 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1069 /* If we are asked to unwind the PC, then we need to return the LR
1070 instead. The prologue may save PC, but it will point into this
1071 frame's prologue, not the next frame's resume location. */
1072 if (prev_regnum
== AARCH64_PC_REGNUM
)
1075 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1076 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1078 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1080 if (tdep
->has_pauth ()
1081 && trad_frame_value_p (cache
->saved_regs
,
1082 tdep
->pauth_ra_state_regnum
))
1083 lr
= aarch64_frame_unmask_lr (tdep
, this_frame
, lr
);
1085 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1088 /* SP is generally not saved to the stack, but this frame is
1089 identified by the next frame's stack pointer at the time of the
1090 call. The value was already reconstructed into PREV_SP. */
1096 | | | <- Previous SP
1099 +--| saved fp |<- FP
1103 if (prev_regnum
== AARCH64_SP_REGNUM
)
1104 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1107 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1111 /* AArch64 prologue unwinder. */
1112 struct frame_unwind aarch64_prologue_unwind
=
1115 aarch64_prologue_frame_unwind_stop_reason
,
1116 aarch64_prologue_this_id
,
1117 aarch64_prologue_prev_register
,
1119 default_frame_sniffer
1122 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1123 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1124 Return a pointer to the current aarch64_prologue_cache in
1127 static struct aarch64_prologue_cache
*
1128 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1130 struct aarch64_prologue_cache
*cache
;
1132 if (*this_cache
!= NULL
)
1133 return (struct aarch64_prologue_cache
*) *this_cache
;
1135 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1136 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1137 *this_cache
= cache
;
1141 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1143 cache
->prev_pc
= get_frame_pc (this_frame
);
1144 cache
->available_p
= 1;
1146 catch (const gdb_exception_error
&ex
)
1148 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1155 /* Implement the "stop_reason" frame_unwind method. */
1157 static enum unwind_stop_reason
1158 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1161 struct aarch64_prologue_cache
*cache
1162 = aarch64_make_stub_cache (this_frame
, this_cache
);
1164 if (!cache
->available_p
)
1165 return UNWIND_UNAVAILABLE
;
1167 return UNWIND_NO_REASON
;
/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    /* Stack unavailable: build an ID from the PC alone so the frame
       is still distinguishable.  */
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}
/* Implement the "sniffer" frame_unwind method.  Accept this unwinder for
   PLT stubs and for frames whose code memory cannot be read.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}
/* AArch64 stub unwinder.  Handles PLT stubs and unreadable code, where
   the prologue scanner cannot be used.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* The frame base is the caller's SP minus the frame size determined
     by the prologue analysis.  */
  return cache->prev_sp - cache->framesize;
}
/* AArch64 default frame base information.  The same address serves as
   frame base, locals base and args base.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  Only handles AARCH64_PC_REGNUM, which is computed from
   the saved LR with any pointer-authentication signature removed.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      /* Strip the PAC signature bits from LR before using it as a PC.  */
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}
/* Single-byte DWARF expressions used as the saved-value expression for
   the RA_STATE pseudo register (0 = LR not signed, 1 = LR signed).  */
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
/* Implement the "init_reg" dwarf2_frame_ops method.  Set the default
   unwind rule for REGNUM before any CFI is applied.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The PC is recovered from the (possibly PAC-signed) LR.  */
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      /* The previous SP is the CFA by definition.  */
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  /* The PAC masks are constant across frames.  */
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}
/* Implement the execute_dwarf_cfa_vendor_op method.  Handle the AArch64
   vendor CFA opcode DW_CFA_AARCH64_negate_ra_state; return true if OP
   was consumed.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
/* Used for matching BRK instructions for AArch64.  The mask ignores the
   16-bit immediate field, so any BRK #imm matches BRK_INSN_BASE.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.
   Return true if the instruction at ADDRESS is a (permanent) BRK
   breakpoint placed by the program itself.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
					     gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple choices
	 of such instructions with different immediate values.  Different OS'
	 may use a different variation, but they have the same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  Returns
   0 to defer to the common code.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
	return 16;
      else
	return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing FUNDAMENTAL_TYPE.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex value is two base elements of its component type.  */
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    /* Short vectors must be exactly 64 or 128 bits.  */
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    /* An ordinary array needs one register per element.  */
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs)  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type where
     all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type where
     all the members are short vectors and has at most 4 members.
   - Complex (7.1.1), where the type is float or double.

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
/* AArch64 function call information structure.  Tracks the register and
   stack allocation state while marshalling arguments for a call.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  /* All V registers are exhausted.  */
  info->nsrn = 8;
  return 0;
}
/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      /* PCS C.14: once an argument spills, no later argument may use
	 the remaining X registers.  */
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	/* Real part then imaginary part, one V register each.  */
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Recurse over each member of the HFA/HVA.  */
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&arg_type->field (i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
/* Implement the "push_dummy_call" gdbarch method.  Marshall NARGS
   arguments in ARGS per the AAPCS64, set up the return address and
   struct-return pointer, and return the final (16-byte aligned) SP.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      /* PCS C.3: once a HFA/HVA spills, no later one may use the
		 remaining V registers.  */
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (arg_type->code ())
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (arg_type->is_unsigned ())
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
1943 /* Implement the "frame_align" gdbarch method. */
1946 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1948 /* Align the stack to sixteen bytes. */
1949 return sp
& ~(CORE_ADDR
) 15;
/* Return the type for an AdvSISD Q register.  Built lazily and cached
   in TDEP.  A union viewing the 128-bit register as unsigned/signed.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}
/* Return the type for an AdvSISD D register.  Built lazily and cached
   in TDEP.  A union viewing the 64-bit register as float/unsigned/signed.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}
/* Return the type for an AdvSISD S register.  Built lazily and cached
   in TDEP.  A union viewing the 32-bit register as float/unsigned/signed.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}
/* Return the type for an AdvSISD H register.  Built lazily and cached
   in TDEP.  A union viewing the 16-bit register as half/unsigned/signed.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_half;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}
/* Return the type for an AdvSISD B register.  Built lazily and cached
   in TDEP.  A union viewing the 8-bit register as unsigned/signed.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}
/* Return the type for an AdvSISD V register.  Built lazily and cached
   in TDEP.  A union of per-lane-size sub-unions (d/s/h/b/q), each of
   which is itself a union of vector views of the 128-bit register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
	 slice from the non-pseudo vector registers.  However NEON V registers
	 are always vector registers, and need constructing as such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
					    TYPE_CODE_UNION);

      struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
					      TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_half, 8));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  Map DWARF
   register number REG to the corresponding GDB register number, or
   return -1 for an unknown register.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  if (tdep->has_pauth ())
    {
      if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
	return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;

      if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
	return tdep->pauth_ra_state_regnum;
    }

  return -1;
}
/* Implement the "print_insn" gdbarch method.  Clear INFO->symbols so the
   disassembler does not print symbolic operands, then defer to the
   default printer.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA: one element per V register, starting at V0.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* v0-v7 are used to return values and one register is allocated
	 for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA: one element per V register, starting at V0.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA or HVA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
/* Implement the "return_value" gdbarch method.  Decide the return-value
   convention for VALTYPE and, if READBUF/WRITEBUF are given, transfer
   the value between the regcache and the buffers.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  if (valtype->code () == TYPE_CODE_STRUCT
      || valtype->code () == TYPE_CODE_UNION
      || valtype->code () == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  if (aarch64_debug)
	    debug_printf ("return value in memory\n");
	  return RETURN_VALUE_STRUCT_CONVENTION;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
/* Implement the "get_longjmp_target" gdbarch method.

   Determine the PC at which a longjmp on FRAME will resume, storing it
   in *PC.  Return 1 on success, 0 if the jmp_buf cannot be read.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR jb_addr;

  /* At the point longjmp is called, X0 holds the jmp_buf pointer.  */
  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  /* The saved PC lives at slot jb_pc (of jb_elt_size bytes each) in the
     jmp_buf; both fields are configured by the OS-ABI code.  */
  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
/* Implement the "gen_return_address" gdbarch method.

   Generate into AX/VALUE an agent expression that evaluates to the
   frame's return address, which on AArch64 is simply the LR register
   treated as a register lvalue.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
			    struct agent_expr *ax, struct axs_value *value,
			    CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
/* Return the pseudo register name corresponding to register regnum.

   The pseudo registers are the Qn/Dn/Sn/Hn/Bn scalar views of the
   vector registers (plus the Vn views on SVE targets).  REGNUM is a
   gdbarch register number; it is rebased to a pseudo-register index
   before the lookup.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Rebase to a pseudo-register index.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  /* On SVE targets the Vn registers are pseudo registers (views of the
     Zn registers) rather than raw registers.  */
  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return "";

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}
/* Implement the "pseudo_register_type" tdesc_arch_data method.

   Return the GDB type for pseudo register REGNUM: the matching
   vector-view type for the Qn/Dn/Sn/Hn/Bn (and SVE Vn) pseudos, and
   uint64 for the pauth RA_STATE pseudo.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Rebase to a pseudo-register index.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
      && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return builtin_type (gdbarch)->builtin_uint64;

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_type: bad register number %d"),
		  p_regnum);
}
/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.

   Return non-zero if pseudo register REGNUM is a member of GROUP.
   All vector-view pseudos belong to the vector group (Dn/Sn also to
   the float group); RA_STATE belongs to no user-visible group.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
				    struct reggroup *group)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Rebase to a pseudo-register index.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
	   && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return group == all_reggroup || group == vector_reggroup;
  /* RA_STATE is used for unwinding only.  Do not assign it to any groups.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return 0;

  return group == all_reggroup;
}
/* Helper for aarch64_pseudo_read_value.

   Read REGSIZE bytes of the underlying V register at offset
   REGNUM_OFFSET into RESULT_VALUE.  If the raw read fails the value's
   bytes are marked unavailable instead.  Returns RESULT_VALUE.  */

static struct value *
aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
			     readable_regcache *regcache, int regnum_offset,
			     int regsize, struct value *result_value)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
    mark_value_bytes_unavailable (result_value, 0,
				  TYPE_LENGTH (value_type (result_value)));
  else
    /* Only the low REGSIZE bytes form the scalar pseudo's contents.  */
    memcpy (value_contents_raw (result_value), reg_buf, regsize);

  return result_value;
}
/* Implement the "pseudo_register_read_value" gdbarch method.

   Dispatch the read of pseudo register REGNUM to the helper with the
   appropriate offset and scalar width (Q/D/S/H/B, or the full V width
   for SVE Vn views).  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct value *result_value = allocate_value (register_type (gdbarch, regnum));

  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;

  /* Rebase to a pseudo-register index.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bound");
}
/* Helper for aarch64_pseudo_write.

   Write the REGSIZE bytes in BUF to the low bytes of the underlying V
   register at offset REGNUM_OFFSET, zero-filling the remainder.  */

static void
aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
			int regnum_offset, int regsize, const gdb_byte *buf)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  /* Ensure the register buffer is zero, we want gdb writes of the
     various 'scalar' pseudo registers to behave like architectural
     writes: register width bytes are written, the remainder are set to
     zero.  */
  memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));

  memcpy (reg_buf, buf, regsize);
  regcache->raw_write (v_regnum, reg_buf);
}
/* Implement the "pseudo_register_write" gdbarch method.

   Dispatch the write of BUF to pseudo register REGNUM to the helper
   with the appropriate offset and scalar width.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Rebase to a pseudo-register index.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
				   buf);

  if (regnum >= AARC64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
				   buf);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_SVE_V0_REGNUM,
				   V_REGISTER_SIZE, buf);

  gdb_assert_not_reached ("regnum out of bound");
}
2807 /* Callback function for user_reg_add. */
2809 static struct value
*
2810 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2812 const int *reg_p
= (const int *) baton
;
2814 return value_of_register (*reg_p
, frame
);
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   If PC is inside a load-exclusive/store-exclusive sequence, return the
   addresses at which to place breakpoints (after the sequence, plus the
   target of at most one conditional branch inside it); otherwise return
   an empty vector so the caller falls back to normal stepping.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* Only one conditional branch is allowed inside the sequence.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
/* Closure carried across a displaced step, recording how to fix up the
   PC once the copied instruction has executed.  */

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Must be first: the visitor callbacks downcast from this base.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure that records the PC fix-up for this step.  */
  aarch64_displaced_step_copy_insn_closure *dsc;
};
/* Implementation of aarch64_insn_visitor method "b".

   Relocate a B/BL with branch offset OFFSET.  If the retargeted offset
   still fits in 28 bits, emit a plain B; otherwise emit a NOP and fix
   the PC up afterwards.  For BL, LR is updated by hand either way.  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* Branch offset as seen from the scratch-pad copy of the insn.  */
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up the PC after displaced stepping this instruction
     differently according to whether the condition is true or false.
     Instead of checking COND against the condition flags, we can emit
     the following instructions, and GDB can tell how to fix up the PC
     from the resulting PC value:

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1     ;
     TAKEN:
     INSN2
  */

  emit_bcond (dsd->insn_buf, cond, 8);
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}
2993 /* Dynamically allocate a new register. If we know the register
2994 statically, we should make it a global as above instead of using this
2997 static struct aarch64_register
2998 aarch64_register (unsigned num
, int is64
)
3000 return (struct aarch64_register
) { num
, is64
};
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 INSN1     ;
	 TAKEN:
	 INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction.  We can use the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 INSN1         ;
	 TAKEN:
	 INSN2
  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = true;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
			    const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address & ~0xfff);
    }
  else
    regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				    address);

  /* The instruction itself is replaced by a NOP; just step past it.  */
  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative literal load cannot execute at the scratch pad, so
   compute the literal's address into RT, then emit a register-based
   load through RT to replace it.  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
/* Implementation of aarch64_insn_visitor method "others".

   Non-PC-relative instructions are copied unmodified; only the PC
   fix-up differs for RET, which sets the PC itself.  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

  /* 0xd65f0000 masked with 0xfffffc1f is the RET encoding.  */
  if ((insn & 0xfffffc1f) == 0xd65f0000)
    {
      /* RET */
      dsd->dsc->pc_adjust = 0;
    }
  else
    dsd->dsc->pc_adjust = 4;
}
/* Visitor callbacks used by aarch64_relocate_instruction; the order of
   the entries must match struct aarch64_insn_visitor.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Copy (a possibly modified form of) the instruction at FROM to the
   scratch pad at TO, returning the closure used later by the fixup
   method, or NULL if the instruction cannot be displaced-stepped.  */

displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
    (new aarch64_displaced_step_copy_insn_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  displaced_debug_printf ("writing insn %.8x at %s",
				  dsd.insn_buf[i],
				  paddress (gdbarch, to + i * 4));

	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor emitted nothing; the step cannot be displaced.  */
      dsc = NULL;
    }

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
/* Implement the "displaced_step_fixup" gdbarch method.

   After the copied instruction at TO has executed, adjust the PC so
   that execution resumes at the right address relative to FROM, using
   the pc_adjust/cond information recorded in DSC_.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;

  ULONGEST pc;

  regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      /* The scratch pad held "B.COND +8; <insn>", so PC-to distance
	 tells us which way the condition went.  */
      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   Hardware single step is always usable after a displaced step.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
/* Get the correct target description for the given VQ value.
   If VQ is zero then it is assumed SVE is not supported.
   (It is not possible to set VQ to zero on an SVE system).

   Descriptions are created lazily and cached in tdesc_aarch64_list,
   keyed by VQ and the PAUTH_P flag.  */

const target_desc *
aarch64_read_description (uint64_t vq, bool pauth_p)
{
  if (vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
	   AARCH64_MAX_SVE_VQ);

  struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];

  if (tdesc == NULL)
    {
      tdesc = aarch64_create_target_description (vq, pauth_p);
      tdesc_aarch64_list[vq][pauth_p] = tdesc;
    }

  return tdesc;
}
3287 /* Return the VQ used when creating the target description TDESC. */
3290 aarch64_get_tdesc_vq (const struct target_desc
*tdesc
)
3292 const struct tdesc_feature
*feature_sve
;
3294 if (!tdesc_has_registers (tdesc
))
3297 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3299 if (feature_sve
== nullptr)
3302 uint64_t vl
= tdesc_register_bitsize (feature_sve
,
3303 aarch64_sve_register_names
[0]) / 8;
3304 return sve_vq_from_vl (vl
);
3307 /* Add all the expected register sets into GDBARCH. */
3310 aarch64_add_reggroups (struct gdbarch
*gdbarch
)
3312 reggroup_add (gdbarch
, general_reggroup
);
3313 reggroup_add (gdbarch
, float_reggroup
);
3314 reggroup_add (gdbarch
, system_reggroup
);
3315 reggroup_add (gdbarch
, vector_reggroup
);
3316 reggroup_add (gdbarch
, all_reggroup
);
3317 reggroup_add (gdbarch
, save_reggroup
);
3318 reggroup_add (gdbarch
, restore_reggroup
);
3321 /* Implement the "cannot_store_register" gdbarch method. */
3324 aarch64_cannot_store_register (struct gdbarch
*gdbarch
, int regnum
)
3326 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3328 if (!tdep
->has_pauth ())
3331 /* Pointer authentication registers are read-only. */
3332 return (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
3333 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
));
3336 /* Initialize the current architecture based on INFO. If possible,
3337 re-use an architecture from ARCHES, which is a list of
3338 architectures already created during this debugging session.
3340 Called e.g. at program startup, when reading a core file, and when
3341 reading a binary file. */
3343 static struct gdbarch
*
3344 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
3346 const struct tdesc_feature
*feature_core
, *feature_fpu
, *feature_sve
;
3347 const struct tdesc_feature
*feature_pauth
;
3348 bool valid_p
= true;
3349 int i
, num_regs
= 0, num_pseudo_regs
= 0;
3350 int first_pauth_regnum
= -1, pauth_ra_state_offset
= -1;
3352 /* Use the vector length passed via the target info. Here -1 is used for no
3353 SVE, and 0 is unset. If unset then use the vector length from the existing
3356 if (info
.id
== (int *) -1)
3358 else if (info
.id
!= 0)
3359 vq
= (uint64_t) info
.id
;
3361 vq
= aarch64_get_tdesc_vq (info
.target_desc
);
3363 if (vq
> AARCH64_MAX_SVE_VQ
)
3364 internal_error (__FILE__
, __LINE__
, _("VQ out of bounds: %s (max %d)"),
3365 pulongest (vq
), AARCH64_MAX_SVE_VQ
);
3367 /* If there is already a candidate, use it. */
3368 for (gdbarch_list
*best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
3369 best_arch
!= nullptr;
3370 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
3372 struct gdbarch_tdep
*tdep
= gdbarch_tdep (best_arch
->gdbarch
);
3373 if (tdep
&& tdep
->vq
== vq
)
3374 return best_arch
->gdbarch
;
3377 /* Ensure we always have a target descriptor, and that it is for the given VQ
3379 const struct target_desc
*tdesc
= info
.target_desc
;
3380 if (!tdesc_has_registers (tdesc
) || vq
!= aarch64_get_tdesc_vq (tdesc
))
3381 tdesc
= aarch64_read_description (vq
, false);
3384 feature_core
= tdesc_find_feature (tdesc
,"org.gnu.gdb.aarch64.core");
3385 feature_fpu
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
3386 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3387 feature_pauth
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.pauth");
3389 if (feature_core
== nullptr)
3392 tdesc_arch_data_up tdesc_data
= tdesc_data_alloc ();
3394 /* Validate the description provides the mandatory core R registers
3395 and allocate their numbers. */
3396 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
3397 valid_p
&= tdesc_numbered_register (feature_core
, tdesc_data
.get (),
3398 AARCH64_X0_REGNUM
+ i
,
3399 aarch64_r_register_names
[i
]);
3401 num_regs
= AARCH64_X0_REGNUM
+ i
;
3403 /* Add the V registers. */
3404 if (feature_fpu
!= nullptr)
3406 if (feature_sve
!= nullptr)
3407 error (_("Program contains both fpu and SVE features."));
3409 /* Validate the description provides the mandatory V registers
3410 and allocate their numbers. */
3411 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
3412 valid_p
&= tdesc_numbered_register (feature_fpu
, tdesc_data
.get (),
3413 AARCH64_V0_REGNUM
+ i
,
3414 aarch64_v_register_names
[i
]);
3416 num_regs
= AARCH64_V0_REGNUM
+ i
;
3419 /* Add the SVE registers. */
3420 if (feature_sve
!= nullptr)
3422 /* Validate the description provides the mandatory SVE registers
3423 and allocate their numbers. */
3424 for (i
= 0; i
< ARRAY_SIZE (aarch64_sve_register_names
); i
++)
3425 valid_p
&= tdesc_numbered_register (feature_sve
, tdesc_data
.get (),
3426 AARCH64_SVE_Z0_REGNUM
+ i
,
3427 aarch64_sve_register_names
[i
]);
3429 num_regs
= AARCH64_SVE_Z0_REGNUM
+ i
;
3430 num_pseudo_regs
+= 32; /* add the Vn register pseudos. */
3433 if (feature_fpu
!= nullptr || feature_sve
!= nullptr)
3435 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
3436 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
3437 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
3438 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
3439 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
3442 /* Add the pauth registers. */
3443 if (feature_pauth
!= NULL
)
3445 first_pauth_regnum
= num_regs
;
3446 pauth_ra_state_offset
= num_pseudo_regs
;
3447 /* Validate the descriptor provides the mandatory PAUTH registers and
3448 allocate their numbers. */
3449 for (i
= 0; i
< ARRAY_SIZE (aarch64_pauth_register_names
); i
++)
3450 valid_p
&= tdesc_numbered_register (feature_pauth
, tdesc_data
.get (),
3451 first_pauth_regnum
+ i
,
3452 aarch64_pauth_register_names
[i
]);
3455 num_pseudo_regs
+= 1; /* Count RA_STATE pseudo register. */
3461 /* AArch64 code is always little-endian. */
3462 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
3464 struct gdbarch_tdep
*tdep
= XCNEW (struct gdbarch_tdep
);
3465 struct gdbarch
*gdbarch
= gdbarch_alloc (&info
, tdep
);
3467 /* This should be low enough for everything. */
3468 tdep
->lowest_pc
= 0x20;
3469 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
3470 tdep
->jb_elt_size
= 8;
3472 tdep
->pauth_reg_base
= first_pauth_regnum
;
3473 tdep
->pauth_ra_state_regnum
= (feature_pauth
== NULL
) ? -1
3474 : pauth_ra_state_offset
+ num_regs
;
3476 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
3477 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
3479 /* Advance PC across function entry code. */
3480 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
3482 /* The stack grows downward. */
3483 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
3485 /* Breakpoint manipulation. */
3486 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
3487 aarch64_breakpoint::kind_from_pc
);
3488 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
3489 aarch64_breakpoint::bp_from_kind
);
3490 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
3491 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
3493 /* Information about registers, etc. */
3494 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
3495 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
3496 set_gdbarch_num_regs (gdbarch
, num_regs
);
3498 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
3499 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
3500 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
3501 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
3502 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
3503 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
3504 aarch64_pseudo_register_reggroup_p
);
3505 set_gdbarch_cannot_store_register (gdbarch
, aarch64_cannot_store_register
);
3508 set_gdbarch_short_bit (gdbarch
, 16);
3509 set_gdbarch_int_bit (gdbarch
, 32);
3510 set_gdbarch_float_bit (gdbarch
, 32);
3511 set_gdbarch_double_bit (gdbarch
, 64);
3512 set_gdbarch_long_double_bit (gdbarch
, 128);
3513 set_gdbarch_long_bit (gdbarch
, 64);
3514 set_gdbarch_long_long_bit (gdbarch
, 64);
3515 set_gdbarch_ptr_bit (gdbarch
, 64);
3516 set_gdbarch_char_signed (gdbarch
, 0);
3517 set_gdbarch_wchar_signed (gdbarch
, 0);
3518 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
3519 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
3520 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
3521 set_gdbarch_type_align (gdbarch
, aarch64_type_align
);
3523 /* Internal <-> external register number maps. */
3524 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3526 /* Returning results. */
3527 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3530 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3532 /* Virtual tables. */
3533 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3535 /* Register architecture. */
3536 aarch64_add_reggroups (gdbarch
);
3538 /* Hook in the ABI-specific overrides, if they have been registered. */
3539 info
.target_desc
= tdesc
;
3540 info
.tdesc_data
= tdesc_data
.get ();
3541 gdbarch_init_osabi (info
, gdbarch
);
3543 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3544 /* Register DWARF CFA vendor handler. */
3545 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch
,
3546 aarch64_execute_dwarf_cfa_vendor_op
);
3548 /* Permanent/Program breakpoint handling. */
3549 set_gdbarch_program_breakpoint_here_p (gdbarch
,
3550 aarch64_program_breakpoint_here_p
);
3552 /* Add some default predicates. */
3553 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3554 dwarf2_append_unwinders (gdbarch
);
3555 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3557 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3559 /* Now we have tuned the configuration, set a few final things,
3560 based on what the OS ABI has told us. */
3562 if (tdep
->jb_pc
>= 0)
3563 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3565 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3567 set_gdbarch_get_pc_address_flags (gdbarch
, aarch64_get_pc_address_flags
);
3569 tdesc_use_registers (gdbarch
, tdesc
, std::move (tdesc_data
));
3571 /* Add standard register aliases. */
3572 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3573 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3574 value_of_aarch64_user_reg
,
3575 &aarch64_register_aliases
[i
].regnum
);
3577 register_aarch64_ravenscar_ops (gdbarch
);
3583 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3585 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3590 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3591 paddress (gdbarch
, tdep
->lowest_pc
));
3597 static void aarch64_process_record_test (void);
3601 void _initialize_aarch64_tdep ();
3603 _initialize_aarch64_tdep ()
3605 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3608 /* Debug this file's internals. */
3609 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3610 Set AArch64 debugging."), _("\
3611 Show AArch64 debugging."), _("\
3612 When on, AArch64 specific debugging is enabled."),
3615 &setdebuglist
, &showdebuglist
);
3618 selftests::register_test ("aarch64-analyze-prologue",
3619 selftests::aarch64_analyze_prologue_test
);
3620 selftests::register_test ("aarch64-process-record",
3621 selftests::aarch64_process_record_test
);
3625 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate REGS as an array of LENGTH uint32_t register numbers and copy
   LENGTH entries into it from RECORD_BUF.  A LENGTH of zero allocates
   nothing and leaves REGS untouched.  NOTE: LENGTH is captured once into
   a local so the argument is not expanded (and evaluated) twice.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * reg_len); \
	      } \
	  } \
	while (0)

/* Allocate MEMS as an array of LENGTH struct aarch64_mem_r records and
   copy LENGTH {len, addr} pairs into it from RECORD_BUF (a flat uint64_t
   buffer laid out as len/addr pairs).  A LENGTH of zero allocates nothing
   and leaves MEMS untouched.  LENGTH is captured once (no double
   evaluation).  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do  \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
		memcpy(&MEMS->len, &RECORD_BUF[0], \
		       sizeof(struct aarch64_mem_r) * mem_len); \
	      } \
	  } \
	  while (0)
3652 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory access: a length/address pair describing the bytes
   an instruction will overwrite, so record/replay can save them first.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Result codes returned by the per-group record handlers below.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,       /* Instruction decoded and recorded.  */
  AARCH64_RECORD_UNSUPPORTED,   /* Recognized but cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN        /* Encoding not recognized.  */
};
3667 typedef struct insn_decode_record_t
3669 struct gdbarch
*gdbarch
;
3670 struct regcache
*regcache
;
3671 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3672 uint32_t aarch64_insn
; /* Insn to be recorded. */
3673 uint32_t mem_rec_count
; /* Count of memory records. */
3674 uint32_t reg_rec_count
; /* Count of register records. */
3675 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3676 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3677 } insn_decode_record
;
3679 /* Record handler for data processing - register instructions. */
3682 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3684 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3685 uint32_t record_buf
[4];
3687 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3688 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3689 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3691 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3695 /* Logical (shifted register). */
3696 if (insn_bits24_27
== 0x0a)
3697 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3699 else if (insn_bits24_27
== 0x0b)
3700 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3702 return AARCH64_RECORD_UNKNOWN
;
3704 record_buf
[0] = reg_rd
;
3705 aarch64_insn_r
->reg_rec_count
= 1;
3707 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3711 if (insn_bits24_27
== 0x0b)
3713 /* Data-processing (3 source). */
3714 record_buf
[0] = reg_rd
;
3715 aarch64_insn_r
->reg_rec_count
= 1;
3717 else if (insn_bits24_27
== 0x0a)
3719 if (insn_bits21_23
== 0x00)
3721 /* Add/subtract (with carry). */
3722 record_buf
[0] = reg_rd
;
3723 aarch64_insn_r
->reg_rec_count
= 1;
3724 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3726 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3727 aarch64_insn_r
->reg_rec_count
= 2;
3730 else if (insn_bits21_23
== 0x02)
3732 /* Conditional compare (register) and conditional compare
3733 (immediate) instructions. */
3734 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3735 aarch64_insn_r
->reg_rec_count
= 1;
3737 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3739 /* Conditional select. */
3740 /* Data-processing (2 source). */
3741 /* Data-processing (1 source). */
3742 record_buf
[0] = reg_rd
;
3743 aarch64_insn_r
->reg_rec_count
= 1;
3746 return AARCH64_RECORD_UNKNOWN
;
3750 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3752 return AARCH64_RECORD_SUCCESS
;
3755 /* Record handler for data processing - immediate instructions. */
3758 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3760 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3761 uint32_t record_buf
[4];
3763 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3764 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3765 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3767 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3768 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3769 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3771 record_buf
[0] = reg_rd
;
3772 aarch64_insn_r
->reg_rec_count
= 1;
3774 else if (insn_bits24_27
== 0x01)
3776 /* Add/Subtract (immediate). */
3777 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3778 record_buf
[0] = reg_rd
;
3779 aarch64_insn_r
->reg_rec_count
= 1;
3781 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3783 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3785 /* Logical (immediate). */
3786 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3787 record_buf
[0] = reg_rd
;
3788 aarch64_insn_r
->reg_rec_count
= 1;
3790 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3793 return AARCH64_RECORD_UNKNOWN
;
3795 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3797 return AARCH64_RECORD_SUCCESS
;
3800 /* Record handler for branch, exception generation and system instructions. */
3803 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3805 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3806 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3807 uint32_t record_buf
[4];
3809 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3810 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3811 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3813 if (insn_bits28_31
== 0x0d)
3815 /* Exception generation instructions. */
3816 if (insn_bits24_27
== 0x04)
3818 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3819 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3820 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3822 ULONGEST svc_number
;
3824 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3826 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3830 return AARCH64_RECORD_UNSUPPORTED
;
3832 /* System instructions. */
3833 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3835 uint32_t reg_rt
, reg_crn
;
3837 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3838 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3840 /* Record rt in case of sysl and mrs instructions. */
3841 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3843 record_buf
[0] = reg_rt
;
3844 aarch64_insn_r
->reg_rec_count
= 1;
3846 /* Record cpsr for hint and msr(immediate) instructions. */
3847 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3849 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3850 aarch64_insn_r
->reg_rec_count
= 1;
3853 /* Unconditional branch (register). */
3854 else if((insn_bits24_27
& 0x0e) == 0x06)
3856 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3857 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3858 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3861 return AARCH64_RECORD_UNKNOWN
;
3863 /* Unconditional branch (immediate). */
3864 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3866 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3867 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3868 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3871 /* Compare & branch (immediate), Test & branch (immediate) and
3872 Conditional branch (immediate). */
3873 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3875 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3877 return AARCH64_RECORD_SUCCESS
;
3880 /* Record handler for advanced SIMD load and store instructions. */
3883 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3886 uint64_t addr_offset
= 0;
3887 uint32_t record_buf
[24];
3888 uint64_t record_buf_mem
[24];
3889 uint32_t reg_rn
, reg_rt
;
3890 uint32_t reg_index
= 0, mem_index
= 0;
3891 uint8_t opcode_bits
, size_bits
;
3893 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3894 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3895 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3896 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3897 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3900 debug_printf ("Process record: Advanced SIMD load/store\n");
3902 /* Load/store single structure. */
3903 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3905 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3906 scale
= opcode_bits
>> 2;
3907 selem
= ((opcode_bits
& 0x02) |
3908 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3912 if (size_bits
& 0x01)
3913 return AARCH64_RECORD_UNKNOWN
;
3916 if ((size_bits
>> 1) & 0x01)
3917 return AARCH64_RECORD_UNKNOWN
;
3918 if (size_bits
& 0x01)
3920 if (!((opcode_bits
>> 1) & 0x01))
3923 return AARCH64_RECORD_UNKNOWN
;
3927 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3934 return AARCH64_RECORD_UNKNOWN
;
3940 for (sindex
= 0; sindex
< selem
; sindex
++)
3942 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3943 reg_rt
= (reg_rt
+ 1) % 32;
3947 for (sindex
= 0; sindex
< selem
; sindex
++)
3949 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3950 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3953 record_buf_mem
[mem_index
++] = esize
/ 8;
3954 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3956 addr_offset
= addr_offset
+ (esize
/ 8);
3957 reg_rt
= (reg_rt
+ 1) % 32;
3961 /* Load/store multiple structure. */
3964 uint8_t selem
, esize
, rpt
, elements
;
3965 uint8_t eindex
, rindex
;
3967 esize
= 8 << size_bits
;
3968 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3969 elements
= 128 / esize
;
3971 elements
= 64 / esize
;
3973 switch (opcode_bits
)
3975 /*LD/ST4 (4 Registers). */
3980 /*LD/ST1 (4 Registers). */
3985 /*LD/ST3 (3 Registers). */
3990 /*LD/ST1 (3 Registers). */
3995 /*LD/ST1 (1 Register). */
4000 /*LD/ST2 (2 Registers). */
4005 /*LD/ST1 (2 Registers). */
4011 return AARCH64_RECORD_UNSUPPORTED
;
4014 for (rindex
= 0; rindex
< rpt
; rindex
++)
4015 for (eindex
= 0; eindex
< elements
; eindex
++)
4017 uint8_t reg_tt
, sindex
;
4018 reg_tt
= (reg_rt
+ rindex
) % 32;
4019 for (sindex
= 0; sindex
< selem
; sindex
++)
4021 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
4022 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
4025 record_buf_mem
[mem_index
++] = esize
/ 8;
4026 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
4028 addr_offset
= addr_offset
+ (esize
/ 8);
4029 reg_tt
= (reg_tt
+ 1) % 32;
4034 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
4035 record_buf
[reg_index
++] = reg_rn
;
4037 aarch64_insn_r
->reg_rec_count
= reg_index
;
4038 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
4039 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
4041 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4043 return AARCH64_RECORD_SUCCESS
;
4046 /* Record handler for load and store instructions. */
4049 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
4051 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
4052 uint8_t insn_bit23
, insn_bit21
;
4053 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
4054 uint32_t reg_rn
, reg_rt
, reg_rt2
;
4055 uint64_t datasize
, offset
;
4056 uint32_t record_buf
[8];
4057 uint64_t record_buf_mem
[8];
4060 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
4061 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
4062 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
4063 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
4064 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
4065 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
4066 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4067 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
4068 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
4069 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
4070 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
4072 /* Load/store exclusive. */
4073 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
4076 debug_printf ("Process record: load/store exclusive\n");
4080 record_buf
[0] = reg_rt
;
4081 aarch64_insn_r
->reg_rec_count
= 1;
4084 record_buf
[1] = reg_rt2
;
4085 aarch64_insn_r
->reg_rec_count
= 2;
4091 datasize
= (8 << size_bits
) * 2;
4093 datasize
= (8 << size_bits
);
4094 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4096 record_buf_mem
[0] = datasize
/ 8;
4097 record_buf_mem
[1] = address
;
4098 aarch64_insn_r
->mem_rec_count
= 1;
4101 /* Save register rs. */
4102 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
4103 aarch64_insn_r
->reg_rec_count
= 1;
4107 /* Load register (literal) instructions decoding. */
4108 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
4111 debug_printf ("Process record: load register (literal)\n");
4113 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4115 record_buf
[0] = reg_rt
;
4116 aarch64_insn_r
->reg_rec_count
= 1;
4118 /* All types of load/store pair instructions decoding. */
4119 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
4122 debug_printf ("Process record: load/store pair\n");
4128 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4129 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
4133 record_buf
[0] = reg_rt
;
4134 record_buf
[1] = reg_rt2
;
4136 aarch64_insn_r
->reg_rec_count
= 2;
4141 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
4143 size_bits
= size_bits
>> 1;
4144 datasize
= 8 << (2 + size_bits
);
4145 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
4146 offset
= offset
<< (2 + size_bits
);
4147 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4149 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
4151 if (imm7_off
& 0x40)
4152 address
= address
- offset
;
4154 address
= address
+ offset
;
4157 record_buf_mem
[0] = datasize
/ 8;
4158 record_buf_mem
[1] = address
;
4159 record_buf_mem
[2] = datasize
/ 8;
4160 record_buf_mem
[3] = address
+ (datasize
/ 8);
4161 aarch64_insn_r
->mem_rec_count
= 2;
4163 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
4164 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
4166 /* Load/store register (unsigned immediate) instructions. */
4167 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
4169 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4179 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
4181 /* PRFM (immediate) */
4182 return AARCH64_RECORD_SUCCESS
;
4184 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
4186 /* LDRSW (immediate) */
4200 debug_printf ("Process record: load/store (unsigned immediate):"
4201 " size %x V %d opc %x\n", size_bits
, vector_flag
,
4207 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
4208 datasize
= 8 << size_bits
;
4209 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4211 offset
= offset
<< size_bits
;
4212 address
= address
+ offset
;
4214 record_buf_mem
[0] = datasize
>> 3;
4215 record_buf_mem
[1] = address
;
4216 aarch64_insn_r
->mem_rec_count
= 1;
4221 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4223 record_buf
[0] = reg_rt
;
4224 aarch64_insn_r
->reg_rec_count
= 1;
4227 /* Load/store register (register offset) instructions. */
4228 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4229 && insn_bits10_11
== 0x02 && insn_bit21
)
4232 debug_printf ("Process record: load/store (register offset)\n");
4233 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4240 if (size_bits
!= 0x03)
4243 return AARCH64_RECORD_UNKNOWN
;
4247 ULONGEST reg_rm_val
;
4249 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
4250 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
4251 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
4252 offset
= reg_rm_val
<< size_bits
;
4254 offset
= reg_rm_val
;
4255 datasize
= 8 << size_bits
;
4256 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4258 address
= address
+ offset
;
4259 record_buf_mem
[0] = datasize
>> 3;
4260 record_buf_mem
[1] = address
;
4261 aarch64_insn_r
->mem_rec_count
= 1;
4266 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4268 record_buf
[0] = reg_rt
;
4269 aarch64_insn_r
->reg_rec_count
= 1;
4272 /* Load/store register (immediate and unprivileged) instructions. */
4273 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4278 debug_printf ("Process record: load/store "
4279 "(immediate and unprivileged)\n");
4281 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4288 if (size_bits
!= 0x03)
4291 return AARCH64_RECORD_UNKNOWN
;
4296 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
4297 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
4298 datasize
= 8 << size_bits
;
4299 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4301 if (insn_bits10_11
!= 0x01)
4303 if (imm9_off
& 0x0100)
4304 address
= address
- offset
;
4306 address
= address
+ offset
;
4308 record_buf_mem
[0] = datasize
>> 3;
4309 record_buf_mem
[1] = address
;
4310 aarch64_insn_r
->mem_rec_count
= 1;
4315 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4317 record_buf
[0] = reg_rt
;
4318 aarch64_insn_r
->reg_rec_count
= 1;
4320 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
4321 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
4323 /* Advanced SIMD load/store instructions. */
4325 return aarch64_record_asimd_load_store (aarch64_insn_r
);
4327 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
4329 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4331 return AARCH64_RECORD_SUCCESS
;
4334 /* Record handler for data processing SIMD and floating point instructions. */
4337 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
4339 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
4340 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
4341 uint8_t insn_bits11_14
;
4342 uint32_t record_buf
[2];
4344 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
4345 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
4346 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
4347 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
4348 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
4349 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
4350 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
4351 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
4352 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
4355 debug_printf ("Process record: data processing SIMD/FP: ");
4357 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
4359 /* Floating point - fixed point conversion instructions. */
4363 debug_printf ("FP - fixed point conversion");
4365 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
4366 record_buf
[0] = reg_rd
;
4368 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4370 /* Floating point - conditional compare instructions. */
4371 else if (insn_bits10_11
== 0x01)
4374 debug_printf ("FP - conditional compare");
4376 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4378 /* Floating point - data processing (2-source) and
4379 conditional select instructions. */
4380 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
4383 debug_printf ("FP - DP (2-source)");
4385 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4387 else if (insn_bits10_11
== 0x00)
4389 /* Floating point - immediate instructions. */
4390 if ((insn_bits12_15
& 0x01) == 0x01
4391 || (insn_bits12_15
& 0x07) == 0x04)
4394 debug_printf ("FP - immediate");
4395 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4397 /* Floating point - compare instructions. */
4398 else if ((insn_bits12_15
& 0x03) == 0x02)
4401 debug_printf ("FP - immediate");
4402 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4404 /* Floating point - integer conversions instructions. */
4405 else if (insn_bits12_15
== 0x00)
4407 /* Convert float to integer instruction. */
4408 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4411 debug_printf ("float to int conversion");
4413 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4415 /* Convert integer to float instruction. */
4416 else if ((opcode
>> 1) == 0x01 && !rmode
)
4419 debug_printf ("int to float conversion");
4421 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4423 /* Move float to integer instruction. */
4424 else if ((opcode
>> 1) == 0x03)
4427 debug_printf ("move float to int");
4429 if (!(opcode
& 0x01))
4430 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4432 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4435 return AARCH64_RECORD_UNKNOWN
;
4438 return AARCH64_RECORD_UNKNOWN
;
4441 return AARCH64_RECORD_UNKNOWN
;
4443 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4446 debug_printf ("SIMD copy");
4448 /* Advanced SIMD copy instructions. */
4449 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4450 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4451 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4453 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4454 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4456 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4459 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4461 /* All remaining floating point or advanced SIMD instructions. */
4465 debug_printf ("all remain");
4467 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4471 debug_printf ("\n");
4473 /* Record the V/X register. */
4474 aarch64_insn_r
->reg_rec_count
++;
4476 /* Some of these instructions may set bits in the FPSR, so record it
4478 record_buf
[1] = AARCH64_FPSR_REGNUM
;
4479 aarch64_insn_r
->reg_rec_count
++;
4481 gdb_assert (aarch64_insn_r
->reg_rec_count
== 2);
4482 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4484 return AARCH64_RECORD_SUCCESS
;
4487 /* Decodes insns type and invokes its record handler. */
4490 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4492 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4494 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4495 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4496 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4497 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4499 /* Data processing - immediate instructions. */
4500 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4501 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4503 /* Branch, exception generation and system instructions. */
4504 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4505 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4507 /* Load and store instructions. */
4508 if (!ins_bit25
&& ins_bit27
)
4509 return aarch64_record_load_store (aarch64_insn_r
);
4511 /* Data processing - register instructions. */
4512 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4513 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4515 /* Data processing - SIMD and floating point instructions. */
4516 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4517 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4519 return AARCH64_RECORD_UNSUPPORTED
;
4522 /* Cleans up local record registers and memory allocations. */
4525 deallocate_reg_mem (insn_decode_record
*record
)
4527 xfree (record
->aarch64_regs
);
4528 xfree (record
->aarch64_mems
);
4532 namespace selftests
{
4535 aarch64_process_record_test (void)
4537 struct gdbarch_info info
;
4540 gdbarch_info_init (&info
);
4541 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
4543 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
4544 SELF_CHECK (gdbarch
!= NULL
);
4546 insn_decode_record aarch64_record
;
4548 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4549 aarch64_record
.regcache
= NULL
;
4550 aarch64_record
.this_addr
= 0;
4551 aarch64_record
.gdbarch
= gdbarch
;
4553 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4554 aarch64_record
.aarch64_insn
= 0xf9800020;
4555 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4556 SELF_CHECK (ret
== AARCH64_RECORD_SUCCESS
);
4557 SELF_CHECK (aarch64_record
.reg_rec_count
== 0);
4558 SELF_CHECK (aarch64_record
.mem_rec_count
== 0);
4560 deallocate_reg_mem (&aarch64_record
);
4563 } // namespace selftests
4564 #endif /* GDB_SELF_TEST */
4566 /* Parse the current instruction and record the values of the registers and
4567 memory that will be changed in current instruction to record_arch_list
4568 return -1 if something is wrong. */
4571 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4572 CORE_ADDR insn_addr
)
4574 uint32_t rec_no
= 0;
4575 uint8_t insn_size
= 4;
4577 gdb_byte buf
[insn_size
];
4578 insn_decode_record aarch64_record
;
4580 memset (&buf
[0], 0, insn_size
);
4581 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4582 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4583 aarch64_record
.aarch64_insn
4584 = (uint32_t) extract_unsigned_integer (&buf
[0],
4586 gdbarch_byte_order (gdbarch
));
4587 aarch64_record
.regcache
= regcache
;
4588 aarch64_record
.this_addr
= insn_addr
;
4589 aarch64_record
.gdbarch
= gdbarch
;
4591 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4592 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4594 printf_unfiltered (_("Process record does not support instruction "
4595 "0x%0x at address %s.\n"),
4596 aarch64_record
.aarch64_insn
,
4597 paddress (gdbarch
, insn_addr
));
4603 /* Record registers. */
4604 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4606 /* Always record register CPSR. */
4607 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4608 AARCH64_CPSR_REGNUM
);
4609 if (aarch64_record
.aarch64_regs
)
4610 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4611 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4612 aarch64_record
.aarch64_regs
[rec_no
]))
4615 /* Record memories. */
4616 if (aarch64_record
.aarch64_mems
)
4617 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4618 if (record_full_arch_list_add_mem
4619 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4620 aarch64_record
.aarch64_mems
[rec_no
].len
))
4623 if (record_full_arch_list_add_end ())
4627 deallocate_reg_mem (&aarch64_record
);