1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
37 #include "dwarf2-frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
57 #include "arch/aarch64-insn.h"
59 #include "opcode/aarch64.h"
/* Bit-field extraction helpers used by the instruction decoders.

   submask (X)        -- mask covering bits [0, X] inclusive.
   bit (OBJ, ST)      -- bit ST of OBJ.
   bits (OBJ, ST, FN) -- bits [ST, FN] inclusive of OBJ.

   Use an unsigned 64-bit constant for the shift: with the previous
   "1L" the shift was undefined (and silently truncating) for field
   widths of 31 bits or more on hosts where 'long' is 32 bits, and
   produced a signed value.  Valid for X in [0, 62].  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Pseudo registers (Q/D/S/H/B views of
   the V registers) are numbered relative to gdbarch_num_regs; each
   bank below starts where the previous 32-entry bank ends.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
#define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4
78 /* All possible aarch64 target descriptors. */
79 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1];
81 /* The standard register names, and all the valid aliases for them. */
84 const char *const name
;
86 } aarch64_register_aliases
[] =
88 /* 64-bit register names. */
89 {"fp", AARCH64_FP_REGNUM
},
90 {"lr", AARCH64_LR_REGNUM
},
91 {"sp", AARCH64_SP_REGNUM
},
93 /* 32-bit register names. */
94 {"w0", AARCH64_X0_REGNUM
+ 0},
95 {"w1", AARCH64_X0_REGNUM
+ 1},
96 {"w2", AARCH64_X0_REGNUM
+ 2},
97 {"w3", AARCH64_X0_REGNUM
+ 3},
98 {"w4", AARCH64_X0_REGNUM
+ 4},
99 {"w5", AARCH64_X0_REGNUM
+ 5},
100 {"w6", AARCH64_X0_REGNUM
+ 6},
101 {"w7", AARCH64_X0_REGNUM
+ 7},
102 {"w8", AARCH64_X0_REGNUM
+ 8},
103 {"w9", AARCH64_X0_REGNUM
+ 9},
104 {"w10", AARCH64_X0_REGNUM
+ 10},
105 {"w11", AARCH64_X0_REGNUM
+ 11},
106 {"w12", AARCH64_X0_REGNUM
+ 12},
107 {"w13", AARCH64_X0_REGNUM
+ 13},
108 {"w14", AARCH64_X0_REGNUM
+ 14},
109 {"w15", AARCH64_X0_REGNUM
+ 15},
110 {"w16", AARCH64_X0_REGNUM
+ 16},
111 {"w17", AARCH64_X0_REGNUM
+ 17},
112 {"w18", AARCH64_X0_REGNUM
+ 18},
113 {"w19", AARCH64_X0_REGNUM
+ 19},
114 {"w20", AARCH64_X0_REGNUM
+ 20},
115 {"w21", AARCH64_X0_REGNUM
+ 21},
116 {"w22", AARCH64_X0_REGNUM
+ 22},
117 {"w23", AARCH64_X0_REGNUM
+ 23},
118 {"w24", AARCH64_X0_REGNUM
+ 24},
119 {"w25", AARCH64_X0_REGNUM
+ 25},
120 {"w26", AARCH64_X0_REGNUM
+ 26},
121 {"w27", AARCH64_X0_REGNUM
+ 27},
122 {"w28", AARCH64_X0_REGNUM
+ 28},
123 {"w29", AARCH64_X0_REGNUM
+ 29},
124 {"w30", AARCH64_X0_REGNUM
+ 30},
127 {"ip0", AARCH64_X0_REGNUM
+ 16},
128 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
185 /* AArch64 prologue cache structure. */
186 struct aarch64_prologue_cache
188 /* The program counter at the start of the function. It is used to
189 identify this frame as a prologue frame. */
192 /* The program counter at the time this frame was created; i.e. where
193 this function was called from. It is used to identify this frame as a
197 /* The stack pointer at the time this frame was created; i.e. the
198 caller's stack pointer when this function was called. It is used
199 to identify this frame. */
202 /* Is the target available to read from? */
205 /* The frame base for this frame is just prev_sp - frame size.
206 FRAMESIZE is the distance from the frame pointer to the
207 initial stack pointer. */
210 /* The register used to hold the frame pointer for this frame. */
213 /* Saved register offsets. */
214 struct trad_frame_saved_reg
*saved_regs
;
218 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
219 struct cmd_list_element
*c
, const char *value
)
221 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
226 /* Abstract instruction reader. */
228 class abstract_instruction_reader
231 /* Read in one instruction. */
232 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
233 enum bfd_endian byte_order
) = 0;
236 /* Instruction reader from real target. */
238 class instruction_reader
: public abstract_instruction_reader
241 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
244 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
250 /* Analyze a prologue, looking for a recognizable stack frame
251 and frame pointer. Scan until we encounter a store that could
252 clobber the stack frame unexpectedly, or an unknown instruction. */
255 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
256 CORE_ADDR start
, CORE_ADDR limit
,
257 struct aarch64_prologue_cache
*cache
,
258 abstract_instruction_reader
& reader
)
260 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
262 /* Track X registers and D registers in prologue. */
263 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
265 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
266 regs
[i
] = pv_register (i
, 0);
267 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
269 for (; start
< limit
; start
+= 4)
274 insn
= reader
.read (start
, 4, byte_order_for_code
);
276 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
279 if (inst
.opcode
->iclass
== addsub_imm
280 && (inst
.opcode
->op
== OP_ADD
281 || strcmp ("sub", inst
.opcode
->name
) == 0))
283 unsigned rd
= inst
.operands
[0].reg
.regno
;
284 unsigned rn
= inst
.operands
[1].reg
.regno
;
286 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
287 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
288 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
289 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
291 if (inst
.opcode
->op
== OP_ADD
)
293 regs
[rd
] = pv_add_constant (regs
[rn
],
294 inst
.operands
[2].imm
.value
);
298 regs
[rd
] = pv_add_constant (regs
[rn
],
299 -inst
.operands
[2].imm
.value
);
302 else if (inst
.opcode
->iclass
== pcreladdr
303 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
305 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
306 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
308 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
310 else if (inst
.opcode
->iclass
== branch_imm
)
312 /* Stop analysis on branch. */
315 else if (inst
.opcode
->iclass
== condbranch
)
317 /* Stop analysis on branch. */
320 else if (inst
.opcode
->iclass
== branch_reg
)
322 /* Stop analysis on branch. */
325 else if (inst
.opcode
->iclass
== compbranch
)
327 /* Stop analysis on branch. */
330 else if (inst
.opcode
->op
== OP_MOVZ
)
332 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
333 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
335 else if (inst
.opcode
->iclass
== log_shift
336 && strcmp (inst
.opcode
->name
, "orr") == 0)
338 unsigned rd
= inst
.operands
[0].reg
.regno
;
339 unsigned rn
= inst
.operands
[1].reg
.regno
;
340 unsigned rm
= inst
.operands
[2].reg
.regno
;
342 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
343 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
344 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
346 if (inst
.operands
[2].shifter
.amount
== 0
347 && rn
== AARCH64_SP_REGNUM
)
353 debug_printf ("aarch64: prologue analysis gave up "
354 "addr=%s opcode=0x%x (orr x register)\n",
355 core_addr_to_string_nz (start
), insn
);
360 else if (inst
.opcode
->op
== OP_STUR
)
362 unsigned rt
= inst
.operands
[0].reg
.regno
;
363 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
365 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
367 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
368 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
369 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
370 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
372 stack
.store (pv_add_constant (regs
[rn
],
373 inst
.operands
[1].addr
.offset
.imm
),
374 is64
? 8 : 4, regs
[rt
]);
376 else if ((inst
.opcode
->iclass
== ldstpair_off
377 || (inst
.opcode
->iclass
== ldstpair_indexed
378 && inst
.operands
[2].addr
.preind
))
379 && strcmp ("stp", inst
.opcode
->name
) == 0)
381 /* STP with addressing mode Pre-indexed and Base register. */
384 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
385 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
387 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
388 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
389 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
390 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
391 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
392 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
394 /* If recording this store would invalidate the store area
395 (perhaps because rn is not known) then we should abandon
396 further prologue analysis. */
397 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
400 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
403 rt1
= inst
.operands
[0].reg
.regno
;
404 rt2
= inst
.operands
[1].reg
.regno
;
405 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
407 /* Only bottom 64-bit of each V register (D register) need
409 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
410 rt1
+= AARCH64_X_REGISTER_COUNT
;
411 rt2
+= AARCH64_X_REGISTER_COUNT
;
414 stack
.store (pv_add_constant (regs
[rn
], imm
), 8,
416 stack
.store (pv_add_constant (regs
[rn
], imm
+ 8), 8,
419 if (inst
.operands
[2].addr
.writeback
)
420 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
423 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
424 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
425 && (inst
.opcode
->op
== OP_STR_POS
426 || inst
.opcode
->op
== OP_STRF_POS
)))
427 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
428 && strcmp ("str", inst
.opcode
->name
) == 0)
430 /* STR (immediate) */
431 unsigned int rt
= inst
.operands
[0].reg
.regno
;
432 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
433 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
435 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
436 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
437 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
439 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
441 /* Only bottom 64-bit of each V register (D register) need
443 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
444 rt
+= AARCH64_X_REGISTER_COUNT
;
447 stack
.store (pv_add_constant (regs
[rn
], imm
),
448 is64
? 8 : 4, regs
[rt
]);
449 if (inst
.operands
[1].addr
.writeback
)
450 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
452 else if (inst
.opcode
->iclass
== testbranch
)
454 /* Stop analysis on branch. */
461 debug_printf ("aarch64: prologue analysis gave up addr=%s"
463 core_addr_to_string_nz (start
), insn
);
472 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
474 /* Frame pointer is fp. Frame size is constant. */
475 cache
->framereg
= AARCH64_FP_REGNUM
;
476 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
478 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
480 /* Try the stack pointer. */
481 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
482 cache
->framereg
= AARCH64_SP_REGNUM
;
486 /* We're just out of luck. We don't know where the frame is. */
487 cache
->framereg
= -1;
488 cache
->framesize
= 0;
491 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
495 if (stack
.find_reg (gdbarch
, i
, &offset
))
496 cache
->saved_regs
[i
].addr
= offset
;
499 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
501 int regnum
= gdbarch_num_regs (gdbarch
);
504 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
506 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
513 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
514 CORE_ADDR start
, CORE_ADDR limit
,
515 struct aarch64_prologue_cache
*cache
)
517 instruction_reader reader
;
519 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
525 namespace selftests
{
527 /* Instruction reader from manually cooked instruction sequences. */
529 class instruction_reader_test
: public abstract_instruction_reader
532 template<size_t SIZE
>
533 explicit instruction_reader_test (const uint32_t (&insns
)[SIZE
])
534 : m_insns (insns
), m_insns_size (SIZE
)
537 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
540 SELF_CHECK (len
== 4);
541 SELF_CHECK (memaddr
% 4 == 0);
542 SELF_CHECK (memaddr
/ 4 < m_insns_size
);
544 return m_insns
[memaddr
/ 4];
548 const uint32_t *m_insns
;
553 aarch64_analyze_prologue_test (void)
555 struct gdbarch_info info
;
557 gdbarch_info_init (&info
);
558 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
560 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
561 SELF_CHECK (gdbarch
!= NULL
);
563 /* Test the simple prologue in which frame pointer is used. */
565 struct aarch64_prologue_cache cache
;
566 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
568 static const uint32_t insns
[] = {
569 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
570 0x910003fd, /* mov x29, sp */
571 0x97ffffe6, /* bl 0x400580 */
573 instruction_reader_test
reader (insns
);
575 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
576 SELF_CHECK (end
== 4 * 2);
578 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
579 SELF_CHECK (cache
.framesize
== 272);
581 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
583 if (i
== AARCH64_FP_REGNUM
)
584 SELF_CHECK (cache
.saved_regs
[i
].addr
== -272);
585 else if (i
== AARCH64_LR_REGNUM
)
586 SELF_CHECK (cache
.saved_regs
[i
].addr
== -264);
588 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
591 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
593 int regnum
= gdbarch_num_regs (gdbarch
);
595 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
600 /* Test a prologue in which STR is used and frame pointer is not
603 struct aarch64_prologue_cache cache
;
604 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
606 static const uint32_t insns
[] = {
607 0xf81d0ff3, /* str x19, [sp, #-48]! */
608 0xb9002fe0, /* str w0, [sp, #44] */
609 0xf90013e1, /* str x1, [sp, #32]*/
610 0xfd000fe0, /* str d0, [sp, #24] */
611 0xaa0203f3, /* mov x19, x2 */
612 0xf94013e0, /* ldr x0, [sp, #32] */
614 instruction_reader_test
reader (insns
);
616 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
618 SELF_CHECK (end
== 4 * 5);
620 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
621 SELF_CHECK (cache
.framesize
== 48);
623 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
626 SELF_CHECK (cache
.saved_regs
[i
].addr
== -16);
628 SELF_CHECK (cache
.saved_regs
[i
].addr
== -48);
630 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
633 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
635 int regnum
= gdbarch_num_regs (gdbarch
);
638 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
641 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
646 } // namespace selftests
647 #endif /* GDB_SELF_TEST */
649 /* Implement the "skip_prologue" gdbarch method. */
652 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
654 CORE_ADDR func_addr
, limit_pc
;
656 /* See if we can determine the end of the prologue via the symbol
657 table. If so, then return either PC, or the PC after the
658 prologue, whichever is greater. */
659 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
661 CORE_ADDR post_prologue_pc
662 = skip_prologue_using_sal (gdbarch
, func_addr
);
664 if (post_prologue_pc
!= 0)
665 return std::max (pc
, post_prologue_pc
);
668 /* Can't determine prologue from the symbol table, need to examine
671 /* Find an upper limit on the function prologue using the debug
672 information. If the debug information could not be used to
673 provide that bound, then use an arbitrary large number as the
675 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
677 limit_pc
= pc
+ 128; /* Magic. */
679 /* Try disassembling prologue. */
680 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
683 /* Scan the function prologue for THIS_FRAME and populate the prologue
687 aarch64_scan_prologue (struct frame_info
*this_frame
,
688 struct aarch64_prologue_cache
*cache
)
690 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
691 CORE_ADDR prologue_start
;
692 CORE_ADDR prologue_end
;
693 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
694 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
696 cache
->prev_pc
= prev_pc
;
698 /* Assume we do not find a frame. */
699 cache
->framereg
= -1;
700 cache
->framesize
= 0;
702 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
705 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
709 /* No line info so use the current PC. */
710 prologue_end
= prev_pc
;
712 else if (sal
.end
< prologue_end
)
714 /* The next line begins after the function end. */
715 prologue_end
= sal
.end
;
718 prologue_end
= std::min (prologue_end
, prev_pc
);
719 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
725 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
729 cache
->framereg
= AARCH64_FP_REGNUM
;
730 cache
->framesize
= 16;
731 cache
->saved_regs
[29].addr
= 0;
732 cache
->saved_regs
[30].addr
= 8;
736 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
737 function may throw an exception if the inferior's registers or memory is
741 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
742 struct aarch64_prologue_cache
*cache
)
744 CORE_ADDR unwound_fp
;
747 aarch64_scan_prologue (this_frame
, cache
);
749 if (cache
->framereg
== -1)
752 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
756 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
758 /* Calculate actual addresses of saved registers using offsets
759 determined by aarch64_analyze_prologue. */
760 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
761 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
762 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
764 cache
->func
= get_frame_func (this_frame
);
766 cache
->available_p
= 1;
769 /* Allocate and fill in *THIS_CACHE with information about the prologue of
770 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
771 Return a pointer to the current aarch64_prologue_cache in
774 static struct aarch64_prologue_cache
*
775 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
777 struct aarch64_prologue_cache
*cache
;
779 if (*this_cache
!= NULL
)
780 return (struct aarch64_prologue_cache
*) *this_cache
;
782 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
783 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
788 aarch64_make_prologue_cache_1 (this_frame
, cache
);
790 CATCH (ex
, RETURN_MASK_ERROR
)
792 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
793 throw_exception (ex
);
800 /* Implement the "stop_reason" frame_unwind method. */
802 static enum unwind_stop_reason
803 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
806 struct aarch64_prologue_cache
*cache
807 = aarch64_make_prologue_cache (this_frame
, this_cache
);
809 if (!cache
->available_p
)
810 return UNWIND_UNAVAILABLE
;
812 /* Halt the backtrace at "_start". */
813 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
814 return UNWIND_OUTERMOST
;
816 /* We've hit a wall, stop. */
817 if (cache
->prev_sp
== 0)
818 return UNWIND_OUTERMOST
;
820 return UNWIND_NO_REASON
;
823 /* Our frame ID for a normal frame is the current function's starting
824 PC and the caller's SP when we were called. */
827 aarch64_prologue_this_id (struct frame_info
*this_frame
,
828 void **this_cache
, struct frame_id
*this_id
)
830 struct aarch64_prologue_cache
*cache
831 = aarch64_make_prologue_cache (this_frame
, this_cache
);
833 if (!cache
->available_p
)
834 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
836 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
839 /* Implement the "prev_register" frame_unwind method. */
841 static struct value
*
842 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
843 void **this_cache
, int prev_regnum
)
845 struct aarch64_prologue_cache
*cache
846 = aarch64_make_prologue_cache (this_frame
, this_cache
);
848 /* If we are asked to unwind the PC, then we need to return the LR
849 instead. The prologue may save PC, but it will point into this
850 frame's prologue, not the next frame's resume location. */
851 if (prev_regnum
== AARCH64_PC_REGNUM
)
855 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
856 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
859 /* SP is generally not saved to the stack, but this frame is
860 identified by the next frame's stack pointer at the time of the
861 call. The value was already reconstructed into PREV_SP. */
874 if (prev_regnum
== AARCH64_SP_REGNUM
)
875 return frame_unwind_got_constant (this_frame
, prev_regnum
,
878 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
882 /* AArch64 prologue unwinder. */
883 struct frame_unwind aarch64_prologue_unwind
=
886 aarch64_prologue_frame_unwind_stop_reason
,
887 aarch64_prologue_this_id
,
888 aarch64_prologue_prev_register
,
890 default_frame_sniffer
893 /* Allocate and fill in *THIS_CACHE with information about the prologue of
894 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
895 Return a pointer to the current aarch64_prologue_cache in
898 static struct aarch64_prologue_cache
*
899 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
901 struct aarch64_prologue_cache
*cache
;
903 if (*this_cache
!= NULL
)
904 return (struct aarch64_prologue_cache
*) *this_cache
;
906 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
907 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
912 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
914 cache
->prev_pc
= get_frame_pc (this_frame
);
915 cache
->available_p
= 1;
917 CATCH (ex
, RETURN_MASK_ERROR
)
919 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
920 throw_exception (ex
);
927 /* Implement the "stop_reason" frame_unwind method. */
929 static enum unwind_stop_reason
930 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
933 struct aarch64_prologue_cache
*cache
934 = aarch64_make_stub_cache (this_frame
, this_cache
);
936 if (!cache
->available_p
)
937 return UNWIND_UNAVAILABLE
;
939 return UNWIND_NO_REASON
;
942 /* Our frame ID for a stub frame is the current SP and LR. */
945 aarch64_stub_this_id (struct frame_info
*this_frame
,
946 void **this_cache
, struct frame_id
*this_id
)
948 struct aarch64_prologue_cache
*cache
949 = aarch64_make_stub_cache (this_frame
, this_cache
);
951 if (cache
->available_p
)
952 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
954 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
957 /* Implement the "sniffer" frame_unwind method. */
960 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
961 struct frame_info
*this_frame
,
962 void **this_prologue_cache
)
964 CORE_ADDR addr_in_block
;
967 addr_in_block
= get_frame_address_in_block (this_frame
);
968 if (in_plt_section (addr_in_block
)
969 /* We also use the stub winder if the target memory is unreadable
970 to avoid having the prologue unwinder trying to read it. */
971 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
977 /* AArch64 stub unwinder. */
978 struct frame_unwind aarch64_stub_unwind
=
981 aarch64_stub_frame_unwind_stop_reason
,
982 aarch64_stub_this_id
,
983 aarch64_prologue_prev_register
,
985 aarch64_stub_unwind_sniffer
988 /* Return the frame base address of *THIS_FRAME. */
991 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
993 struct aarch64_prologue_cache
*cache
994 = aarch64_make_prologue_cache (this_frame
, this_cache
);
996 return cache
->prev_sp
- cache
->framesize
;
999 /* AArch64 default frame base information. */
1000 struct frame_base aarch64_normal_base
=
1002 &aarch64_prologue_unwind
,
1003 aarch64_normal_frame_base
,
1004 aarch64_normal_frame_base
,
1005 aarch64_normal_frame_base
1008 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1009 dummy frame. The frame ID's base needs to match the TOS value
1010 saved by save_dummy_frame_tos () and returned from
1011 aarch64_push_dummy_call, and the PC needs to match the dummy
1012 frame's breakpoint. */
1014 static struct frame_id
1015 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1017 return frame_id_build (get_frame_register_unsigned (this_frame
,
1019 get_frame_pc (this_frame
));
1022 /* Implement the "unwind_pc" gdbarch method. */
1025 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1028 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1033 /* Implement the "unwind_sp" gdbarch method. */
1036 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1038 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1041 /* Return the value of the REGNUM register in the previous frame of
1044 static struct value
*
1045 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1046 void **this_cache
, int regnum
)
1052 case AARCH64_PC_REGNUM
:
1053 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1054 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1057 internal_error (__FILE__
, __LINE__
,
1058 _("Unexpected register %d"), regnum
);
1062 /* Implement the "init_reg" dwarf2_frame_ops method. */
1065 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1066 struct dwarf2_frame_state_reg
*reg
,
1067 struct frame_info
*this_frame
)
1071 case AARCH64_PC_REGNUM
:
1072 reg
->how
= DWARF2_FRAME_REG_FN
;
1073 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1075 case AARCH64_SP_REGNUM
:
1076 reg
->how
= DWARF2_FRAME_REG_CFA
;
1081 /* When arguments must be pushed onto the stack, they go on in reverse
1082 order. The code below implements a FILO (stack) to do this. */
1086 /* Value to pass on stack. It can be NULL if this item is for stack
1088 const gdb_byte
*data
;
1090 /* Size in bytes of value to pass on stack. */
1094 DEF_VEC_O (stack_item_t
);
1096 /* Return the alignment (in bytes) of the given type. */
1099 aarch64_type_align (struct type
*t
)
1105 t
= check_typedef (t
);
1106 switch (TYPE_CODE (t
))
1109 /* Should never happen. */
1110 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1114 case TYPE_CODE_ENUM
:
1118 case TYPE_CODE_RANGE
:
1119 case TYPE_CODE_BITSTRING
:
1121 case TYPE_CODE_RVALUE_REF
:
1122 case TYPE_CODE_CHAR
:
1123 case TYPE_CODE_BOOL
:
1124 return TYPE_LENGTH (t
);
1126 case TYPE_CODE_ARRAY
:
1127 if (TYPE_VECTOR (t
))
1129 /* Use the natural alignment for vector types (the same for
1130 scalar type), but the maximum alignment is 128-bit. */
1131 if (TYPE_LENGTH (t
) > 16)
1134 return TYPE_LENGTH (t
);
1137 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1138 case TYPE_CODE_COMPLEX
:
1139 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1141 case TYPE_CODE_STRUCT
:
1142 case TYPE_CODE_UNION
:
1144 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1146 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1154 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1156 Return the number of register required, or -1 on failure.
1158 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1159 to the element, else fail if the type of this element does not match the
1163 aapcs_is_vfp_call_or_return_candidate_1 (struct type
*type
,
1164 struct type
**fundamental_type
)
1166 if (type
== nullptr)
1169 switch (TYPE_CODE (type
))
1172 if (TYPE_LENGTH (type
) > 16)
1175 if (*fundamental_type
== nullptr)
1176 *fundamental_type
= type
;
1177 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1178 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1183 case TYPE_CODE_COMPLEX
:
1185 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1186 if (TYPE_LENGTH (target_type
) > 16)
1189 if (*fundamental_type
== nullptr)
1190 *fundamental_type
= target_type
;
1191 else if (TYPE_LENGTH (target_type
) != TYPE_LENGTH (*fundamental_type
)
1192 || TYPE_CODE (target_type
) != TYPE_CODE (*fundamental_type
))
1198 case TYPE_CODE_ARRAY
:
1200 if (TYPE_VECTOR (type
))
1202 if (TYPE_LENGTH (type
) != 8 && TYPE_LENGTH (type
) != 16)
1205 if (*fundamental_type
== nullptr)
1206 *fundamental_type
= type
;
1207 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1208 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1215 struct type
*target_type
= TYPE_TARGET_TYPE (type
);
1216 int count
= aapcs_is_vfp_call_or_return_candidate_1
1217 (target_type
, fundamental_type
);
1222 count
*= TYPE_LENGTH (type
);
1227 case TYPE_CODE_STRUCT
:
1228 case TYPE_CODE_UNION
:
1232 for (int i
= 0; i
< TYPE_NFIELDS (type
); i
++)
1234 struct type
*member
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
1236 int sub_count
= aapcs_is_vfp_call_or_return_candidate_1
1237 (member
, fundamental_type
);
1238 if (sub_count
== -1)
1252 /* Return true if an argument, whose type is described by TYPE, can be passed or
1253 returned in simd/fp registers, providing enough parameter passing registers
1254 are available. This is as described in the AAPCS64.
1256 Upon successful return, *COUNT returns the number of needed registers,
1257 *FUNDAMENTAL_TYPE contains the type of those registers.
1259 Candidate as per the AAPCS64 5.4.2.C is either a:
1262 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1263 all the members are floats and has at most 4 members.
1264 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1265 all the members are short vectors and has at most 4 members.
1268 Note that HFAs and HVAs can include nested structures and arrays. */
1271 aapcs_is_vfp_call_or_return_candidate (struct type
*type
, int *count
,
1272 struct type
**fundamental_type
)
1274 if (type
== nullptr)
1277 *fundamental_type
= nullptr;
1279 int ag_count
= aapcs_is_vfp_call_or_return_candidate_1 (type
,
1282 if (ag_count
> 0 && ag_count
<= HA_MAX_NUM_FLDS
)
1291 /* AArch64 function call information structure. */
1292 struct aarch64_call_info
1294 /* the current argument number. */
1297 /* The next general purpose register number, equivalent to NGRN as
1298 described in the AArch64 Procedure Call Standard. */
1301 /* The next SIMD and floating point register number, equivalent to
1302 NSRN as described in the AArch64 Procedure Call Standard. */
1305 /* The next stacked argument address, equivalent to NSAA as
1306 described in the AArch64 Procedure Call Standard. */
1309 /* Stack item vector. */
1310 VEC(stack_item_t
) *si
;
1313 /* Pass a value in a sequence of consecutive X registers. The caller
1314 is responsbile for ensuring sufficient registers are available. */
1317 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1318 struct aarch64_call_info
*info
, struct type
*type
,
1321 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1322 int len
= TYPE_LENGTH (type
);
1323 enum type_code typecode
= TYPE_CODE (type
);
1324 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1325 const bfd_byte
*buf
= value_contents (arg
);
1331 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1332 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1336 /* Adjust sub-word struct/union args when big-endian. */
1337 if (byte_order
== BFD_ENDIAN_BIG
1338 && partial_len
< X_REGISTER_SIZE
1339 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1340 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1344 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1345 gdbarch_register_name (gdbarch
, regnum
),
1346 phex (regval
, X_REGISTER_SIZE
));
1348 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1355 /* Attempt to marshall a value in a V register. Return 1 if
1356 successful, or 0 if insufficient registers are available. This
1357 function, unlike the equivalent pass_in_x() function does not
1358 handle arguments spread across multiple registers. */
1361 pass_in_v (struct gdbarch
*gdbarch
,
1362 struct regcache
*regcache
,
1363 struct aarch64_call_info
*info
,
1364 int len
, const bfd_byte
*buf
)
1368 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1369 /* Enough space for a full vector register. */
1370 gdb_byte reg
[register_size (gdbarch
, regnum
)];
1371 gdb_assert (len
<= sizeof (reg
));
1376 memset (reg
, 0, sizeof (reg
));
1377 /* PCS C.1, the argument is allocated to the least significant
1378 bits of V register. */
1379 memcpy (reg
, buf
, len
);
1380 regcache
->cooked_write (regnum
, reg
);
1384 debug_printf ("arg %d in %s\n", info
->argnum
,
1385 gdbarch_register_name (gdbarch
, regnum
));
1393 /* Marshall an argument onto the stack. */
1396 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1399 const bfd_byte
*buf
= value_contents (arg
);
1400 int len
= TYPE_LENGTH (type
);
1406 align
= aarch64_type_align (type
);
1408 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1409 Natural alignment of the argument's type. */
1410 align
= align_up (align
, 8);
1412 /* The AArch64 PCS requires at most doubleword alignment. */
1418 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1424 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1427 if (info
->nsaa
& (align
- 1))
1429 /* Push stack alignment padding. */
1430 int pad
= align
- (info
->nsaa
& (align
- 1));
1435 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1440 /* Marshall an argument into a sequence of one or more consecutive X
1441 registers or, if insufficient X registers are available then onto
1445 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1446 struct aarch64_call_info
*info
, struct type
*type
,
1449 int len
= TYPE_LENGTH (type
);
1450 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1452 /* PCS C.13 - Pass in registers if we have enough spare */
1453 if (info
->ngrn
+ nregs
<= 8)
1455 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1456 info
->ngrn
+= nregs
;
1461 pass_on_stack (info
, type
, arg
);
1465 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1466 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1467 registers. A return value of false is an error state as the value will have
1468 been partially passed to the stack. */
1470 pass_in_v_vfp_candidate (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1471 struct aarch64_call_info
*info
, struct type
*arg_type
,
1474 switch (TYPE_CODE (arg_type
))
1477 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1478 value_contents (arg
));
1481 case TYPE_CODE_COMPLEX
:
1483 const bfd_byte
*buf
= value_contents (arg
);
1484 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (arg_type
));
1486 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1490 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1491 buf
+ TYPE_LENGTH (target_type
));
1494 case TYPE_CODE_ARRAY
:
1495 if (TYPE_VECTOR (arg_type
))
1496 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1497 value_contents (arg
));
1500 case TYPE_CODE_STRUCT
:
1501 case TYPE_CODE_UNION
:
1502 for (int i
= 0; i
< TYPE_NFIELDS (arg_type
); i
++)
1504 struct value
*field
= value_primitive_field (arg
, 0, i
, arg_type
);
1505 struct type
*field_type
= check_typedef (value_type (field
));
1507 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, info
, field_type
,
1518 /* Implement the "push_dummy_call" gdbarch method. */
1521 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1522 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1524 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1525 CORE_ADDR struct_addr
)
1528 struct aarch64_call_info info
;
1529 struct type
*func_type
;
1530 struct type
*return_type
;
1531 int lang_struct_return
;
1533 memset (&info
, 0, sizeof (info
));
1535 /* We need to know what the type of the called function is in order
1536 to determine the number of named/anonymous arguments for the
1537 actual argument placement, and the return type in order to handle
1538 return value correctly.
1540 The generic code above us views the decision of return in memory
1541 or return in registers as a two stage processes. The language
1542 handler is consulted first and may decide to return in memory (eg
1543 class with copy constructor returned by value), this will cause
1544 the generic code to allocate space AND insert an initial leading
1547 If the language code does not decide to pass in memory then the
1548 target code is consulted.
1550 If the language code decides to pass in memory we want to move
1551 the pointer inserted as the initial argument from the argument
1552 list and into X8, the conventional AArch64 struct return pointer
1555 This is slightly awkward, ideally the flag "lang_struct_return"
1556 would be passed to the targets implementation of push_dummy_call.
1557 Rather that change the target interface we call the language code
1558 directly ourselves. */
1560 func_type
= check_typedef (value_type (function
));
1562 /* Dereference function pointer types. */
1563 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1564 func_type
= TYPE_TARGET_TYPE (func_type
);
1566 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1567 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1569 /* If language_pass_by_reference () returned true we will have been
1570 given an additional initial argument, a hidden pointer to the
1571 return slot in memory. */
1572 return_type
= TYPE_TARGET_TYPE (func_type
);
1573 lang_struct_return
= language_pass_by_reference (return_type
);
1575 /* Set the return address. For the AArch64, the return breakpoint
1576 is always at BP_ADDR. */
1577 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1579 /* If we were given an initial argument for the return slot because
1580 lang_struct_return was true, lose it. */
1581 if (lang_struct_return
)
1587 /* The struct_return pointer occupies X8. */
1588 if (struct_return
|| lang_struct_return
)
1592 debug_printf ("struct return in %s = 0x%s\n",
1593 gdbarch_register_name (gdbarch
,
1594 AARCH64_STRUCT_RETURN_REGNUM
),
1595 paddress (gdbarch
, struct_addr
));
1597 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1601 for (argnum
= 0; argnum
< nargs
; argnum
++)
1603 struct value
*arg
= args
[argnum
];
1604 struct type
*arg_type
, *fundamental_type
;
1607 arg_type
= check_typedef (value_type (arg
));
1608 len
= TYPE_LENGTH (arg_type
);
1610 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1611 if there are enough spare registers. */
1612 if (aapcs_is_vfp_call_or_return_candidate (arg_type
, &elements
,
1615 if (info
.nsrn
+ elements
<= 8)
1617 /* We know that we have sufficient registers available therefore
1618 this will never need to fallback to the stack. */
1619 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, &info
, arg_type
,
1621 gdb_assert_not_reached ("Failed to push args");
1626 pass_on_stack (&info
, arg_type
, arg
);
1631 switch (TYPE_CODE (arg_type
))
1634 case TYPE_CODE_BOOL
:
1635 case TYPE_CODE_CHAR
:
1636 case TYPE_CODE_RANGE
:
1637 case TYPE_CODE_ENUM
:
1640 /* Promote to 32 bit integer. */
1641 if (TYPE_UNSIGNED (arg_type
))
1642 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1644 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1645 arg
= value_cast (arg_type
, arg
);
1647 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1650 case TYPE_CODE_STRUCT
:
1651 case TYPE_CODE_ARRAY
:
1652 case TYPE_CODE_UNION
:
1655 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1656 invisible reference. */
1658 /* Allocate aligned storage. */
1659 sp
= align_down (sp
- len
, 16);
1661 /* Write the real data into the stack. */
1662 write_memory (sp
, value_contents (arg
), len
);
1664 /* Construct the indirection. */
1665 arg_type
= lookup_pointer_type (arg_type
);
1666 arg
= value_from_pointer (arg_type
, sp
);
1667 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1670 /* PCS C.15 / C.18 multiple values pass. */
1671 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1675 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1680 /* Make sure stack retains 16 byte alignment. */
1682 sp
-= 16 - (info
.nsaa
& 15);
1684 while (!VEC_empty (stack_item_t
, info
.si
))
1686 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1689 if (si
->data
!= NULL
)
1690 write_memory (sp
, si
->data
, si
->len
);
1691 VEC_pop (stack_item_t
, info
.si
);
1694 VEC_free (stack_item_t
, info
.si
);
1696 /* Finally, update the SP register. */
1697 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1702 /* Implement the "frame_align" gdbarch method. */
1705 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1707 /* Align the stack to sixteen bytes. */
1708 return sp
& ~(CORE_ADDR
) 15;
1711 /* Return the type for an AdvSISD Q register. */
1713 static struct type
*
1714 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1716 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1718 if (tdep
->vnq_type
== NULL
)
1723 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1726 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1727 append_composite_type_field (t
, "u", elem
);
1729 elem
= builtin_type (gdbarch
)->builtin_int128
;
1730 append_composite_type_field (t
, "s", elem
);
1735 return tdep
->vnq_type
;
1738 /* Return the type for an AdvSISD D register. */
1740 static struct type
*
1741 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1743 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1745 if (tdep
->vnd_type
== NULL
)
1750 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1753 elem
= builtin_type (gdbarch
)->builtin_double
;
1754 append_composite_type_field (t
, "f", elem
);
1756 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1757 append_composite_type_field (t
, "u", elem
);
1759 elem
= builtin_type (gdbarch
)->builtin_int64
;
1760 append_composite_type_field (t
, "s", elem
);
1765 return tdep
->vnd_type
;
1768 /* Return the type for an AdvSISD S register. */
1770 static struct type
*
1771 aarch64_vns_type (struct gdbarch
*gdbarch
)
1773 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1775 if (tdep
->vns_type
== NULL
)
1780 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1783 elem
= builtin_type (gdbarch
)->builtin_float
;
1784 append_composite_type_field (t
, "f", elem
);
1786 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1787 append_composite_type_field (t
, "u", elem
);
1789 elem
= builtin_type (gdbarch
)->builtin_int32
;
1790 append_composite_type_field (t
, "s", elem
);
1795 return tdep
->vns_type
;
1798 /* Return the type for an AdvSISD H register. */
1800 static struct type
*
1801 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1803 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1805 if (tdep
->vnh_type
== NULL
)
1810 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1813 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1814 append_composite_type_field (t
, "u", elem
);
1816 elem
= builtin_type (gdbarch
)->builtin_int16
;
1817 append_composite_type_field (t
, "s", elem
);
1822 return tdep
->vnh_type
;
1825 /* Return the type for an AdvSISD B register. */
1827 static struct type
*
1828 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1830 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1832 if (tdep
->vnb_type
== NULL
)
1837 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1840 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1841 append_composite_type_field (t
, "u", elem
);
1843 elem
= builtin_type (gdbarch
)->builtin_int8
;
1844 append_composite_type_field (t
, "s", elem
);
1849 return tdep
->vnb_type
;
1852 /* Return the type for an AdvSISD V register. */
1854 static struct type
*
1855 aarch64_vnv_type (struct gdbarch
*gdbarch
)
1857 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1859 if (tdep
->vnv_type
== NULL
)
1861 struct type
*t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnv",
1864 append_composite_type_field (t
, "d", aarch64_vnd_type (gdbarch
));
1865 append_composite_type_field (t
, "s", aarch64_vns_type (gdbarch
));
1866 append_composite_type_field (t
, "h", aarch64_vnh_type (gdbarch
));
1867 append_composite_type_field (t
, "b", aarch64_vnb_type (gdbarch
));
1868 append_composite_type_field (t
, "q", aarch64_vnq_type (gdbarch
));
1873 return tdep
->vnv_type
;
1876 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1879 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1881 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1882 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1884 if (reg
== AARCH64_DWARF_SP
)
1885 return AARCH64_SP_REGNUM
;
1887 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1888 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1890 if (reg
== AARCH64_DWARF_SVE_VG
)
1891 return AARCH64_SVE_VG_REGNUM
;
1893 if (reg
== AARCH64_DWARF_SVE_FFR
)
1894 return AARCH64_SVE_FFR_REGNUM
;
1896 if (reg
>= AARCH64_DWARF_SVE_P0
&& reg
<= AARCH64_DWARF_SVE_P0
+ 15)
1897 return AARCH64_SVE_P0_REGNUM
+ reg
- AARCH64_DWARF_SVE_P0
;
1899 if (reg
>= AARCH64_DWARF_SVE_Z0
&& reg
<= AARCH64_DWARF_SVE_Z0
+ 15)
1900 return AARCH64_SVE_Z0_REGNUM
+ reg
- AARCH64_DWARF_SVE_Z0
;
1905 /* Implement the "print_insn" gdbarch method. */
1908 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1910 info
->symbols
= NULL
;
1911 return default_print_insn (memaddr
, info
);
1914 /* AArch64 BRK software debug mode instruction.
1915 Note that AArch64 code is always little-endian.
1916 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1917 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1919 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
1921 /* Extract from an array REGS containing the (raw) register state a
1922 function return value of type TYPE, and copy that, in virtual
1923 format, into VALBUF. */
1926 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1929 struct gdbarch
*gdbarch
= regs
->arch ();
1930 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1932 struct type
*fundamental_type
;
1934 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
1937 int len
= TYPE_LENGTH (fundamental_type
);
1939 for (int i
= 0; i
< elements
; i
++)
1941 int regno
= AARCH64_V0_REGNUM
+ i
;
1942 /* Enough space for a full vector register. */
1943 gdb_byte buf
[register_size (gdbarch
, regno
)];
1944 gdb_assert (len
<= sizeof (buf
));
1948 debug_printf ("read HFA or HVA return value element %d from %s\n",
1950 gdbarch_register_name (gdbarch
, regno
));
1952 regs
->cooked_read (regno
, buf
);
1954 memcpy (valbuf
, buf
, len
);
1958 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1959 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1960 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1961 || TYPE_CODE (type
) == TYPE_CODE_PTR
1962 || TYPE_IS_REFERENCE (type
)
1963 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1965 /* If the the type is a plain integer, then the access is
1966 straight-forward. Otherwise we have to play around a bit
1968 int len
= TYPE_LENGTH (type
);
1969 int regno
= AARCH64_X0_REGNUM
;
1974 /* By using store_unsigned_integer we avoid having to do
1975 anything special for small big-endian values. */
1976 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1977 store_unsigned_integer (valbuf
,
1978 (len
> X_REGISTER_SIZE
1979 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1980 len
-= X_REGISTER_SIZE
;
1981 valbuf
+= X_REGISTER_SIZE
;
1986 /* For a structure or union the behaviour is as if the value had
1987 been stored to word-aligned memory and then loaded into
1988 registers with 64-bit load instruction(s). */
1989 int len
= TYPE_LENGTH (type
);
1990 int regno
= AARCH64_X0_REGNUM
;
1991 bfd_byte buf
[X_REGISTER_SIZE
];
1995 regs
->cooked_read (regno
++, buf
);
1996 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1997 len
-= X_REGISTER_SIZE
;
1998 valbuf
+= X_REGISTER_SIZE
;
2004 /* Will a function return an aggregate type in memory or in a
2005 register? Return 0 if an aggregate type can be returned in a
2006 register, 1 if it must be returned in memory. */
2009 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
2011 type
= check_typedef (type
);
2013 struct type
*fundamental_type
;
2015 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2018 /* v0-v7 are used to return values and one register is allocated
2019 for one member. However, HFA or HVA has at most four members. */
2023 if (TYPE_LENGTH (type
) > 16)
2025 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2026 invisible reference. */
2034 /* Write into appropriate registers a function return value of type
2035 TYPE, given in virtual format. */
2038 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2039 const gdb_byte
*valbuf
)
2041 struct gdbarch
*gdbarch
= regs
->arch ();
2042 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2044 struct type
*fundamental_type
;
2046 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2049 int len
= TYPE_LENGTH (fundamental_type
);
2051 for (int i
= 0; i
< elements
; i
++)
2053 int regno
= AARCH64_V0_REGNUM
+ i
;
2054 /* Enough space for a full vector register. */
2055 gdb_byte tmpbuf
[register_size (gdbarch
, regno
)];
2056 gdb_assert (len
<= sizeof (tmpbuf
));
2060 debug_printf ("write HFA or HVA return value element %d to %s\n",
2062 gdbarch_register_name (gdbarch
, regno
));
2065 memcpy (tmpbuf
, valbuf
,
2066 len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2067 regs
->cooked_write (regno
, tmpbuf
);
2071 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2072 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2073 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2074 || TYPE_CODE (type
) == TYPE_CODE_PTR
2075 || TYPE_IS_REFERENCE (type
)
2076 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2078 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2080 /* Values of one word or less are zero/sign-extended and
2082 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2083 LONGEST val
= unpack_long (type
, valbuf
);
2085 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2086 regs
->cooked_write (AARCH64_X0_REGNUM
, tmpbuf
);
2090 /* Integral values greater than one word are stored in
2091 consecutive registers starting with r0. This will always
2092 be a multiple of the regiser size. */
2093 int len
= TYPE_LENGTH (type
);
2094 int regno
= AARCH64_X0_REGNUM
;
2098 regs
->cooked_write (regno
++, valbuf
);
2099 len
-= X_REGISTER_SIZE
;
2100 valbuf
+= X_REGISTER_SIZE
;
2106 /* For a structure or union the behaviour is as if the value had
2107 been stored to word-aligned memory and then loaded into
2108 registers with 64-bit load instruction(s). */
2109 int len
= TYPE_LENGTH (type
);
2110 int regno
= AARCH64_X0_REGNUM
;
2111 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2115 memcpy (tmpbuf
, valbuf
,
2116 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2117 regs
->cooked_write (regno
++, tmpbuf
);
2118 len
-= X_REGISTER_SIZE
;
2119 valbuf
+= X_REGISTER_SIZE
;
2124 /* Implement the "return_value" gdbarch method. */
2126 static enum return_value_convention
2127 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2128 struct type
*valtype
, struct regcache
*regcache
,
2129 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2132 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2133 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2134 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2136 if (aarch64_return_in_memory (gdbarch
, valtype
))
2139 debug_printf ("return value in memory\n");
2140 return RETURN_VALUE_STRUCT_CONVENTION
;
2145 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2148 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2151 debug_printf ("return value in registers\n");
2153 return RETURN_VALUE_REGISTER_CONVENTION
;
2156 /* Implement the "get_longjmp_target" gdbarch method. */
2159 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2162 gdb_byte buf
[X_REGISTER_SIZE
];
2163 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2164 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2165 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2167 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2169 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2173 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2177 /* Implement the "gen_return_address" gdbarch method. */
2180 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2181 struct agent_expr
*ax
, struct axs_value
*value
,
2184 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2185 value
->kind
= axs_lvalue_register
;
2186 value
->u
.reg
= AARCH64_LR_REGNUM
;
2190 /* Return the pseudo register name corresponding to register regnum. */
2193 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2195 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2197 static const char *const q_name
[] =
2199 "q0", "q1", "q2", "q3",
2200 "q4", "q5", "q6", "q7",
2201 "q8", "q9", "q10", "q11",
2202 "q12", "q13", "q14", "q15",
2203 "q16", "q17", "q18", "q19",
2204 "q20", "q21", "q22", "q23",
2205 "q24", "q25", "q26", "q27",
2206 "q28", "q29", "q30", "q31",
2209 static const char *const d_name
[] =
2211 "d0", "d1", "d2", "d3",
2212 "d4", "d5", "d6", "d7",
2213 "d8", "d9", "d10", "d11",
2214 "d12", "d13", "d14", "d15",
2215 "d16", "d17", "d18", "d19",
2216 "d20", "d21", "d22", "d23",
2217 "d24", "d25", "d26", "d27",
2218 "d28", "d29", "d30", "d31",
2221 static const char *const s_name
[] =
2223 "s0", "s1", "s2", "s3",
2224 "s4", "s5", "s6", "s7",
2225 "s8", "s9", "s10", "s11",
2226 "s12", "s13", "s14", "s15",
2227 "s16", "s17", "s18", "s19",
2228 "s20", "s21", "s22", "s23",
2229 "s24", "s25", "s26", "s27",
2230 "s28", "s29", "s30", "s31",
2233 static const char *const h_name
[] =
2235 "h0", "h1", "h2", "h3",
2236 "h4", "h5", "h6", "h7",
2237 "h8", "h9", "h10", "h11",
2238 "h12", "h13", "h14", "h15",
2239 "h16", "h17", "h18", "h19",
2240 "h20", "h21", "h22", "h23",
2241 "h24", "h25", "h26", "h27",
2242 "h28", "h29", "h30", "h31",
2245 static const char *const b_name
[] =
2247 "b0", "b1", "b2", "b3",
2248 "b4", "b5", "b6", "b7",
2249 "b8", "b9", "b10", "b11",
2250 "b12", "b13", "b14", "b15",
2251 "b16", "b17", "b18", "b19",
2252 "b20", "b21", "b22", "b23",
2253 "b24", "b25", "b26", "b27",
2254 "b28", "b29", "b30", "b31",
2257 regnum
-= gdbarch_num_regs (gdbarch
);
2259 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2260 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2262 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2263 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2265 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2266 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2268 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2269 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2271 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2272 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2274 if (tdep
->has_sve ())
2276 static const char *const sve_v_name
[] =
2278 "v0", "v1", "v2", "v3",
2279 "v4", "v5", "v6", "v7",
2280 "v8", "v9", "v10", "v11",
2281 "v12", "v13", "v14", "v15",
2282 "v16", "v17", "v18", "v19",
2283 "v20", "v21", "v22", "v23",
2284 "v24", "v25", "v26", "v27",
2285 "v28", "v29", "v30", "v31",
2288 if (regnum
>= AARCH64_SVE_V0_REGNUM
2289 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2290 return sve_v_name
[regnum
- AARCH64_SVE_V0_REGNUM
];
2293 internal_error (__FILE__
, __LINE__
,
2294 _("aarch64_pseudo_register_name: bad register number %d"),
2298 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2300 static struct type
*
2301 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2303 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2305 regnum
-= gdbarch_num_regs (gdbarch
);
2307 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2308 return aarch64_vnq_type (gdbarch
);
2310 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2311 return aarch64_vnd_type (gdbarch
);
2313 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2314 return aarch64_vns_type (gdbarch
);
2316 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2317 return aarch64_vnh_type (gdbarch
);
2319 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2320 return aarch64_vnb_type (gdbarch
);
2322 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2323 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2324 return aarch64_vnv_type (gdbarch
);
2326 internal_error (__FILE__
, __LINE__
,
2327 _("aarch64_pseudo_register_type: bad register number %d"),
2331 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2334 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2335 struct reggroup
*group
)
2337 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2339 regnum
-= gdbarch_num_regs (gdbarch
);
2341 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2342 return group
== all_reggroup
|| group
== vector_reggroup
;
2343 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2344 return (group
== all_reggroup
|| group
== vector_reggroup
2345 || group
== float_reggroup
);
2346 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2347 return (group
== all_reggroup
|| group
== vector_reggroup
2348 || group
== float_reggroup
);
2349 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2350 return group
== all_reggroup
|| group
== vector_reggroup
;
2351 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2352 return group
== all_reggroup
|| group
== vector_reggroup
;
2353 else if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2354 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2355 return group
== all_reggroup
|| group
== vector_reggroup
;
2357 return group
== all_reggroup
;
2360 /* Helper for aarch64_pseudo_read_value. */
2362 static struct value
*
2363 aarch64_pseudo_read_value_1 (struct gdbarch
*gdbarch
,
2364 readable_regcache
*regcache
, int regnum_offset
,
2365 int regsize
, struct value
*result_value
)
2367 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2369 /* Enough space for a full vector register. */
2370 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2371 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2373 if (regcache
->raw_read (v_regnum
, reg_buf
) != REG_VALID
)
2374 mark_value_bytes_unavailable (result_value
, 0,
2375 TYPE_LENGTH (value_type (result_value
)));
2377 memcpy (value_contents_raw (result_value
), reg_buf
, regsize
);
2379 return result_value
;
2382 /* Implement the "pseudo_register_read_value" gdbarch method. */
2384 static struct value
*
2385 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
, readable_regcache
*regcache
,
2388 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2389 struct value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
2391 VALUE_LVAL (result_value
) = lval_register
;
2392 VALUE_REGNUM (result_value
) = regnum
;
2394 regnum
-= gdbarch_num_regs (gdbarch
);
2396 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2397 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2398 regnum
- AARCH64_Q0_REGNUM
,
2399 Q_REGISTER_SIZE
, result_value
);
2401 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2402 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2403 regnum
- AARCH64_D0_REGNUM
,
2404 D_REGISTER_SIZE
, result_value
);
2406 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2407 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2408 regnum
- AARCH64_S0_REGNUM
,
2409 S_REGISTER_SIZE
, result_value
);
2411 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2412 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2413 regnum
- AARCH64_H0_REGNUM
,
2414 H_REGISTER_SIZE
, result_value
);
2416 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2417 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2418 regnum
- AARCH64_B0_REGNUM
,
2419 B_REGISTER_SIZE
, result_value
);
2421 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2422 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2423 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2424 regnum
- AARCH64_SVE_V0_REGNUM
,
2425 V_REGISTER_SIZE
, result_value
);
2427 gdb_assert_not_reached ("regnum out of bound");
2430 /* Helper for aarch64_pseudo_write. */
2433 aarch64_pseudo_write_1 (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2434 int regnum_offset
, int regsize
, const gdb_byte
*buf
)
2436 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2438 /* Enough space for a full vector register. */
2439 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2440 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2442 /* Ensure the register buffer is zero, we want gdb writes of the
2443 various 'scalar' pseudo registers to behavior like architectural
2444 writes, register width bytes are written the remainder are set to
2446 memset (reg_buf
, 0, register_size (gdbarch
, AARCH64_V0_REGNUM
));
2448 memcpy (reg_buf
, buf
, regsize
);
2449 regcache
->raw_write (v_regnum
, reg_buf
);
2452 /* Implement the "pseudo_register_write" gdbarch method. */
2455 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2456 int regnum
, const gdb_byte
*buf
)
2458 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2459 regnum
-= gdbarch_num_regs (gdbarch
);
2461 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2462 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2463 regnum
- AARCH64_Q0_REGNUM
, Q_REGISTER_SIZE
,
2466 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2467 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2468 regnum
- AARCH64_D0_REGNUM
, D_REGISTER_SIZE
,
2471 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2472 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2473 regnum
- AARCH64_S0_REGNUM
, S_REGISTER_SIZE
,
2476 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2477 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2478 regnum
- AARCH64_H0_REGNUM
, H_REGISTER_SIZE
,
2481 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2482 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2483 regnum
- AARCH64_B0_REGNUM
, B_REGISTER_SIZE
,
2486 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2487 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2488 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2489 regnum
- AARCH64_SVE_V0_REGNUM
,
2490 V_REGISTER_SIZE
, buf
);
2492 gdb_assert_not_reached ("regnum out of bound");
2495 /* Callback function for user_reg_add. */
2497 static struct value
*
2498 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2500 const int *reg_p
= (const int *) baton
;
2502 return value_of_register (*reg_p
, frame
);
2506 /* Implement the "software_single_step" gdbarch method, needed to
2507 single step through atomic sequences on AArch64. */
2509 static std::vector
<CORE_ADDR
>
2510 aarch64_software_single_step (struct regcache
*regcache
)
2512 struct gdbarch
*gdbarch
= regcache
->arch ();
2513 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2514 const int insn_size
= 4;
2515 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2516 CORE_ADDR pc
= regcache_read_pc (regcache
);
2517 CORE_ADDR breaks
[2] = { CORE_ADDR_MAX
, CORE_ADDR_MAX
};
2519 CORE_ADDR closing_insn
= 0;
2520 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2521 byte_order_for_code
);
2524 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2525 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2528 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2531 /* Look for a Load Exclusive instruction which begins the sequence. */
2532 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2535 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2538 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2539 byte_order_for_code
);
2541 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2543 /* Check if the instruction is a conditional branch. */
2544 if (inst
.opcode
->iclass
== condbranch
)
2546 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2548 if (bc_insn_count
>= 1)
2551 /* It is, so we'll try to set a breakpoint at the destination. */
2552 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2558 /* Look for the Store Exclusive which closes the atomic sequence. */
2559 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2566 /* We didn't find a closing Store Exclusive instruction, fall back. */
2570 /* Insert breakpoint after the end of the atomic sequence. */
2571 breaks
[0] = loc
+ insn_size
;
2573 /* Check for duplicated breakpoints, and also check that the second
2574 breakpoint is not within the atomic sequence. */
2576 && (breaks
[1] == breaks
[0]
2577 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2578 last_breakpoint
= 0;
2580 std::vector
<CORE_ADDR
> next_pcs
;
2582 /* Insert the breakpoint at the end of the sequence, and one at the
2583 destination of the conditional branch, if it exists. */
2584 for (index
= 0; index
<= last_breakpoint
; index
++)
2585 next_pcs
.push_back (breaks
[index
]);
2590 struct aarch64_displaced_step_closure
: public displaced_step_closure
2592 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2593 is being displaced stepping. */
2596 /* PC adjustment offset after displaced stepping. */
2597 int32_t pc_adjust
= 0;
2600 /* Data when visiting instructions for displaced stepping. */
2602 struct aarch64_displaced_step_data
2604 struct aarch64_insn_data base
;
2606 /* The address where the instruction will be executed at. */
2608 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2609 uint32_t insn_buf
[DISPLACED_MODIFIED_INSNS
];
2610 /* Number of instructions in INSN_BUF. */
2611 unsigned insn_count
;
2612 /* Registers when doing displaced stepping. */
2613 struct regcache
*regs
;
2615 aarch64_displaced_step_closure
*dsc
;
2618 /* Implementation of aarch64_insn_visitor method "b". */
2621 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2622 struct aarch64_insn_data
*data
)
2624 struct aarch64_displaced_step_data
*dsd
2625 = (struct aarch64_displaced_step_data
*) data
;
2626 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2628 if (can_encode_int32 (new_offset
, 28))
2630 /* Emit B rather than BL, because executing BL on a new address
2631 will get the wrong address into LR. In order to avoid this,
2632 we emit B, and update LR if the instruction is BL. */
2633 emit_b (dsd
->insn_buf
, 0, new_offset
);
2639 emit_nop (dsd
->insn_buf
);
2641 dsd
->dsc
->pc_adjust
= offset
;
2647 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2648 data
->insn_addr
+ 4);
2652 /* Implementation of aarch64_insn_visitor method "b_cond". */
2655 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2656 struct aarch64_insn_data
*data
)
2658 struct aarch64_displaced_step_data
*dsd
2659 = (struct aarch64_displaced_step_data
*) data
;
2661 /* GDB has to fix up PC after displaced step this instruction
2662 differently according to the condition is true or false. Instead
2663 of checking COND against conditional flags, we can use
2664 the following instructions, and GDB can tell how to fix up PC
2665 according to the PC value.
2667 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2673 emit_bcond (dsd
->insn_buf
, cond
, 8);
2675 dsd
->dsc
->pc_adjust
= offset
;
2676 dsd
->insn_count
= 1;
2679 /* Dynamically allocate a new register. If we know the register
2680 statically, we should make it a global as above instead of using this
2683 static struct aarch64_register
2684 aarch64_register (unsigned num
, int is64
)
2686 return (struct aarch64_register
) { num
, is64
};
2689 /* Implementation of aarch64_insn_visitor method "cb". */
2692 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2693 const unsigned rn
, int is64
,
2694 struct aarch64_insn_data
*data
)
2696 struct aarch64_displaced_step_data
*dsd
2697 = (struct aarch64_displaced_step_data
*) data
;
2699 /* The offset is out of range for a compare and branch
2700 instruction. We can use the following instructions instead:
2702 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2707 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2708 dsd
->insn_count
= 1;
2710 dsd
->dsc
->pc_adjust
= offset
;
2713 /* Implementation of aarch64_insn_visitor method "tb". */
2716 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2717 const unsigned rt
, unsigned bit
,
2718 struct aarch64_insn_data
*data
)
2720 struct aarch64_displaced_step_data
*dsd
2721 = (struct aarch64_displaced_step_data
*) data
;
2723 /* The offset is out of range for a test bit and branch
2724 instruction We can use the following instructions instead:
2726 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2732 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2733 dsd
->insn_count
= 1;
2735 dsd
->dsc
->pc_adjust
= offset
;
2738 /* Implementation of aarch64_insn_visitor method "adr". */
2741 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2742 const int is_adrp
, struct aarch64_insn_data
*data
)
2744 struct aarch64_displaced_step_data
*dsd
2745 = (struct aarch64_displaced_step_data
*) data
;
2746 /* We know exactly the address the ADR{P,} instruction will compute.
2747 We can just write it to the destination register. */
2748 CORE_ADDR address
= data
->insn_addr
+ offset
;
2752 /* Clear the lower 12 bits of the offset to get the 4K page. */
2753 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2757 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2760 dsd
->dsc
->pc_adjust
= 4;
2761 emit_nop (dsd
->insn_buf
);
2762 dsd
->insn_count
= 1;
2765 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2768 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2769 const unsigned rt
, const int is64
,
2770 struct aarch64_insn_data
*data
)
2772 struct aarch64_displaced_step_data
*dsd
2773 = (struct aarch64_displaced_step_data
*) data
;
2774 CORE_ADDR address
= data
->insn_addr
+ offset
;
2775 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2777 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2781 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2782 aarch64_register (rt
, 1), zero
);
2784 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2785 aarch64_register (rt
, 1), zero
);
2787 dsd
->dsc
->pc_adjust
= 4;
2790 /* Implementation of aarch64_insn_visitor method "others". */
2793 aarch64_displaced_step_others (const uint32_t insn
,
2794 struct aarch64_insn_data
*data
)
2796 struct aarch64_displaced_step_data
*dsd
2797 = (struct aarch64_displaced_step_data
*) data
;
2799 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2800 dsd
->insn_count
= 1;
2802 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2805 dsd
->dsc
->pc_adjust
= 0;
2808 dsd
->dsc
->pc_adjust
= 4;
2811 static const struct aarch64_insn_visitor visitor
=
2813 aarch64_displaced_step_b
,
2814 aarch64_displaced_step_b_cond
,
2815 aarch64_displaced_step_cb
,
2816 aarch64_displaced_step_tb
,
2817 aarch64_displaced_step_adr
,
2818 aarch64_displaced_step_ldr_literal
,
2819 aarch64_displaced_step_others
,
2822 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2824 struct displaced_step_closure
*
2825 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2826 CORE_ADDR from
, CORE_ADDR to
,
2827 struct regcache
*regs
)
2829 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2830 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2831 struct aarch64_displaced_step_data dsd
;
2834 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2837 /* Look for a Load Exclusive instruction which begins the sequence. */
2838 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
2840 /* We can't displaced step atomic sequences. */
2844 std::unique_ptr
<aarch64_displaced_step_closure
> dsc
2845 (new aarch64_displaced_step_closure
);
2846 dsd
.base
.insn_addr
= from
;
2849 dsd
.dsc
= dsc
.get ();
2851 aarch64_relocate_instruction (insn
, &visitor
,
2852 (struct aarch64_insn_data
*) &dsd
);
2853 gdb_assert (dsd
.insn_count
<= DISPLACED_MODIFIED_INSNS
);
2855 if (dsd
.insn_count
!= 0)
2859 /* Instruction can be relocated to scratch pad. Copy
2860 relocated instruction(s) there. */
2861 for (i
= 0; i
< dsd
.insn_count
; i
++)
2863 if (debug_displaced
)
2865 debug_printf ("displaced: writing insn ");
2866 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
2867 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
2869 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
2870 (ULONGEST
) dsd
.insn_buf
[i
]);
2878 return dsc
.release ();
2881 /* Implement the "displaced_step_fixup" gdbarch method. */
2884 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
2885 struct displaced_step_closure
*dsc_
,
2886 CORE_ADDR from
, CORE_ADDR to
,
2887 struct regcache
*regs
)
2889 aarch64_displaced_step_closure
*dsc
= (aarch64_displaced_step_closure
*) dsc_
;
2895 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
2898 /* Condition is true. */
2900 else if (pc
- to
== 4)
2902 /* Condition is false. */
2906 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2909 if (dsc
->pc_adjust
!= 0)
2911 if (debug_displaced
)
2913 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2914 paddress (gdbarch
, from
), dsc
->pc_adjust
);
2916 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
2917 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2930 /* Get the correct target description for the given VQ value.
2931 If VQ is zero then it is assumed SVE is not supported.
2932 (It is not possible to set VQ to zero on an SVE system). */
2935 aarch64_read_description (uint64_t vq
)
2937 if (vq
> AARCH64_MAX_SVE_VQ
)
2938 error (_("VQ is %" PRIu64
", maximum supported value is %d"), vq
,
2939 AARCH64_MAX_SVE_VQ
);
2941 struct target_desc
*tdesc
= tdesc_aarch64_list
[vq
];
2945 tdesc
= aarch64_create_target_description (vq
);
2946 tdesc_aarch64_list
[vq
] = tdesc
;
2952 /* Return the VQ used when creating the target description TDESC. */
2955 aarch64_get_tdesc_vq (const struct target_desc
*tdesc
)
2957 const struct tdesc_feature
*feature_sve
;
2959 if (!tdesc_has_registers (tdesc
))
2962 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
2964 if (feature_sve
== nullptr)
2967 uint64_t vl
= tdesc_register_bitsize (feature_sve
,
2968 aarch64_sve_register_names
[0]) / 8;
2969 return sve_vq_from_vl (vl
);
2973 /* Initialize the current architecture based on INFO. If possible,
2974 re-use an architecture from ARCHES, which is a list of
2975 architectures already created during this debugging session.
2977 Called e.g. at program startup, when reading a core file, and when
2978 reading a binary file. */
2980 static struct gdbarch
*
2981 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2983 struct gdbarch_tdep
*tdep
;
2984 struct gdbarch
*gdbarch
;
2985 struct gdbarch_list
*best_arch
;
2986 struct tdesc_arch_data
*tdesc_data
= NULL
;
2987 const struct target_desc
*tdesc
= info
.target_desc
;
2990 const struct tdesc_feature
*feature_core
;
2991 const struct tdesc_feature
*feature_fpu
;
2992 const struct tdesc_feature
*feature_sve
;
2994 int num_pseudo_regs
= 0;
2996 /* Ensure we always have a target description. */
2997 if (!tdesc_has_registers (tdesc
))
2998 tdesc
= aarch64_read_description (0);
3001 feature_core
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
3002 feature_fpu
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
3003 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3005 if (feature_core
== NULL
)
3008 tdesc_data
= tdesc_data_alloc ();
3010 /* Validate the description provides the mandatory core R registers
3011 and allocate their numbers. */
3012 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
3013 valid_p
&= tdesc_numbered_register (feature_core
, tdesc_data
,
3014 AARCH64_X0_REGNUM
+ i
,
3015 aarch64_r_register_names
[i
]);
3017 num_regs
= AARCH64_X0_REGNUM
+ i
;
3019 /* Add the V registers. */
3020 if (feature_fpu
!= NULL
)
3022 if (feature_sve
!= NULL
)
3023 error (_("Program contains both fpu and SVE features."));
3025 /* Validate the description provides the mandatory V registers
3026 and allocate their numbers. */
3027 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
3028 valid_p
&= tdesc_numbered_register (feature_fpu
, tdesc_data
,
3029 AARCH64_V0_REGNUM
+ i
,
3030 aarch64_v_register_names
[i
]);
3032 num_regs
= AARCH64_V0_REGNUM
+ i
;
3035 /* Add the SVE registers. */
3036 if (feature_sve
!= NULL
)
3038 /* Validate the description provides the mandatory SVE registers
3039 and allocate their numbers. */
3040 for (i
= 0; i
< ARRAY_SIZE (aarch64_sve_register_names
); i
++)
3041 valid_p
&= tdesc_numbered_register (feature_sve
, tdesc_data
,
3042 AARCH64_SVE_Z0_REGNUM
+ i
,
3043 aarch64_sve_register_names
[i
]);
3045 num_regs
= AARCH64_SVE_Z0_REGNUM
+ i
;
3046 num_pseudo_regs
+= 32; /* add the Vn register pseudos. */
3049 if (feature_fpu
!= NULL
|| feature_sve
!= NULL
)
3051 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
3052 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
3053 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
3054 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
3055 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
3060 tdesc_data_cleanup (tdesc_data
);
3064 /* AArch64 code is always little-endian. */
3065 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
3067 /* If there is already a candidate, use it. */
3068 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
3070 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
3072 /* Found a match. */
3076 if (best_arch
!= NULL
)
3078 if (tdesc_data
!= NULL
)
3079 tdesc_data_cleanup (tdesc_data
);
3080 return best_arch
->gdbarch
;
3083 tdep
= XCNEW (struct gdbarch_tdep
);
3084 gdbarch
= gdbarch_alloc (&info
, tdep
);
3086 /* This should be low enough for everything. */
3087 tdep
->lowest_pc
= 0x20;
3088 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
3089 tdep
->jb_elt_size
= 8;
3090 tdep
->vq
= aarch64_get_tdesc_vq (tdesc
);
3092 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
3093 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
3095 /* Frame handling. */
3096 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
3097 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
3098 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
3100 /* Advance PC across function entry code. */
3101 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
3103 /* The stack grows downward. */
3104 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
3106 /* Breakpoint manipulation. */
3107 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
3108 aarch64_breakpoint::kind_from_pc
);
3109 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
3110 aarch64_breakpoint::bp_from_kind
);
3111 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
3112 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
3114 /* Information about registers, etc. */
3115 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
3116 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
3117 set_gdbarch_num_regs (gdbarch
, num_regs
);
3119 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
3120 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
3121 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
3122 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
3123 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
3124 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
3125 aarch64_pseudo_register_reggroup_p
);
3128 set_gdbarch_short_bit (gdbarch
, 16);
3129 set_gdbarch_int_bit (gdbarch
, 32);
3130 set_gdbarch_float_bit (gdbarch
, 32);
3131 set_gdbarch_double_bit (gdbarch
, 64);
3132 set_gdbarch_long_double_bit (gdbarch
, 128);
3133 set_gdbarch_long_bit (gdbarch
, 64);
3134 set_gdbarch_long_long_bit (gdbarch
, 64);
3135 set_gdbarch_ptr_bit (gdbarch
, 64);
3136 set_gdbarch_char_signed (gdbarch
, 0);
3137 set_gdbarch_wchar_signed (gdbarch
, 0);
3138 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
3139 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
3140 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
3142 /* Internal <-> external register number maps. */
3143 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3145 /* Returning results. */
3146 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3149 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3151 /* Virtual tables. */
3152 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3154 /* Hook in the ABI-specific overrides, if they have been registered. */
3155 info
.target_desc
= tdesc
;
3156 info
.tdesc_data
= tdesc_data
;
3157 gdbarch_init_osabi (info
, gdbarch
);
3159 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3161 /* Add some default predicates. */
3162 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3163 dwarf2_append_unwinders (gdbarch
);
3164 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3166 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3168 /* Now we have tuned the configuration, set a few final things,
3169 based on what the OS ABI has told us. */
3171 if (tdep
->jb_pc
>= 0)
3172 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3174 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3176 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
3178 /* Add standard register aliases. */
3179 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3180 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3181 value_of_aarch64_user_reg
,
3182 &aarch64_register_aliases
[i
].regnum
);
3188 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3190 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3195 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3196 paddress (gdbarch
, tdep
->lowest_pc
));
3202 static void aarch64_process_record_test (void);
3207 _initialize_aarch64_tdep (void)
3209 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3212 /* Debug this file's internals. */
3213 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3214 Set AArch64 debugging."), _("\
3215 Show AArch64 debugging."), _("\
3216 When on, AArch64 specific debugging is enabled."),
3219 &setdebuglist
, &showdebuglist
);
3222 selftests::register_test ("aarch64-analyze-prologue",
3223 selftests::aarch64_analyze_prologue_test
);
3224 selftests::register_test ("aarch64-process-record",
3225 selftests::aarch64_process_record_test
);
3226 selftests::record_xml_tdesc ("aarch64.xml",
3227 aarch64_create_target_description (0));
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate LENGTH register records into REGS and copy them from
   RECORD_BUF.  No-op when LENGTH is zero.  NOTE: the source here had a
   mojibake `&REGS[0]` corruption; restored.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate LENGTH memory records into MEMS and copy the (len, addr)
   pairs from RECORD_BUF.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do   \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
3258 /* AArch64 record/replay structures and enumerations. */
3260 struct aarch64_mem_r
3262 uint64_t len
; /* Record length. */
3263 uint64_t addr
; /* Memory address. */
3266 enum aarch64_record_result
3268 AARCH64_RECORD_SUCCESS
,
3269 AARCH64_RECORD_UNSUPPORTED
,
3270 AARCH64_RECORD_UNKNOWN
3273 typedef struct insn_decode_record_t
3275 struct gdbarch
*gdbarch
;
3276 struct regcache
*regcache
;
3277 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3278 uint32_t aarch64_insn
; /* Insn to be recorded. */
3279 uint32_t mem_rec_count
; /* Count of memory records. */
3280 uint32_t reg_rec_count
; /* Count of register records. */
3281 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3282 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3283 } insn_decode_record
;
3285 /* Record handler for data processing - register instructions. */
3288 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3290 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3291 uint32_t record_buf
[4];
3293 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3294 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3295 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3297 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3301 /* Logical (shifted register). */
3302 if (insn_bits24_27
== 0x0a)
3303 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3305 else if (insn_bits24_27
== 0x0b)
3306 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3308 return AARCH64_RECORD_UNKNOWN
;
3310 record_buf
[0] = reg_rd
;
3311 aarch64_insn_r
->reg_rec_count
= 1;
3313 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3317 if (insn_bits24_27
== 0x0b)
3319 /* Data-processing (3 source). */
3320 record_buf
[0] = reg_rd
;
3321 aarch64_insn_r
->reg_rec_count
= 1;
3323 else if (insn_bits24_27
== 0x0a)
3325 if (insn_bits21_23
== 0x00)
3327 /* Add/subtract (with carry). */
3328 record_buf
[0] = reg_rd
;
3329 aarch64_insn_r
->reg_rec_count
= 1;
3330 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3332 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3333 aarch64_insn_r
->reg_rec_count
= 2;
3336 else if (insn_bits21_23
== 0x02)
3338 /* Conditional compare (register) and conditional compare
3339 (immediate) instructions. */
3340 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3341 aarch64_insn_r
->reg_rec_count
= 1;
3343 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3345 /* CConditional select. */
3346 /* Data-processing (2 source). */
3347 /* Data-processing (1 source). */
3348 record_buf
[0] = reg_rd
;
3349 aarch64_insn_r
->reg_rec_count
= 1;
3352 return AARCH64_RECORD_UNKNOWN
;
3356 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3358 return AARCH64_RECORD_SUCCESS
;
3361 /* Record handler for data processing - immediate instructions. */
3364 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3366 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3367 uint32_t record_buf
[4];
3369 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3370 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3371 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3373 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3374 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3375 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3377 record_buf
[0] = reg_rd
;
3378 aarch64_insn_r
->reg_rec_count
= 1;
3380 else if (insn_bits24_27
== 0x01)
3382 /* Add/Subtract (immediate). */
3383 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3384 record_buf
[0] = reg_rd
;
3385 aarch64_insn_r
->reg_rec_count
= 1;
3387 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3389 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3391 /* Logical (immediate). */
3392 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3393 record_buf
[0] = reg_rd
;
3394 aarch64_insn_r
->reg_rec_count
= 1;
3396 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3399 return AARCH64_RECORD_UNKNOWN
;
3401 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3403 return AARCH64_RECORD_SUCCESS
;
3406 /* Record handler for branch, exception generation and system instructions. */
3409 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3411 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3412 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3413 uint32_t record_buf
[4];
3415 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3416 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3417 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3419 if (insn_bits28_31
== 0x0d)
3421 /* Exception generation instructions. */
3422 if (insn_bits24_27
== 0x04)
3424 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3425 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3426 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3428 ULONGEST svc_number
;
3430 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3432 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3436 return AARCH64_RECORD_UNSUPPORTED
;
3438 /* System instructions. */
3439 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3441 uint32_t reg_rt
, reg_crn
;
3443 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3444 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3446 /* Record rt in case of sysl and mrs instructions. */
3447 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3449 record_buf
[0] = reg_rt
;
3450 aarch64_insn_r
->reg_rec_count
= 1;
3452 /* Record cpsr for hint and msr(immediate) instructions. */
3453 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3455 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3456 aarch64_insn_r
->reg_rec_count
= 1;
3459 /* Unconditional branch (register). */
3460 else if((insn_bits24_27
& 0x0e) == 0x06)
3462 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3463 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3464 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3467 return AARCH64_RECORD_UNKNOWN
;
3469 /* Unconditional branch (immediate). */
3470 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3472 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3473 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3474 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3477 /* Compare & branch (immediate), Test & branch (immediate) and
3478 Conditional branch (immediate). */
3479 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3481 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3483 return AARCH64_RECORD_SUCCESS
;
3486 /* Record handler for advanced SIMD load and store instructions. */
3489 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3492 uint64_t addr_offset
= 0;
3493 uint32_t record_buf
[24];
3494 uint64_t record_buf_mem
[24];
3495 uint32_t reg_rn
, reg_rt
;
3496 uint32_t reg_index
= 0, mem_index
= 0;
3497 uint8_t opcode_bits
, size_bits
;
3499 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3500 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3501 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3502 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3503 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3506 debug_printf ("Process record: Advanced SIMD load/store\n");
3508 /* Load/store single structure. */
3509 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3511 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3512 scale
= opcode_bits
>> 2;
3513 selem
= ((opcode_bits
& 0x02) |
3514 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3518 if (size_bits
& 0x01)
3519 return AARCH64_RECORD_UNKNOWN
;
3522 if ((size_bits
>> 1) & 0x01)
3523 return AARCH64_RECORD_UNKNOWN
;
3524 if (size_bits
& 0x01)
3526 if (!((opcode_bits
>> 1) & 0x01))
3529 return AARCH64_RECORD_UNKNOWN
;
3533 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3540 return AARCH64_RECORD_UNKNOWN
;
3546 for (sindex
= 0; sindex
< selem
; sindex
++)
3548 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3549 reg_rt
= (reg_rt
+ 1) % 32;
3553 for (sindex
= 0; sindex
< selem
; sindex
++)
3555 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3556 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3559 record_buf_mem
[mem_index
++] = esize
/ 8;
3560 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3562 addr_offset
= addr_offset
+ (esize
/ 8);
3563 reg_rt
= (reg_rt
+ 1) % 32;
3567 /* Load/store multiple structure. */
3570 uint8_t selem
, esize
, rpt
, elements
;
3571 uint8_t eindex
, rindex
;
3573 esize
= 8 << size_bits
;
3574 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3575 elements
= 128 / esize
;
3577 elements
= 64 / esize
;
3579 switch (opcode_bits
)
3581 /*LD/ST4 (4 Registers). */
3586 /*LD/ST1 (4 Registers). */
3591 /*LD/ST3 (3 Registers). */
3596 /*LD/ST1 (3 Registers). */
3601 /*LD/ST1 (1 Register). */
3606 /*LD/ST2 (2 Registers). */
3611 /*LD/ST1 (2 Registers). */
3617 return AARCH64_RECORD_UNSUPPORTED
;
3620 for (rindex
= 0; rindex
< rpt
; rindex
++)
3621 for (eindex
= 0; eindex
< elements
; eindex
++)
3623 uint8_t reg_tt
, sindex
;
3624 reg_tt
= (reg_rt
+ rindex
) % 32;
3625 for (sindex
= 0; sindex
< selem
; sindex
++)
3627 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3628 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3631 record_buf_mem
[mem_index
++] = esize
/ 8;
3632 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3634 addr_offset
= addr_offset
+ (esize
/ 8);
3635 reg_tt
= (reg_tt
+ 1) % 32;
3640 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3641 record_buf
[reg_index
++] = reg_rn
;
3643 aarch64_insn_r
->reg_rec_count
= reg_index
;
3644 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3645 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3647 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3649 return AARCH64_RECORD_SUCCESS
;
3652 /* Record handler for load and store instructions. */
3655 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3657 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3658 uint8_t insn_bit23
, insn_bit21
;
3659 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3660 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3661 uint64_t datasize
, offset
;
3662 uint32_t record_buf
[8];
3663 uint64_t record_buf_mem
[8];
3666 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3667 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3668 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3669 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3670 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3671 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3672 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3673 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3674 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3675 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3676 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3678 /* Load/store exclusive. */
3679 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3682 debug_printf ("Process record: load/store exclusive\n");
3686 record_buf
[0] = reg_rt
;
3687 aarch64_insn_r
->reg_rec_count
= 1;
3690 record_buf
[1] = reg_rt2
;
3691 aarch64_insn_r
->reg_rec_count
= 2;
3697 datasize
= (8 << size_bits
) * 2;
3699 datasize
= (8 << size_bits
);
3700 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3702 record_buf_mem
[0] = datasize
/ 8;
3703 record_buf_mem
[1] = address
;
3704 aarch64_insn_r
->mem_rec_count
= 1;
3707 /* Save register rs. */
3708 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3709 aarch64_insn_r
->reg_rec_count
= 1;
3713 /* Load register (literal) instructions decoding. */
3714 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3717 debug_printf ("Process record: load register (literal)\n");
3719 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3721 record_buf
[0] = reg_rt
;
3722 aarch64_insn_r
->reg_rec_count
= 1;
3724 /* All types of load/store pair instructions decoding. */
3725 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3728 debug_printf ("Process record: load/store pair\n");
3734 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3735 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3739 record_buf
[0] = reg_rt
;
3740 record_buf
[1] = reg_rt2
;
3742 aarch64_insn_r
->reg_rec_count
= 2;
3747 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3749 size_bits
= size_bits
>> 1;
3750 datasize
= 8 << (2 + size_bits
);
3751 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3752 offset
= offset
<< (2 + size_bits
);
3753 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3755 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3757 if (imm7_off
& 0x40)
3758 address
= address
- offset
;
3760 address
= address
+ offset
;
3763 record_buf_mem
[0] = datasize
/ 8;
3764 record_buf_mem
[1] = address
;
3765 record_buf_mem
[2] = datasize
/ 8;
3766 record_buf_mem
[3] = address
+ (datasize
/ 8);
3767 aarch64_insn_r
->mem_rec_count
= 2;
3769 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3770 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3772 /* Load/store register (unsigned immediate) instructions. */
3773 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3775 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3785 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
3787 /* PRFM (immediate) */
3788 return AARCH64_RECORD_SUCCESS
;
3790 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
3792 /* LDRSW (immediate) */
3806 debug_printf ("Process record: load/store (unsigned immediate):"
3807 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3813 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3814 datasize
= 8 << size_bits
;
3815 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3817 offset
= offset
<< size_bits
;
3818 address
= address
+ offset
;
3820 record_buf_mem
[0] = datasize
>> 3;
3821 record_buf_mem
[1] = address
;
3822 aarch64_insn_r
->mem_rec_count
= 1;
3827 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3829 record_buf
[0] = reg_rt
;
3830 aarch64_insn_r
->reg_rec_count
= 1;
3833 /* Load/store register (register offset) instructions. */
3834 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3835 && insn_bits10_11
== 0x02 && insn_bit21
)
3838 debug_printf ("Process record: load/store (register offset)\n");
3839 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3846 if (size_bits
!= 0x03)
3849 return AARCH64_RECORD_UNKNOWN
;
3853 ULONGEST reg_rm_val
;
3855 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3856 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3857 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3858 offset
= reg_rm_val
<< size_bits
;
3860 offset
= reg_rm_val
;
3861 datasize
= 8 << size_bits
;
3862 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3864 address
= address
+ offset
;
3865 record_buf_mem
[0] = datasize
>> 3;
3866 record_buf_mem
[1] = address
;
3867 aarch64_insn_r
->mem_rec_count
= 1;
3872 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3874 record_buf
[0] = reg_rt
;
3875 aarch64_insn_r
->reg_rec_count
= 1;
3878 /* Load/store register (immediate and unprivileged) instructions. */
3879 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3884 debug_printf ("Process record: load/store "
3885 "(immediate and unprivileged)\n");
3887 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3894 if (size_bits
!= 0x03)
3897 return AARCH64_RECORD_UNKNOWN
;
3902 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3903 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3904 datasize
= 8 << size_bits
;
3905 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3907 if (insn_bits10_11
!= 0x01)
3909 if (imm9_off
& 0x0100)
3910 address
= address
- offset
;
3912 address
= address
+ offset
;
3914 record_buf_mem
[0] = datasize
>> 3;
3915 record_buf_mem
[1] = address
;
3916 aarch64_insn_r
->mem_rec_count
= 1;
3921 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3923 record_buf
[0] = reg_rt
;
3924 aarch64_insn_r
->reg_rec_count
= 1;
3926 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3927 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3929 /* Advanced SIMD load/store instructions. */
3931 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3933 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3935 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3937 return AARCH64_RECORD_SUCCESS
;
3940 /* Record handler for data processing SIMD and floating point instructions. */
3943 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3945 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3946 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3947 uint8_t insn_bits11_14
;
3948 uint32_t record_buf
[2];
3950 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3951 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3952 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3953 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3954 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3955 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3956 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3957 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3958 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3961 debug_printf ("Process record: data processing SIMD/FP: ");
3963 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3965 /* Floating point - fixed point conversion instructions. */
3969 debug_printf ("FP - fixed point conversion");
3971 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3972 record_buf
[0] = reg_rd
;
3974 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3976 /* Floating point - conditional compare instructions. */
3977 else if (insn_bits10_11
== 0x01)
3980 debug_printf ("FP - conditional compare");
3982 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3984 /* Floating point - data processing (2-source) and
3985 conditional select instructions. */
3986 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3989 debug_printf ("FP - DP (2-source)");
3991 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3993 else if (insn_bits10_11
== 0x00)
3995 /* Floating point - immediate instructions. */
3996 if ((insn_bits12_15
& 0x01) == 0x01
3997 || (insn_bits12_15
& 0x07) == 0x04)
4000 debug_printf ("FP - immediate");
4001 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4003 /* Floating point - compare instructions. */
4004 else if ((insn_bits12_15
& 0x03) == 0x02)
4007 debug_printf ("FP - immediate");
4008 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4010 /* Floating point - integer conversions instructions. */
4011 else if (insn_bits12_15
== 0x00)
4013 /* Convert float to integer instruction. */
4014 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4017 debug_printf ("float to int conversion");
4019 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4021 /* Convert integer to float instruction. */
4022 else if ((opcode
>> 1) == 0x01 && !rmode
)
4025 debug_printf ("int to float conversion");
4027 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4029 /* Move float to integer instruction. */
4030 else if ((opcode
>> 1) == 0x03)
4033 debug_printf ("move float to int");
4035 if (!(opcode
& 0x01))
4036 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4038 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4041 return AARCH64_RECORD_UNKNOWN
;
4044 return AARCH64_RECORD_UNKNOWN
;
4047 return AARCH64_RECORD_UNKNOWN
;
4049 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4052 debug_printf ("SIMD copy");
4054 /* Advanced SIMD copy instructions. */
4055 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4056 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4057 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4059 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4060 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4062 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4065 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4067 /* All remaining floating point or advanced SIMD instructions. */
4071 debug_printf ("all remain");
4073 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4077 debug_printf ("\n");
4079 aarch64_insn_r
->reg_rec_count
++;
4080 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
4081 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4083 return AARCH64_RECORD_SUCCESS
;
4086 /* Decodes insns type and invokes its record handler. */
4089 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4091 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4093 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4094 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4095 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4096 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4098 /* Data processing - immediate instructions. */
4099 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4100 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4102 /* Branch, exception generation and system instructions. */
4103 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4104 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4106 /* Load and store instructions. */
4107 if (!ins_bit25
&& ins_bit27
)
4108 return aarch64_record_load_store (aarch64_insn_r
);
4110 /* Data processing - register instructions. */
4111 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4112 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4114 /* Data processing - SIMD and floating point instructions. */
4115 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4116 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4118 return AARCH64_RECORD_UNSUPPORTED
;
4121 /* Cleans up local record registers and memory allocations. */
4124 deallocate_reg_mem (insn_decode_record
*record
)
4126 xfree (record
->aarch64_regs
);
4127 xfree (record
->aarch64_mems
);
#if GDB_SELF_TEST
namespace selftests {

/* Self-test for the process-record decoder: a PRFM (prefetch)
   instruction must decode successfully while recording no register or
   memory changes, since prefetch has no architectural effect.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  /* A regcache is not needed: PRFM decoding never reads registers.  */
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4165 /* Parse the current instruction and record the values of the registers and
4166 memory that will be changed in current instruction to record_arch_list
4167 return -1 if something is wrong. */
4170 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4171 CORE_ADDR insn_addr
)
4173 uint32_t rec_no
= 0;
4174 uint8_t insn_size
= 4;
4176 gdb_byte buf
[insn_size
];
4177 insn_decode_record aarch64_record
;
4179 memset (&buf
[0], 0, insn_size
);
4180 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4181 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4182 aarch64_record
.aarch64_insn
4183 = (uint32_t) extract_unsigned_integer (&buf
[0],
4185 gdbarch_byte_order (gdbarch
));
4186 aarch64_record
.regcache
= regcache
;
4187 aarch64_record
.this_addr
= insn_addr
;
4188 aarch64_record
.gdbarch
= gdbarch
;
4190 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4191 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4193 printf_unfiltered (_("Process record does not support instruction "
4194 "0x%0x at address %s.\n"),
4195 aarch64_record
.aarch64_insn
,
4196 paddress (gdbarch
, insn_addr
));
4202 /* Record registers. */
4203 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4205 /* Always record register CPSR. */
4206 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4207 AARCH64_CPSR_REGNUM
);
4208 if (aarch64_record
.aarch64_regs
)
4209 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4210 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4211 aarch64_record
.aarch64_regs
[rec_no
]))
4214 /* Record memories. */
4215 if (aarch64_record
.aarch64_mems
)
4216 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4217 if (record_full_arch_list_add_mem
4218 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4219 aarch64_record
.aarch64_mems
[rec_no
].len
))
4222 if (record_full_arch_list_add_end ())
4226 deallocate_reg_mem (&aarch64_record
);